Merge tag 'v5.16-rc8' into android-mainline

Linux 5.16-rc8

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I4c1d6911025ca7ecf65af4de49a0e0085596409a
diff --git a/BUILD.bazel b/BUILD.bazel
new file mode 100644
index 0000000..67e5228
--- /dev/null
+++ b/BUILD.bazel
@@ -0,0 +1,23 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+package(
+    default_visibility = [
+        "//visibility:public",
+    ],
+)
+
+load("//build/kleaf:common_kernels.bzl", "define_common_kernels")
+load("@kernel_toolchain_info//:dict.bzl", "CLANG_VERSION")
+
+define_common_kernels(toolchain_version = CLANG_VERSION)
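+
+# A sketch of typical usage from a Kleaf workspace (target names depend on the
+# checked-out configuration; illustrative only):
+#
+#   tools/bazel build //common:kernel_aarch64
+#   tools/bazel run //common:kernel_aarch64_dist -- --dist_dir=out/dist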
diff --git a/Documentation/ABI/stable/sysfs-module b/Documentation/ABI/stable/sysfs-module
index 560b4a3..f204eb04 100644
--- a/Documentation/ABI/stable/sysfs-module
+++ b/Documentation/ABI/stable/sysfs-module
@@ -45,3 +45,21 @@
 Description:
 		If the module source has MODULE_VERSION, this file will contain
 		the version of the source code.
+
+What:		/sys/module/MODULENAME/scmversion
+Date:		November 2020
+KernelVersion:	Android Common Kernel -- android12-5.10+
+Contact:	Will McVicker <willmcvicker@google.com>
+Description:	This read-only file will appear if modpost was supplied with an
+		SCM version for the module. It can be enabled with the config
+		MODULE_SCMVERSION. The SCM version is retrieved by
+		scripts/setlocalversion, which means that the presence of this
+		file depends on CONFIG_LOCALVERSION_AUTO=y. When read, the SCM
+		version that the module was compiled with is returned. The SCM
+		version is returned in the following format::
+
+		===
+		Git:		g[a-f0-9]\+(-dirty)\?
+		Mercurial:	hg[a-f0-9]\+(-dirty)\?
+		Subversion:	svn[0-9]\+
+		===
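+
+		For example, a module built from a git tree would report
+		something like "g0123456789ab", with "-dirty" appended if the
+		tree contained local modifications (hash illustrative).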
diff --git a/Documentation/ABI/testing/OWNERS b/Documentation/ABI/testing/OWNERS
new file mode 100644
index 0000000..3ab5dca
--- /dev/null
+++ b/Documentation/ABI/testing/OWNERS
@@ -0,0 +1 @@
+per-file sysfs-fs-f2fs=file:/fs/f2fs/OWNERS
diff --git a/Documentation/ABI/testing/sysfs-fs-incfs b/Documentation/ABI/testing/sysfs-fs-incfs
new file mode 100644
index 0000000..690c687
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-fs-incfs
@@ -0,0 +1,64 @@
+What:		/sys/fs/incremental-fs/features/corefs
+Date:		2019
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Reads 'supported'. Always present.
+
+What:		/sys/fs/incremental-fs/features/v2
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Reads 'supported'. Present if all v2 features of incfs are
+		supported.
+
+What:		/sys/fs/incremental-fs/features/zstd
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Reads 'supported'. Present if zstd compression is supported
+		for data blocks.
+
+What:		/sys/fs/incremental-fs/instances/[name]
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Folder created when incfs is mounted with the sysfs_name=[name]
+		option. If this option is used, the following values are created
+		in this folder.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_delayed_min
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns a count of the number of reads that were delayed as a
+		result of the per UID read timeouts min time setting.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_delayed_min_us
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns total delay time for all files since first mount as a
+		result of the per UID read timeouts min time setting.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_delayed_pending
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns a count of the number of reads that were delayed as a
+		result of waiting for a pending read.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_delayed_pending_us
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns total delay time for all files since first mount as a
+		result of waiting for a pending read.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_failed_hash_verification
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns number of reads that failed because of hash verification
+		failures.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_failed_other
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns number of reads that failed for reasons other than
+		timing out or hash failures.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_failed_timed_out
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns number of reads that timed out.
diff --git a/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
new file mode 100644
index 0000000..acb19b9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
@@ -0,0 +1,16 @@
+What:		/sys/kernel/wakeup_reasons/last_resume_reason
+Date:		February 2014
+Contact:	Ruchi Kandoi <kandoiruchi@google.com>
+Description:
+		The /sys/kernel/wakeup_reasons/last_resume_reason is
+		used to report wakeup reasons after the system exits suspend.
+
+What:		/sys/kernel/wakeup_reasons/last_suspend_time
+Date:		March 2015
+Contact:	jinqian <jinqian@google.com>
+Description:
+		The /sys/kernel/wakeup_reasons/last_suspend_time is
+		used to report the time spent in the last suspend cycle. It
+		contains two numbers (in seconds) separated by a space. The
+		first number is the time spent in the suspend and resume
+		processes. The second number is the time spent in sleep state.
\ No newline at end of file
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 2fba824..ece8776 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1645,6 +1645,10 @@
 				If specified, z/VM IUCV HVC accepts connections
 				from listed z/VM user IDs only.
 
+	hvc_dcc.enable=	[ARM,ARM64]	Enable the DCC driver at runtime. For
+				GKI, it is disabled by default to prevent
+				crashes on devices that do not support DCC.
+
 	hv_nopvspin	[X86,HYPER_V] Disables the paravirt spinlock optimizations
 				      which allow the hypervisor to 'idle' the
 				      guest on lock contention.
diff --git a/Documentation/block/inline-encryption.rst b/Documentation/block/inline-encryption.rst
index 4d151fb..c2bb3c5 100644
--- a/Documentation/block/inline-encryption.rst
+++ b/Documentation/block/inline-encryption.rst
@@ -77,10 +77,10 @@
 ============
 
 We introduce ``struct blk_crypto_key`` to represent an inline encryption key and
-how it will be used.  This includes the actual bytes of the key; the size of the
-key; the algorithm and data unit size the key will be used with; and the number
-of bytes needed to represent the maximum data unit number the key will be used
-with.
+how it will be used.  This includes the type of the key (standard or
+hardware-wrapped); the actual bytes of the key; the size of the key; the
+algorithm and data unit size the key will be used with; and the number of bytes
+needed to represent the maximum data unit number the key will be used with.
 
 We introduce ``struct bio_crypt_ctx`` to represent an encryption context.  It
 contains a data unit number and a pointer to a blk_crypto_key.  We add pointers
@@ -302,3 +302,215 @@
 When the crypto API fallback is enabled, this means that all bios with and
 encryption context will use the fallback, and IO will complete as usual.  When
 the fallback is disabled, a bio with an encryption context will be failed.
+
+.. _hardware_wrapped_keys:
+
+Hardware-wrapped keys
+=====================
+
+Motivation and threat model
+---------------------------
+
+Linux storage encryption (dm-crypt, fscrypt, eCryptfs, etc.) traditionally
+relies on the raw encryption key(s) being present in kernel memory so that the
+encryption can be performed.  This traditionally isn't seen as a problem because
+the key(s) won't be present during an offline attack, which is the main type of
+attack that storage encryption is intended to protect from.
+
+However, there is an increasing desire to also protect users' data from other
+types of attacks (to the extent possible), including:
+
+- Cold boot attacks, where an attacker with physical access to a system suddenly
+  powers it off, then immediately dumps the system memory to extract recently
+  in-use encryption keys, then uses these keys to decrypt user data on-disk.
+
+- Online attacks where the attacker is able to read kernel memory without fully
+  compromising the system, followed by an offline attack where any extracted
+  keys can be used to decrypt user data on-disk.  An example of such an online
+  attack would be if the attacker is able to run some code on the system that
+  exploits a Meltdown-like vulnerability but is unable to escalate privileges.
+
+- Online attacks where the attacker fully compromises the system, but their data
+  exfiltration is significantly time-limited and/or bandwidth-limited, so in
+  order to completely exfiltrate the data they need to extract the encryption
+  keys to use in a later offline attack.
+
+Hardware-wrapped keys are a feature of inline encryption hardware that is
+designed to protect users' data from the above attacks (to the extent possible),
+without introducing limitations such as a maximum number of keys.
+
+Note that it is impossible to **fully** protect users' data from these attacks.
+Even in the attacks where the attacker "just" gets read access to kernel memory,
+they can still extract any user data that is present in memory, including
+plaintext pagecache pages of encrypted files.  The focus here is just on
+protecting the encryption keys, as those instantly give access to **all** user
+data in any following offline attack, rather than just some of it (where which
+data is included in that "some" might not be controlled by the attacker).
+
+Solution overview
+-----------------
+
+Inline encryption hardware typically has "keyslots" into which software can
+program keys for the hardware to use; the contents of keyslots typically can't
+be read back by software.  As such, the above security goals could be achieved
+if the kernel simply erased its copy of the key(s) after programming them into
+keyslot(s) and thereafter only referred to them via keyslot number.
+
+However, that naive approach runs into the problem that it limits the number of
+unlocked keys to the number of keyslots, which typically is a small number.  In
+cases where there is only one encryption key system-wide (e.g., a full-disk
+encryption key), that can be tolerable.  However, in general there can be many
+logged-in users with many different keys, and/or many running applications with
+application-specific encrypted storage areas.  This is especially true if
+file-based encryption (e.g. fscrypt) is being used.
+
+Thus, it is important for the kernel to still have a way to "remind" the
+hardware about a key, without actually having the raw key itself.  This would
+ensure that the number of hardware keyslots only limits the number of active I/O
+requests, not other things such as the number of logged-in users, the number of
+running apps, or the number of encrypted storage areas that apps can create.
+
+Somewhat less importantly, it is also desirable that the raw keys are never
+visible to software at all, even while being initially unlocked.  This would
+ensure that a read-only compromise of system memory will never allow a key to be
+extracted to be used off-system, even if it occurs when a key is being unlocked.
+
+To solve all these problems, some vendors of inline encryption hardware have
+made their hardware support *hardware-wrapped keys*.  Hardware-wrapped keys
+are encrypted keys that can only be unwrapped (decrypted) and used by hardware
+-- either by the inline encryption hardware itself, or by a dedicated hardware
+block that can directly provision keys to the inline encryption hardware.
+
+(We refer to them as "hardware-wrapped keys" rather than simply "wrapped keys"
+to add some clarity in cases where there could be other types of wrapped keys,
+such as in file-based encryption.  Key wrapping is a commonly used technique.)
+
+The key which wraps (encrypts) hardware-wrapped keys is a hardware-internal key
+that is never exposed to software; it is either a persistent key (a "long-term
+wrapping key") or a per-boot key (an "ephemeral wrapping key").  The long-term
+wrapped form of the key is what is initially unlocked, but it is discarded as
+soon as it is converted into an ephemerally-wrapped key.  In-use
+hardware-wrapped keys are always ephemerally-wrapped, not long-term wrapped.
+
+As inline encryption hardware can only be used to encrypt/decrypt data on-disk,
+the hardware also includes a level of indirection; it doesn't use the unwrapped
+key directly for inline encryption, but rather derives both an inline encryption
+key and a "software secret" from it.  Software can use the "software secret" for
+tasks that can't use the inline encryption hardware, such as filenames
+encryption.  The software secret is not protected from memory compromise.
+
+Key hierarchy
+-------------
+
+Here is the key hierarchy for a hardware-wrapped key::
+
+                       Hardware-wrapped key
+                                |
+                                |
+                          <Hardware KDF>
+                                |
+                  -----------------------------
+                  |                           |
+        Inline encryption key           Software secret
+
+The components are:
+
+- *Hardware-wrapped key*: a key for the hardware's KDF (Key Derivation
+  Function), in ephemerally-wrapped form.  The key wrapping algorithm is a
+  hardware implementation detail that doesn't impact kernel operation, but a
+  strong authenticated encryption algorithm such as AES-256-GCM is recommended.
+
+- *Hardware KDF*: a KDF (Key Derivation Function) which the hardware uses to
+  derive subkeys after unwrapping the wrapped key.  The hardware's choice of KDF
+  doesn't impact kernel operation, but it does need to be known for testing
+  purposes, and it's also assumed to have at least a 256-bit security strength.
+  All known hardware uses the SP800-108 KDF in Counter Mode with AES-256-CMAC,
+  with a particular choice of labels and contexts; new hardware should use this
+  already-vetted KDF.
+
+- *Inline encryption key*: a derived key which the hardware directly provisions
+  to a keyslot of the inline encryption hardware, without exposing it to
+  software.  In all known hardware, this will always be an AES-256-XTS key.
+  However, in principle other encryption algorithms could be supported too.
+  Hardware must derive distinct subkeys for each supported encryption algorithm.
+
+- *Software secret*: a derived key which the hardware returns to software so
+  that software can use it for cryptographic tasks that can't use inline
+  encryption.  This value is cryptographically isolated from the inline
+  encryption key, i.e. knowing one doesn't reveal the other.  (The KDF ensures
+  this.)  Currently, the software secret is always 32 bytes and thus is suitable
+  for cryptographic applications that require up to a 256-bit security strength.
+  Some use cases (e.g. full-disk encryption) won't require the software secret.
+
+Example: in the case of fscrypt, the fscrypt master key (the key used to unlock
+a particular set of encrypted directories) is made hardware-wrapped.  The inline
+encryption key is used as the file contents encryption key, while the software
+secret (rather than the master key directly) is used to key fscrypt's KDF
+(HKDF-SHA512) to derive other subkeys such as filenames encryption keys.
+
+Note that currently this design assumes a single inline encryption key per
+hardware-wrapped key, without any further key derivation.  Thus, in the case of
+fscrypt, currently hardware-wrapped keys are only compatible with the "inline
+encryption optimized" settings, which use one file contents encryption key per
+encryption policy rather than one per file.  This design could be extended to
+make the hardware derive per-file keys using per-file nonces passed down the
+storage stack, and in fact some hardware already supports this; future work is
+planned to remove this limitation by adding the corresponding kernel support.
+
+Kernel support
+--------------
+
+The inline encryption support of the kernel's block layer ("blk-crypto") has
+been extended to support hardware-wrapped keys as an alternative to standard
+keys, when hardware support is available.  This works in the following way:
+
+- A ``key_types_supported`` field is added to the crypto capabilities in
+  ``struct blk_crypto_profile``.  This allows device drivers to declare that
+  they support standard keys, hardware-wrapped keys, or both.
+
+- ``struct blk_crypto_key`` can now contain a hardware-wrapped key as an
+  alternative to a standard key; a ``key_type`` field is added to
+  ``struct blk_crypto_config`` to distinguish between the different key types.
+  This allows users of blk-crypto to en/decrypt data using a hardware-wrapped
+  key in a way very similar to using a standard key.
+
+- A new method ``blk_crypto_ll_ops::derive_sw_secret`` is added.  Device drivers
+  that support hardware-wrapped keys must implement this method.  Users of
+  blk-crypto can call ``blk_crypto_derive_sw_secret()`` to access this method.
+
+- The programming and eviction of hardware-wrapped keys happens via
+  ``blk_crypto_ll_ops::keyslot_program`` and
+  ``blk_crypto_ll_ops::keyslot_evict``, just like it does for standard keys.  If
+  a driver supports hardware-wrapped keys, then it must handle hardware-wrapped
+  keys being passed to these methods.
+
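+Putting these together, a hypothetical driver-side sketch (the field and
+method names follow this document; the exact signatures are illustrative, not
+authoritative)::
+
+    static int foo_derive_sw_secret(struct blk_crypto_profile *profile,
+                                    const u8 *eph_wrapped_key,
+                                    size_t eph_wrapped_key_size,
+                                    u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+    {
+            /*
+             * foo_hw_derive_sw_secret() stands in for the hardware-specific
+             * call that unwraps the key and derives the software secret; the
+             * raw key never enters kernel memory.
+             */
+            return foo_hw_derive_sw_secret(eph_wrapped_key,
+                                           eph_wrapped_key_size, sw_secret);
+    }
+
+    /* declare support for both key types */
+    profile->ll_ops.derive_sw_secret = foo_derive_sw_secret;
+    profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_STANDARD |
+                                   BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;
+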
+blk-crypto-fallback doesn't support hardware-wrapped keys.  Therefore,
+hardware-wrapped keys can only be used with actual inline encryption hardware.
+
+Currently, the kernel only works with hardware-wrapped keys in
+ephemerally-wrapped form.  No generic kernel interfaces are provided for
+generating or importing hardware-wrapped keys in the first place, or converting
+them to ephemerally-wrapped form.  In Android, SoC vendors are required to
+support these operations in their KeyMint implementation (a hardware abstraction
+layer in userspace); for details, see the `Android documentation
+<https://source.android.com/security/encryption/hw-wrapped-keys>`_.
+
+Testability
+-----------
+
+Both the hardware KDF and the inline encryption itself are well-defined
+algorithms that don't depend on any secrets other than the unwrapped key.
+Therefore, if the unwrapped key is known to software, these algorithms can be
+reproduced in software in order to verify the ciphertext that is written to disk
+by the inline encryption hardware.
+
+However, the unwrapped key will only be known to software for testing if the
+"import" functionality is used.  Proper testing is not possible in the
+"generate" case where the hardware generates the key itself.  The correct
+operation of the "generate" mode thus relies on the security and correctness of
+the hardware RNG and its use to generate the key, as well as the testing of the
+"import" mode as that should cover all parts other than the key generation.
+
+For an example of a test that verifies the ciphertext written to disk in the
+"import" mode, see `Android's vts_kernel_encryption_test
+<https://android.googlesource.com/platform/test/vts-testcase/kernel/+/refs/heads/master/encryption/>`_.
diff --git a/Documentation/device-mapper/dm-bow.txt b/Documentation/device-mapper/dm-bow.txt
new file mode 100644
index 0000000..e3fc4d2
--- /dev/null
+++ b/Documentation/device-mapper/dm-bow.txt
@@ -0,0 +1,99 @@
+dm_bow (backup on write)
+========================
+
+dm_bow is a device mapper driver that uses the free space on a device to back up
+data that is overwritten. The changes can then be committed by a simple state
+change, or rolled back by removing the dm_bow device and running a command line
+utility over the underlying device.
+
+dm_bow has three states; transitions are triggered by writing '1' or '2' to
+/sys/block/dm-?/bow/state. It is only possible to go from state 0 (initial
+state) to state 1, and then from state 1 to state 2.
+
+State 0: dm_bow collects all trims to the device and assumes that these mark
+free space on the overlying file system that can be safely used. Typically the
+mount code would create the dm_bow device, mount the file system, call the
+FITRIM ioctl on the file system, and then switch to state 1. These trims are
+not propagated to the underlying device.
+
+State 1: All writes to the device cause the underlying data to be backed up to
+the free (trimmed) area as needed, in such a way that it can be restored.
+However, the writes, with one exception, then happen exactly as they would
+without dm_bow, so the device is always in a good final state. The exception is
+that sector 0 is used to keep a log of the latest changes, both to indicate that
+we are in this state and to allow rollback. See below for full details. If there
+isn't enough free space, writes are failed with -ENOSPC.
+
+State 2: The transition to state 2 triggers replacing the special sector 0 with
+the normal sector 0, and the freeing of all state information. dm_bow then
+becomes a pass-through driver, allowing the device to continue to be used with
+minimal performance impact.
+
+Usage
+=====
+dm-bow takes one command line parameter, the name of the underlying device.
+
+dm-bow will typically be used in the following way. dm-bow will be loaded with a
+suitable underlying device and the resultant device will be mounted. A file
+system trim will be issued via the FITRIM ioctl, then the device will be
+switched to state 1. The file system will now be used as normal. At some point,
+the changes can either be committed by switching to state 2, or rolled back by
+unmounting the file system, removing the dm-bow device and running the command
+line utility. Note that rebooting the device will be equivalent to unmounting
+and removing, but the command line utility must still be run.
+
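+A minimal sketch of the state-1 transition in C (device and mount paths are
+illustrative; error handling omitted):
+
+    #include <fcntl.h>
+    #include <limits.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <linux/fs.h>
+
+    int main(void)
+    {
+            /* trim the mounted file system so dm-bow learns the free space */
+            struct fstrim_range range = { .start = 0, .len = ULLONG_MAX };
+            int fs = open("/data", O_RDONLY);
+
+            ioctl(fs, FITRIM, &range);
+
+            /* move dm-bow from state 0 to state 1 */
+            int st = open("/sys/block/dm-0/bow/state", O_WRONLY);
+
+            write(st, "1", 1);
+            return 0;
+    }
+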
+Details of operation in state 1
+===============================
+
+dm_bow maintains a type for all sectors. A sector can be any of:
+
+SECTOR0
+SECTOR0_CURRENT
+UNCHANGED
+FREE
+CHANGED
+BACKUP
+
+SECTOR0 is the first sector on the device, and is used to hold the log of
+changes. This is the one exception.
+
+SECTOR0_CURRENT is a sector picked from the FREE sectors, and is where reads and
+writes from the true sector zero are redirected to. Note that like any backup
+sector, if the sector is written to directly, it must be moved again.
+
+UNCHANGED means that the sector has not been changed since we entered state 1.
+Thus if it is written to or trimmed, the contents must first be backed up.
+
+FREE means that the sector was trimmed in state 0 and has not yet been written
+to or used for backup. On being written to, a FREE sector is changed to CHANGED.
+
+CHANGED means that the sector has been modified, and can be further modified
+without further backup.
+
+BACKUP means that this is a free sector being used as a backup. On being written
+to, the contents must first be backed up again.
+
+All backup operations are logged to the first sector. The log sector has the
+format:
+--------------------------------------------------------
+| Magic | Count | Sequence | Log entry | Log entry | …
+--------------------------------------------------------
+
+Magic is a magic number. Count is the number of log entries. Sequence is 0
+initially. A log entry is
+
+-----------------------------------
+| Source | Dest | Size | Checksum |
+-----------------------------------
+
+When SECTOR0 is full, the log sector is backed up and another empty log sector
+is created with a sequence number one higher. The first entry in any log sector
+with sequence > 0 therefore must be the log of the backing up of the previous
+log sector. Note that sequence is not strictly needed, but it is a useful sanity
+check and potentially limits the time spent trying to restore a corrupted
+snapshot.
+
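+A C sketch of the log layout implied above (field widths are illustrative; the
+authoritative format is whatever the driver writes):
+
+    struct log_entry {
+            __le64 source;
+            __le64 dest;
+            __le32 size;
+            __le32 checksum;
+    } __packed;
+
+    struct log_sector {
+            __le32 magic;
+            __le32 count;
+            __le32 sequence;
+            struct log_entry entries[];
+    } __packed;
+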
+On entering state 1, dm_bow has a list of free sectors. All other sectors are
+unchanged. SECTOR0_CURRENT is selected from the free sectors and the contents
+of sector 0 are copied there. Sector 0 is then backed up, which triggers the
+first log entry to be written.
+
diff --git a/Documentation/filesystems/OWNERS b/Documentation/filesystems/OWNERS
new file mode 100644
index 0000000..a63dbf4
--- /dev/null
+++ b/Documentation/filesystems/OWNERS
@@ -0,0 +1 @@
+per-file f2fs**=file:/fs/f2fs/OWNERS
diff --git a/Documentation/filesystems/incfs.rst b/Documentation/filesystems/incfs.rst
new file mode 100644
index 0000000..03ae39e
--- /dev/null
+++ b/Documentation/filesystems/incfs.rst
@@ -0,0 +1,82 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================================================
+incfs: A stacked incremental filesystem for Linux
+=================================================
+
+/sys/fs interface
+=================
+
+Please update Documentation/ABI/testing/sysfs-fs-incfs if you update this
+section.
+
+incfs creates the following files in /sys/fs.
+
+Features
+--------
+
+/sys/fs/incremental-fs/features/corefs
+  Reads 'supported'. Always present.
+
+/sys/fs/incremental-fs/features/v2
+  Reads 'supported'. Present if all v2 features of incfs are supported. These
+  are:
+    fs-verity support
+    inotify support
+    ioctls:
+      INCFS_IOC_SET_READ_TIMEOUTS
+      INCFS_IOC_GET_READ_TIMEOUTS
+      INCFS_IOC_GET_BLOCK_COUNT
+      INCFS_IOC_CREATE_MAPPED_FILE
+    .incomplete folder
+    .blocks_written pseudo file
+    report_uid mount option
+
+/sys/fs/incremental-fs/features/zstd
+  Reads 'supported'. Present if zstd compression is supported for data blocks.
+
+Optional per mount
+------------------
+
+For each incfs mount, the mount option sysfs_name=[name] creates a /sys/fs
+node called:
+
+/sys/fs/incremental-fs/instances/[name]
+
+This will contain the following files:
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_min
+  Returns a count of the number of reads that were delayed as a result of the
+  per UID read timeouts min time setting.
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_min_us
+  Returns total delay time for all files since first mount as a result of the
+  per UID read timeouts min time setting.
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_pending
+  Returns a count of the number of reads that were delayed as a result of
+  waiting for a pending read.
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_pending_us
+  Returns total delay time for all files since first mount as a result of
+  waiting for a pending read.
+
+/sys/fs/incremental-fs/instances/[name]/reads_failed_hash_verification
+  Returns number of reads that failed because of hash verification failures.
+
+/sys/fs/incremental-fs/instances/[name]/reads_failed_other
+  Returns number of reads that failed for reasons other than timing out or
+  hash failures.
+
+/sys/fs/incremental-fs/instances/[name]/reads_failed_timed_out
+  Returns number of reads that timed out.
+
+For the reads_delayed_*** counters, note that a single read can count towards
+both reads_delayed_min and reads_delayed_pending if incfs first waits for a
+pending read and then has to wait further for the min time. In that case, the
+time spent waiting is split between reads_delayed_pending_us, which is increased
+by the time spent waiting for the pending read, and reads_delayed_min_us, which
+is increased by the remainder of the time spent waiting.
+
+Reads that timed out are not added to the reads_delayed_pending or the
+reads_delayed_pending_us counters.
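+
+As a usage sketch, after mounting with the mount option sysfs_name=myfs, a
+monitoring process could poll one of these counters from C (instance name and
+path illustrative)::
+
+    #include <stdio.h>
+
+    long incfs_reads_failed_timed_out(void)
+    {
+            long count = -1;
+            FILE *f = fopen("/sys/fs/incremental-fs/instances/myfs"
+                            "/reads_failed_timed_out", "r");
+
+            if (f) {
+                    if (fscanf(f, "%ld", &count) != 1)
+                            count = -1;
+                    fclose(f);
+            }
+            return count;
+    }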
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 8d7f141..34a7d1d 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -431,6 +431,8 @@
  [stack]                    the stack of the main process
  [vdso]                     the "virtual dynamic shared object",
                             the kernel system call handler
+ [anon:<name>]              an anonymous mapping that has been
+                            named by userspace
  =======                    ====================================
 
  or if empty, the mapping is anonymous.
@@ -464,6 +466,7 @@
     Locked:                0 kB
     THPeligible:           0
     VmFlags: rd ex mr mw me dw
+    Name: name from userspace
 
 The first of these lines shows the same information as is displayed for the
 mapping in /proc/PID/maps.  Following lines show the size of the mapping
@@ -561,6 +564,9 @@
 might change in future as well. So each consumer of these flags has to
 follow each specific kernel version for the exact semantic.
 
+The "Name" field will only be present on a mapping that has been named by
+userspace, and will show the name passed in by userspace.
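+
+For example, userspace can name an anonymous mapping with the PR_SET_VMA prctl
+(a minimal sketch; error handling omitted)::
+
+    #include <sys/mman.h>
+    #include <sys/prctl.h>
+    #include <unistd.h>
+
+    #ifndef PR_SET_VMA
+    #define PR_SET_VMA            0x53564d41
+    #define PR_SET_VMA_ANON_NAME  0
+    #endif
+
+    int main(void)
+    {
+            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+            /* the mapping now appears as [anon:myname] in /proc/PID/maps */
+            prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
+                  (unsigned long)p, 4096, (unsigned long)"myname");
+            pause();
+            return 0;
+    }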
+
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
 
diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
index a1f3eb7..e9b6fe2 100644
--- a/Documentation/kbuild/modules.rst
+++ b/Documentation/kbuild/modules.rst
@@ -21,6 +21,7 @@
 	   --- 4.1 Kernel Includes
 	   --- 4.2 Single Subdirectory
 	   --- 4.3 Several Subdirectories
+	   --- 4.4 UAPI Headers Installation
 	=== 5. Module Installation
 	   --- 5.1 INSTALL_MOD_PATH
 	   --- 5.2 INSTALL_MOD_DIR
@@ -131,6 +132,10 @@
 		/lib/modules/<kernel_release>/extra/, but a prefix may
 		be added with INSTALL_MOD_PATH (discussed in section 5).
 
+	headers_install
+		Export headers in a format suitable for userspace. The default
+		location is $PWD/usr. INSTALL_HDR_PATH can change this path.
+
 	clean
 		Remove all generated files in the module directory only.
 
@@ -406,6 +411,17 @@
 	pointing to the directory where the currently executing kbuild
 	file is located.
 
+4.4 UAPI Headers Installation
+-----------------------------
+
+	External modules may export headers to userspace in a similar
+	fashion to their in-tree counterparts. kbuild supports running
+	the headers_install target in an external module directory. The
+	locations where kbuild searches for headers are $(M)/include/uapi
+	and $(M)/arch/$(SRCARCH)/include/uapi.
+
+	See also Documentation/kbuild/headers_install.rst.
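+
+	For example, assuming the module sources live in the current
+	directory, the following installs the exported headers under
+	$PWD/usr/include (kernel source path illustrative)::
+
+		$ make -C <path_to_kernel_src> M=$PWD headers_install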
+
 
 5. Module Installation
 ======================
diff --git a/Documentation/scheduler/sched-energy.rst b/Documentation/scheduler/sched-energy.rst
index 8fbce5e..83f1179 100644
--- a/Documentation/scheduler/sched-energy.rst
+++ b/Documentation/scheduler/sched-energy.rst
@@ -402,7 +402,7 @@
 because it is the only one providing some degree of consistency between
 frequency requests and energy predictions.
 
-Using EAS with any other governor than schedutil is not supported.
+Using EAS with any other governor than schedutil is not recommended.
 
 
 6.5 Scale-invariant utilization signals
diff --git a/Kconfig b/Kconfig
index 745bc77..57a142d 100644
--- a/Kconfig
+++ b/Kconfig
@@ -30,3 +30,6 @@
 source "lib/Kconfig.debug"
 
 source "Documentation/Kconfig"
+
+# ANDROID: Set KCONFIG_EXT_PREFIX to descend into an external project.
+source "$(KCONFIG_EXT_PREFIX)Kconfig.ext"
diff --git a/Kconfig.ext b/Kconfig.ext
new file mode 100644
index 0000000..48d805f
--- /dev/null
+++ b/Kconfig.ext
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+# This file is intentionally empty. It's used as a placeholder for when
+# KCONFIG_EXT_PREFIX isn't defined.
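+#
+# A sketch of how an external project is pulled in (path illustrative; the
+# prefix must end in '/' since it is prepended directly to "Kconfig.ext", and
+# it must be visible to Kconfig as an environment variable):
+#
+#   make KCONFIG_EXT_PREFIX=path/to/external/project/ defconfig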
diff --git a/MAINTAINERS b/MAINTAINERS
index fb18ce7..e0202ec 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9313,6 +9313,13 @@
 F:	drivers/hwmon/ina2xx.c
 F:	include/linux/platform_data/ina2xx.h
 
+INCREMENTAL FILE SYSTEM
+M:	Paul Lawrence <paullawrence@google.com>
+L:	linux-unionfs@vger.kernel.org
+S:	Supported
+F:	fs/incfs/
+F:	tools/testing/selftests/filesystems/incfs/
+
 INDUSTRY PACK SUBSYSTEM (IPACK)
 M:	Samuel Iglesias Gonsalvez <siglesias@igalia.com>
 M:	Jens Taprogge <jens.taprogge@taprogge.org>
diff --git a/Makefile b/Makefile
index 16d7f83..a8c5f1d 100644
--- a/Makefile
+++ b/Makefile
@@ -136,6 +136,24 @@
 
 export KBUILD_EXTMOD
 
+# ANDROID: set up mixed-build support. mixed-build allows device kernel modules
+# to be compiled against a GKI kernel. This approach still uses the headers and
+# Kbuild from the device kernel, so care must be taken to ensure that those headers match.
+ifdef KBUILD_MIXED_TREE
+# Need vmlinux.symvers for modpost and System.map for depmod, check whether they exist in KBUILD_MIXED_TREE
+required_mixed_files=vmlinux.symvers System.map
+$(if $(filter-out $(words $(required_mixed_files)), \
+		$(words $(wildcard $(addprefix $(KBUILD_MIXED_TREE)/,$(required_mixed_files))))),,\
+	$(error KBUILD_MIXED_TREE=$(KBUILD_MIXED_TREE) doesn't contain $(required_mixed_files)))
+endif
+
+mixed-build-prefix = $(if $(KBUILD_MIXED_TREE),$(KBUILD_MIXED_TREE)/)
+export KBUILD_MIXED_TREE
+# This is a hack for kleaf to set mixed-build-prefix within the execution of a make rule, e.g.
+# within __modinst_pre.
+# TODO(b/205893923): Revert this hack once it is properly handled.
+export mixed-build-prefix
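+# Example (paths illustrative): compile device modules against a prebuilt GKI
+# kernel's output directory:
+#   make O=out KBUILD_MIXED_TREE=/path/to/gki/out modules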
+
 # Kbuild will save output files in the current working directory.
 # This does not need to match to the root of the kernel source tree.
 #
@@ -476,7 +494,7 @@
 KBZIP2		= bzip2
 KLZOP		= lzop
 LZMA		= lzma
-LZ4		= lz4c
+LZ4		= lz4
 XZ		= xz
 ZSTD		= zstd
 
@@ -671,11 +689,13 @@
 libs-y		:= lib/
 endif # KBUILD_EXTMOD
 
+ifndef KBUILD_MIXED_TREE
 # The all: target is the default when no target is given on the
 # command line.
 # This allow a user to issue only 'make' to build a kernel including modules
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux
+endif
 
 CFLAGS_GCOV	:= -fprofile-arcs -ftest-coverage
 ifdef CONFIG_CC_IS_GCC
@@ -900,7 +920,13 @@
 else
 CC_FLAGS_LTO	:= -flto
 endif
+
+ifeq ($(SRCARCH),x86)
+# Workaround for compiler / linker bug
 CC_FLAGS_LTO	+= -fvisibility=hidden
+else
+CC_FLAGS_LTO	+= -fvisibility=default
+endif
 
 # Limit inlining across translation units to reduce binary size
 KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
@@ -1091,6 +1117,41 @@
 export MODORDER := $(extmod_prefix)modules.order
 export MODULES_NSDEPS := $(extmod_prefix)modules.nsdeps
 
+# ---------------------------------------------------------------------------
+# Kernel headers
+
+PHONY += headers
+
+# Default location for installed headers
+ifeq ($(KBUILD_EXTMOD),)
+PHONY += archheaders archscripts
+hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
+headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
+else
+hdr-prefix = $(KBUILD_EXTMOD)/
+hdr-inst := -f $(srctree)/scripts/Makefile.headersinst dst=$(KBUILD_EXTMOD)/usr/include objtree=$(objtree)/$(KBUILD_EXTMOD) obj
+endif
+
+export INSTALL_HDR_PATH = $(objtree)/$(hdr-prefix)usr
+
+quiet_cmd_headers_install = INSTALL $(INSTALL_HDR_PATH)/include
+      cmd_headers_install = \
+	mkdir -p $(INSTALL_HDR_PATH); \
+	rsync -mrl --include='*/' --include='*\.h' --exclude='*' \
+	$(hdr-prefix)usr/include $(INSTALL_HDR_PATH);
+
+PHONY += headers_install
+headers_install: headers
+	$(call cmd,headers_install)
+
+headers:
+ifeq ($(KBUILD_EXTMOD),)
+	$(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \
+	  $(error Headers not exportable for the $(SRCARCH) architecture))
+endif
+	$(Q)$(MAKE) $(hdr-inst)=$(hdr-prefix)include/uapi
+	$(Q)$(MAKE) $(hdr-inst)=$(hdr-prefix)arch/$(SRCARCH)/include/uapi
+
 ifeq ($(KBUILD_EXTMOD),)
 core-y			+= kernel/ certs/ mm/ fs/ ipc/ security/ crypto/
 core-$(CONFIG_BLOCK)	+= block/
@@ -1157,8 +1218,10 @@
 	$(CONFIG_SHELL) $< "$(LD)" "$(KBUILD_LDFLAGS)" "$(LDFLAGS_vmlinux)";    \
 	$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
 
+ifndef KBUILD_MIXED_TREE
 vmlinux: scripts/link-vmlinux.sh autoksyms_recursive $(vmlinux-deps) FORCE
 	+$(call if_changed_dep,link-vmlinux)
+endif
 
 targets := vmlinux
 
@@ -1167,7 +1230,8 @@
 $(sort $(vmlinux-deps) $(subdir-modorder)): descend ;
 
 filechk_kernel.release = \
-	echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
+	echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion \
+		$(srctree) $(BRANCH) $(KMI_GENERATION))"
 
 # Store (new) KERNELRELEASE string in include/config/kernel.release
 include/config/kernel.release: FORCE
@@ -1257,33 +1321,6 @@
 	$(Q)find $(srctree)/include/ -name '*.h' | xargs --max-args 1 \
 	$(srctree)/scripts/headerdep.pl -I$(srctree)/include
 
-# ---------------------------------------------------------------------------
-# Kernel headers
-
-#Default location for installed headers
-export INSTALL_HDR_PATH = $(objtree)/usr
-
-quiet_cmd_headers_install = INSTALL $(INSTALL_HDR_PATH)/include
-      cmd_headers_install = \
-	mkdir -p $(INSTALL_HDR_PATH); \
-	rsync -mrl --include='*/' --include='*\.h' --exclude='*' \
-	usr/include $(INSTALL_HDR_PATH)
-
-PHONY += headers_install
-headers_install: headers
-	$(call cmd,headers_install)
-
-PHONY += archheaders archscripts
-
-hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
-
-PHONY += headers
-headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
-	$(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \
-	  $(error Headers not exportable for the $(SRCARCH) architecture))
-	$(Q)$(MAKE) $(hdr-inst)=include/uapi
-	$(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi
-
 # Deprecated. It is no-op now.
 PHONY += headers_check
 headers_check:
@@ -1369,7 +1406,9 @@
 # Devicetree files
 
 ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/boot/dts/),)
-dtstree := arch/$(SRCARCH)/boot/dts
+# ANDROID: allow this to be overridden by the build environment. This allows
+# one to compile a device tree that is located out-of-tree.
+dtstree ?= arch/$(SRCARCH)/boot/dts
 endif
 
 ifneq ($(dtstree),)
@@ -1435,7 +1474,9 @@
 # using awk while concatenating to the final file.
 
 PHONY += modules
-modules: $(if $(KBUILD_BUILTIN),vmlinux) modules_check modules_prepare
+# if KBUILD_BUILTIN && !KBUILD_MIXED_TREE, depend on vmlinux
+modules: $(if $(KBUILD_BUILTIN), $(if $(KBUILD_MIXED_TREE),,vmlinux))
+modules: modules_check modules_prepare
 
 cmd_modules_order = $(AWK) '!x[$$0]++' $(real-prereqs) > $@
 
@@ -1480,8 +1521,8 @@
 		ln -s $(CURDIR) $(MODLIB)/build ; \
 	fi
 	@sed 's:^:kernel/:' modules.order > $(MODLIB)/modules.order
-	@cp -f modules.builtin $(MODLIB)/
-	@cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/
+	@cp -f $(mixed-build-prefix)modules.builtin $(MODLIB)/
+	@cp -f $(or $(mixed-build-prefix),$(objtree)/)modules.builtin.modinfo $(MODLIB)/
 
 endif # CONFIG_MODULES
 
@@ -1742,6 +1783,8 @@
 	@echo  ''
 	@echo  '  modules         - default target, build the module(s)'
 	@echo  '  modules_install - install the module'
+	@echo  '  headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'
+	@echo  '                    (default: $(abspath $(INSTALL_HDR_PATH)))'
 	@echo  '  clean           - remove generated files in module directory only'
 	@echo  ''
 
@@ -1766,7 +1809,7 @@
 
 quiet_cmd_depmod = DEPMOD  $(MODLIB)
       cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
-                   $(KERNELRELEASE)
+                   $(KERNELRELEASE) $(mixed-build-prefix)
 
 modules_install:
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
@@ -1845,7 +1888,7 @@
 $(build-dirs): prepare
 	$(Q)$(MAKE) $(build)=$@ \
 	single-build=$(if $(filter-out $@/, $(filter $@/%, $(KBUILD_SINGLE_TARGETS))),1) \
-	need-builtin=1 need-modorder=1
+	$(if $(KBUILD_MIXED_TREE),,need-builtin=1) need-modorder=1
 
 clean-dirs := $(addprefix _clean_, $(clean-dirs))
 PHONY += $(clean-dirs) clean
@@ -1950,7 +1993,8 @@
 	$(PERL) $(srctree)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
 
 kernelrelease:
-	@echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
+	@echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion \
+		$(srctree) $(BRANCH) $(KMI_GENERATION))"
 
 kernelversion:
 	@echo $(KERNELVERSION)
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..7dfe6c1
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,12 @@
+# The full list of approvers is defined in
+# https://android.googlesource.com/kernel/common/+/refs/meta/config/OWNERS
+
+# The following OWNERS are defined at the top level to improve the OWNERS
+# suggestions through any user interface. Consider those people the ones that
+# can help with finding the best person to review.
+adelva@google.com
+gregkh@google.com
+maennich@google.com
+saravanak@google.com
+surenb@google.com
+tkjos@google.com
diff --git a/OWNERS_DrNo b/OWNERS_DrNo
new file mode 100644
index 0000000..5224e3f
--- /dev/null
+++ b/OWNERS_DrNo
@@ -0,0 +1,23 @@
+# Authoritative list of Dr. No reviewers to approve changes on GKI release
+# branches, such as android12-5.10.
+#
+# This file has no effect in this branch, but is referred to from release
+# branches. So, please do not move or rename.
+#
+# See the GKI release documentation (go/gki-dr-no) for further details.
+
+# Main reviewers
+adelva@google.com
+maennich@google.com
+saravanak@google.com
+smuckle@google.com
+tkjos@google.com
+willdeacon@google.com
+
+# GKI Release Team
+howardsoc@google.com #{LAST_RESORT_SUGGESTION}
+szuweilin@google.com #{LAST_RESORT_SUGGESTION}
+
+# Backup
+sspatil@google.com #{LAST_RESORT_SUGGESTION}
+malchev@google.com #{LAST_RESORT_SUGGESTION}
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..4a1deb3
--- /dev/null
+++ b/README.md
@@ -0,0 +1,150 @@
+# How do I submit patches to Android Common Kernels
+
+1. BEST: Make all of your changes to upstream Linux. If appropriate, backport to the stable releases.
+   These patches will be merged automatically in the corresponding common kernels. If the patch is already
+   in upstream Linux, post a backport of the patch that conforms to the patch requirements below.
+   - Do not send patches upstream that contain only symbol exports. To be considered for upstream Linux,
+additions of `EXPORT_SYMBOL_GPL()` require an in-tree modular driver that uses the symbol -- so include
+the new driver or changes to an existing driver in the same patchset as the export.
+   - When sending patches upstream, the commit message must contain a clear case for why the patch
+is needed and beneficial to the community. Enabling out-of-tree drivers or functionality is
+not a persuasive case.
+
+2. LESS GOOD: Develop your patches out-of-tree (from an upstream Linux point-of-view). Unless these are
+   fixing an Android-specific bug, these are very unlikely to be accepted unless they have been
+   coordinated with kernel-team@android.com. If you want to proceed, post a patch that conforms to the
+   patch requirements below.
+
+# Common Kernel patch requirements
+
+- All patches must conform to the Linux kernel coding standards and pass `scripts/checkpatch.pl`
+- Patches shall not break gki_defconfig or allmodconfig builds for arm, arm64, x86, x86_64 architectures
+(see  https://source.android.com/setup/build/building-kernels)
+- If the patch is not merged from an upstream branch, the subject must be tagged with the type of patch:
+`UPSTREAM:`, `BACKPORT:`, `FROMGIT:`, `FROMLIST:`, or `ANDROID:`.
+- All patches must have a `Change-Id:` tag (see https://gerrit-review.googlesource.com/Documentation/user-changeid.html)
+- If an Android bug has been assigned, there must be a `Bug:` tag.
+- All patches must have a `Signed-off-by:` tag by the author and the submitter
+
+Additional requirements are listed below based on patch type
+
+## Requirements for backports from mainline Linux: `UPSTREAM:`, `BACKPORT:`
+
+- If the patch is a cherry-pick from Linux mainline with no changes at all
+    - tag the patch subject with `UPSTREAM:`.
+    - add upstream commit information with a `(cherry picked from commit ...)` line
+    - Example:
+        - if the upstream commit message is
+```
+        important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+```
+>- then Joe Smith would upload the patch for the common kernel as
+```
+        UPSTREAM: important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+        Bug: 135791357
+        Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+        (cherry picked from commit c31e73121f4c1ec41143423ac6ce3ce6dafdcec1)
+        Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+- If the patch requires any changes from the upstream version, tag the patch with `BACKPORT:`
+instead of `UPSTREAM:`.
+    - use the same tags as `UPSTREAM:`
+    - add comments about the changes under the `(cherry picked from commit ...)` line
+    - Example:
+```
+        BACKPORT: important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+        Bug: 135791357
+        Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+        (cherry picked from commit c31e73121f4c1ec41143423ac6ce3ce6dafdcec1)
+        [joe: Resolved minor conflict in drivers/foo/bar.c ]
+        Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+## Requirements for other backports: `FROMGIT:`, `FROMLIST:`
+
+- If the patch has been merged into an upstream maintainer tree, but has not yet
+been merged into Linux mainline
+    - tag the patch subject with `FROMGIT:`
+    - add info on where the patch came from as `(cherry picked from commit <sha1> <repo> <branch>)`. This
+must be a stable maintainer branch (not rebased, so don't use `linux-next` for example).
+    - if changes were required, use `BACKPORT: FROMGIT:`
+    - Example:
+        - if the commit message in the maintainer tree is
+```
+        important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+```
+>- then Joe Smith would upload the patch for the common kernel as
+```
+        FROMGIT: important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+        Bug: 135791357
+        (cherry picked from commit 878a2fd9de10b03d11d2f622250285c7e63deace
+         https://git.kernel.org/pub/scm/linux/kernel/git/foo/bar.git test-branch)
+        Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+        Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+
+- If the patch has been submitted to LKML, but not accepted into any maintainer tree
+    - tag the patch subject with `FROMLIST:`
+    - add a `Link:` tag with a link to the submittal on lore.kernel.org
+    - add a `Bug:` tag with the Android bug (required for patches not accepted into
+a maintainer tree)
+    - if changes were required, use `BACKPORT: FROMLIST:`
+    - Example:
+```
+        FROMLIST: important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+        Bug: 135791357
+        Link: https://lore.kernel.org/lkml/20190619171517.GA17557@someone.com/
+        Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+        Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+## Requirements for Android-specific patches: `ANDROID:`
+
+- If the patch is fixing a bug to Android-specific code
+    - tag the patch subject with `ANDROID:`
+    - add a `Fixes:` tag that cites the patch with the bug
+    - Example:
+```
+        ANDROID: fix android-specific bug in foobar.c
+
+        This is the detailed description of the important fix
+
+        Fixes: 1234abcd2468 ("foobar: add cool feature")
+        Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+        Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+- If the patch is a new feature
+    - tag the patch subject with `ANDROID:`
+    - add a `Bug:` tag with the Android bug (required for android-specific features)
+
diff --git a/arch/arm/OWNERS b/arch/arm/OWNERS
new file mode 100644
index 0000000..54f66d6
--- /dev/null
+++ b/arch/arm/OWNERS
@@ -0,0 +1 @@
+include ../arm64/OWNERS
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index cde5b6d..25bfa9a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -51,6 +51,10 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_raise);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_exit);
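+
+/*
+ * The exports above allow modules to attach probes to the IPI tracepoints,
+ * e.g. (probe name illustrative):
+ *
+ *	register_trace_ipi_raise(my_ipi_raise_probe, NULL);
+ */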
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c4207cf..0604a71 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1982,6 +1982,12 @@
 	  the boot loader doesn't provide any, the default kernel command
 	  string provided in CMDLINE will be used.
 
+config CMDLINE_EXTEND
+	bool "Extend bootloader kernel arguments"
+	help
+	  The command-line arguments provided by the boot loader will be
+	  appended to the default kernel command string.
+
 config CMDLINE_FORCE
 	bool "Always use the default kernel command string"
 	help
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index e8cfc58..0fd75cc 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -148,7 +148,10 @@
 boot		:= arch/arm64/boot
 KBUILD_IMAGE	:= $(boot)/Image.gz
 
+# Don't compile Image in mixed build with "all" target
+ifndef KBUILD_MIXED_TREE
 all:	Image.gz
+endif
 
 
 Image: vmlinux
diff --git a/arch/arm64/OWNERS b/arch/arm64/OWNERS
new file mode 100644
index 0000000..f362e24
--- /dev/null
+++ b/arch/arm64/OWNERS
@@ -0,0 +1,4 @@
+per-file crypto/**=file:/crypto/OWNERS
+per-file {include,kernel,kvm,lib}/**=mzyngier@google.com,willdeacon@google.com
+per-file mm/**=file:/mm/OWNERS
+per-file net/**=file:/net/OWNERS
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 5148cd9..04fbcce 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -3,12 +3,14 @@
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j100.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-radxa-zero.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-sei510.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-g12a-sei510-android.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-u200.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-x96-max.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gsking-x.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gtking.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gtking-pro.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12b-a311d-khadas-vim3.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-g12b-a311d-khadas-vim3-android.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12b-s922x-khadas-vim3.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12b-odroid-n2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12b-odroid-n2-plus.dtb
@@ -53,7 +55,9 @@
 dtb-$(CONFIG_ARCH_MESON) += meson-gxm-wetek-core2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-sm1-bananapi-m5.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-sm1-khadas-vim3l.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-sm1-khadas-vim3l-android.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-sm1-odroid-c4.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-sm1-odroid-hc4.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-sm1-sei610.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-sm1-sei610-android.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-a1-ad401.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510-android.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510-android.dts
new file mode 100644
index 0000000..2f89a01
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510-android.dts
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2020 BayLibre SAS. All rights reserved.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+  compatible = "seirobotics,sei510", "amlogic,g12a";
+  model = "SEI Robotics SEI510";
+  fragment@101 {
+        target-path = "/";
+
+        __overlay__ {
+                reserved-memory {
+                        #address-cells = <2>;
+                        #size-cells = <2>;
+                        ramoops@d000000 {
+                                compatible = "ramoops";
+                                reg = <0x0 0x0d000000 0x0 0x00100000>;
+                                record-size = <0x8000>;
+                                console-size = <0x8000>;
+                                ftrace-size = <0x0>;
+                                pmsg-size = <0x8000>;
+                        };
+                };
+
+                adc_keys {
+                        button-onoff {
+                                linux,code = <BTN_0>;
+                        };
+                };
+
+                cvbs-connector {
+                        status = "disabled";
+                };
+        };
+  };
+};
+
+&vddao_3v3_t {
+	gpio-open-drain;
+};
+
+&uart_A {
+	bluetooth {
+		interrupt-parent = <&gpio_intc>;
+		interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "host-wakeup";
+	};
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3-android.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3-android.dts
new file mode 100644
index 0000000..3ab19e2
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3-android.dts
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 BayLibre SAS. All rights reserved.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+  compatible = "khadas,vim3", "amlogic,a311d", "amlogic,g12b";
+  model = "Khadas VIM3";
+  fragment@101 {
+        target-path = "/";
+        __overlay__ {
+                reserved-memory {
+                        #address-cells = <2>;
+                        #size-cells = <2>;
+                        ramoops@d000000 {
+                                compatible = "ramoops";
+                                reg = <0x0 0x0d000000 0x0 0x00100000>;
+                                record-size = <0x8000>;
+                                console-size = <0x8000>;
+                                ftrace-size = <0x0>;
+                                pmsg-size = <0x8000>;
+                        };
+                };
+
+        };
+  };
+};
+
+&vcc_5v {
+	gpio-open-drain;
+};
+
+&uart_C {
+	status = "okay";
+	pinctrl-0 = <&uart_c_pins>;
+	pinctrl-names = "default";
+};
+
+&emmc_pwrseq {
+	status = "okay";
+};
+
+&sd_emmc_a {
+	/* WiFi firmware requires power to be kept while in suspend */
+	keep-power-in-suspend;
+};
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l-android.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l-android.dts
new file mode 100644
index 0000000..4b06bf2
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l-android.dts
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 BayLibre SAS. All rights reserved.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+  compatible = "khadas,vim3l", "amlogic,sm1";
+  model = "Khadas VIM3L";
+  fragment@101 {
+        target-path = "/";
+        __overlay__ {
+                reserved-memory {
+                        #address-cells = <2>;
+                        #size-cells = <2>;
+                        ramoops@d000000 {
+                                compatible = "ramoops";
+                                reg = <0x0 0x0d000000 0x0 0x00100000>;
+                                record-size = <0x8000>;
+                                console-size = <0x8000>;
+                                ftrace-size = <0x0>;
+                                pmsg-size = <0x8000>;
+                        };
+                };
+        };
+  };
+};
+
+&vcc_5v {
+	gpio-open-drain;
+};
+
+&uart_A {
+	bluetooth {
+		interrupt-parent = <&gpio_intc>;
+		interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "host-wakeup";
+	};
+};
+
+&uart_C {
+        status = "disabled";
+        pinctrl-0 = <&uart_c_pins>;
+        pinctrl-names = "default";
+};
+
+&emmc_pwrseq {
+	status = "okay";
+};
+
+&sd_emmc_a {
+	/* WiFi firmware requires power to be kept while in suspend */
+	keep-power-in-suspend;
+};
+
+&spicc1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&spicc1_pins>;
+	cs-gpios = <&gpio GPIOH_6 GPIO_ACTIVE_LOW>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	spidev@0 {
+		compatible = "rohm,dh2228fv";
+		reg = <0>;
+		spi-max-frequency = <500000>;
+		status = "okay";
+	};
+
+	neonkey@0 {
+		compatible = "nanohub";
+		reg = <0>;
+		spi-max-frequency = <500000>;
+
+		sensorhub,nreset-gpio = <&gpio GPIOA_0 0>;
+		sensorhub,boot0-gpio = <&gpio GPIOA_3 0>;   /* Fake */
+		sensorhub,wakeup-gpio = <&gpio GPIOA_2 0>;  /* A2 -> PB9 */
+		sensorhub,irq1-gpio = <&gpio GPIOA_1 0>;    /* A1 -> PB5 */
+		interrupt-parent = <&gpio_intc>;
+		interrupts = <62 IRQ_TYPE_EDGE_RISING>;     /* A1 */
+		/* sensorhub,spi-cs-gpio = <&gpio GPIOH_6 GPIO_ACTIVE_LOW>; Optional */
+		sensorhub,bl-addr = <0x08000000>;
+		sensorhub,kernel-addr = <0x0800C000>;
+		sensorhub,shared-addr = <0x08040000>;
+		sensorhub,flash-banks = <0 0x08000000 0x04000>,
+					<3 0x0800C000 0x04000>,
+					<4 0x08010000 0x10000>,
+					<5 0x08020000 0x20000>,
+					<6 0x08040000 0x20000>,
+					<7 0x08060000 0x20000>;
+		sensorhub,num-flash-banks = <6>;
+		status = "disabled";
+	};
+
+	argonkey@0 {
+		compatible = "nanohub";
+		reg = <0>;
+		spi-max-frequency = <500000>;
+		spi-cpol;
+
+		sensorhub,nreset-gpio = <&gpio GPIOA_0 0>;
+		sensorhub,boot0-gpio = <&gpio GPIOA_3 0>;
+		sensorhub,wakeup-gpio = <&gpio GPIOA_1 0>;  /* A1 -> PA0 */
+		sensorhub,irq1-gpio = <&gpio GPIOA_2 0>;    /* A2 -> PA1 */
+		interrupt-parent = <&gpio_intc>;
+		interrupts = <63 IRQ_TYPE_EDGE_RISING>;     /* A2 */
+		sensorhub,bl-addr = <0x08000000>;
+		sensorhub,kernel-addr = <0x0800C000>;
+		sensorhub,num-flash-banks = <4>;
+		sensorhub,flash-banks =	<0 0x08000000 0x04000>,
+					<3 0x0800C000 0x04000>,
+					<4 0x08010000 0x10000>,
+					<5 0x08020000 0x20000>;
+		sensorhub,shared-addr = <0x08040000>;
+		sensorhub,num-shared-flash-banks = <6>;
+		sensorhub,shared-flash-banks = <6 0x08040000 0x20000>,
+					<7 0x08060000 0x20000>,
+					<8 0x08080000 0x20000>,
+					<9 0x080A0000 0x20000>,
+					<10 0x080C0000 0x20000>,
+					<11 0x080E0000 0x20000>;
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610-android.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610-android.dts
new file mode 100644
index 0000000..363d6c4
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610-android.dts
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2020 BayLibre SAS. All rights reserved.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+  compatible = "seirobotics,sei610", "amlogic,sm1";
+  model = "SEI Robotics SEI610";
+  fragment@101 {
+	target-path = "/";
+	__overlay__ {
+
+		reserved-memory {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ramoops@d000000 {
+				compatible = "ramoops";
+				reg = <0x0 0x0d000000 0x0 0x00100000>;
+				record-size = <0x8000>;
+				console-size = <0x8000>;
+				ftrace-size = <0x0>;
+				pmsg-size = <0x8000>;
+			};
+		};
+	};
+  };
+};
+
+&vddao_3v3_t {
+	gpio-open-drain;
+};
+
+&emmc_pwrseq {
+	status = "okay";
+};
+
+&sd_emmc_a {
+	/* WiFi firmware requires power to be kept while in suspend */
+	keep-power-in-suspend;
+};
+
+&uart_C {
+        status = "disabled";
+        pinctrl-0 = <&uart_c_pins>;
+        pinctrl-names = "default";
+};
+
+&spicc0 {
+        status = "disabled";
+        pinctrl-names = "default";
+        pinctrl-0 = <&spicc0_x_pins>;
+        cs-gpios = <&gpio GPIOX_10 GPIO_ACTIVE_LOW>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        spidev@0 {
+            compatible = "rohm,dh2228fv";
+            reg = <0>;
+            spi-max-frequency = <500000>;
+            status = "disabled";
+        };
+};
diff --git a/arch/arm64/configs/amlogic_gki.fragment b/arch/arm64/configs/amlogic_gki.fragment
new file mode 100644
index 0000000..e577f55
--- /dev/null
+++ b/arch/arm64/configs/amlogic_gki.fragment
@@ -0,0 +1,142 @@
+#
+# Generic drivers/frameworks
+#
+CONFIG_COMMON_CLK_PWM=m
+CONFIG_REGULATOR_PWM=m
+CONFIG_PWRSEQ_EMMC=m
+CONFIG_PWRSEQ_SIMPLE=m
+CONFIG_USB_DWC2=m
+CONFIG_LEDS_GPIO=m
+
+#
+# Networking
+#
+CONFIG_REALTEK_PHY=m
+CONFIG_STMMAC_ETH=m
+CONFIG_STMMAC_PLATFORM=m
+
+#
+# WLAN
+#
+CONFIG_WLAN_VENDOR_BROADCOM=y
+CONFIG_BRCMUTIL=m
+CONFIG_BRCMFMAC=m
+CONFIG_BRCMFMAC_PROTO_BCDC=y
+CONFIG_BRCMFMAC_SDIO=y
+
+#
+# Amlogic
+#
+CONFIG_ARCH_MESON=y
+CONFIG_SERIAL_MESON=m
+CONFIG_SERIAL_MESON_CONSOLE=y
+
+#
+# Amlogic drivers as modules
+#
+
+# core
+CONFIG_MESON_SM=m
+CONFIG_RESET_MESON=m
+CONFIG_MESON_IRQ_GPIO=m
+
+# clocks
+CONFIG_COMMON_CLK_MESON_REGMAP=m
+CONFIG_COMMON_CLK_MESON_DUALDIV=m
+CONFIG_COMMON_CLK_MESON_MPLL=m
+CONFIG_COMMON_CLK_MESON_PHASE=m
+CONFIG_COMMON_CLK_MESON_PLL=m
+CONFIG_COMMON_CLK_MESON_SCLK_DIV=m
+CONFIG_COMMON_CLK_MESON_VID_PLL_DIV=m
+CONFIG_COMMON_CLK_MESON_AO_CLKC=m
+CONFIG_COMMON_CLK_MESON_EE_CLKC=m
+CONFIG_COMMON_CLK_MESON_CPU_DYNDIV=m
+CONFIG_COMMON_CLK_GXBB=m
+CONFIG_COMMON_CLK_AXG=m
+CONFIG_COMMON_CLK_G12A=m
+
+# PHY
+CONFIG_PHY_MESON8B_USB2=m
+CONFIG_PHY_MESON_GXL_USB2=m
+CONFIG_PHY_MESON_G12A_USB2=m
+CONFIG_PHY_MESON_G12A_USB3_PCIE=m
+CONFIG_PHY_MESON_AXG_PCIE=m
+CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG=m
+
+# peripherals
+CONFIG_I2C_MESON=m
+CONFIG_MMC_MESON_GX=m
+CONFIG_HW_RANDOM_MESON=m
+CONFIG_USB_DWC3_MESON_G12A=m
+CONFIG_MESON_SARADC=m
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_PCI_MESON=m
+CONFIG_DWMAC_MESON=m
+CONFIG_MDIO_BUS_MUX_MESON_G12A=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_PINCTRL_MESON=m
+CONFIG_PINCTRL_MESON_GXBB=m
+CONFIG_PINCTRL_MESON_GXL=m
+CONFIG_PINCTRL_MESON_AXG=m
+CONFIG_PINCTRL_MESON_AXG_PMX=m
+CONFIG_PINCTRL_MESON_G12A=m
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_MTD_NAND_MESON=m
+CONFIG_PWM_MESON=m
+CONFIG_IR_MESON=m
+CONFIG_MESON_EFUSE=m
+CONFIG_MFD_KHADAS_MCU=m
+CONFIG_KHADAS_MCU_FAN_THERMAL=m
+CONFIG_AMLOGIC_THERMAL=m
+
+# sound
+CONFIG_SND_MESON_AXG_SOUND_CARD=m
+CONFIG_SND_MESON_GX_SOUND_CARD=m
+CONFIG_SND_MESON_G12A_TOHDMITX=m
+
+# display / video
+CONFIG_DRM_MESON=m
+CONFIG_DRM_MESON_DW_HDMI=m
+CONFIG_DRM_DW_HDMI=m
+CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
+CONFIG_DRM_DW_HDMI_I2S_AUDIO=m
+CONFIG_DRM_DW_HDMI_CEC=m
+CONFIG_CEC_MESON_AO=m
+CONFIG_CEC_MESON_G12A_AO=m
+CONFIG_VIDEO_MESON_GE2D=m
+
+# SoC drivers
+CONFIG_MESON_CANVAS=m
+CONFIG_MESON_CLK_MEASURE=m
+CONFIG_MESON_GX_PM_DOMAINS=m
+CONFIG_MESON_EE_PM_DOMAINS=m
+CONFIG_MESON_SECURE_PM_DOMAINS=m
+
+#
+# Amlogic drivers disable
+#
+
+# 32-bit SoC drivers
+# CONFIG_MESON6_TIMER is not set
+# CONFIG_MESON_MX_SOCINFO is not set
+
+# only needed by DRM on S805X
+# CONFIG_MESON_GX_SOCINFO is not set
+
+#
+# Debug / Testing
+#
+
+# devtmpfs needed for buildroot/udev module loading, serial console
+#CONFIG_DEVTMPFS=y
+#CONFIG_DEVTMPFS_MOUNT=y
+
+# debug/testing with FB console
+#CONFIG_DRM_KMS_FB_HELPER=y
+#CONFIG_DRM_FBDEV_EMULATION=y
+#CONFIG_FB=y
+#CONFIG_VT=y
+#CONFIG_FRAMEBUFFER_CONSOLE=y
+#CONFIG_LOGO=y
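
The fragment above is not a standalone .config; it is merged on top of
gki_defconfig. A minimal sketch of that merge, assuming an out/ build
directory (merge_config.sh is the standard upstream helper in
scripts/kconfig/):

    # Merge the Amlogic fragment onto the GKI defconfig, then let
    # Kconfig resolve any remaining defaults.
    ARCH=arm64 scripts/kconfig/merge_config.sh -m -O out \
        arch/arm64/configs/gki_defconfig \
        arch/arm64/configs/amlogic_gki.fragment
    make ARCH=arm64 O=out olddefconfig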
diff --git a/arch/arm64/configs/db845c_gki.fragment b/arch/arm64/configs/db845c_gki.fragment
new file mode 100644
index 0000000..53032eb
--- /dev/null
+++ b/arch/arm64/configs/db845c_gki.fragment
@@ -0,0 +1,304 @@
+CONFIG_QRTR=m
+CONFIG_QRTR_TUN=m
+CONFIG_SCSI_UFS_QCOM=m
+CONFIG_USB_NET_AX88179_178A=m
+CONFIG_INPUT_PM8941_PWRKEY=m
+CONFIG_SERIAL_MSM=m
+CONFIG_I2C_QCOM_GENI=m
+CONFIG_I2C_QUP=m
+CONFIG_PINCTRL_MSM=m
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=m
+CONFIG_PINCTRL_SDM845=m
+CONFIG_POWER_RESET_QCOM_PON=m
+CONFIG_SYSCON_REBOOT_MODE=m
+CONFIG_QCOM_TSENS=m
+CONFIG_QCOM_WDT=m
+CONFIG_PM8916_WATCHDOG=m
+CONFIG_MFD_SPMI_PMIC=m
+CONFIG_SPMI_MSM_PMIC_ARB=m
+CONFIG_REGULATOR_QCOM_RPMH=m
+CONFIG_REGULATOR_QCOM_SPMI=m
+CONFIG_DRM_MSM=m
+# CONFIG_DRM_MSM_DSI_28NM_PHY is not set
+# CONFIG_DRM_MSM_DSI_20NM_PHY is not set
+# CONFIG_DRM_MSM_DSI_28NM_8960_PHY is not set
+CONFIG_DRM_LONTIUM_LT9611=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_OHCI_HCD_PLATFORM=m
+# CONFIG_USB_DWC3_HAPS is not set
+# CONFIG_USB_DWC3_OF_SIMPLE is not set
+CONFIG_USB_GADGET_VBUS_DRAW=500
+# CONFIG_USB_DUMMY_HCD is not set
+CONFIG_USB_ROLE_SWITCH=m
+CONFIG_USB_ULPI_BUS=m
+CONFIG_MMC_SDHCI_MSM=m
+CONFIG_RTC_DRV_PM8XXX=m
+CONFIG_COMMON_CLK_QCOM=m
+CONFIG_SDM_GPUCC_845=m
+CONFIG_QCOM_CLK_RPMH=m
+CONFIG_SDM_DISPCC_845=m
+CONFIG_HWSPINLOCK_QCOM=m
+CONFIG_QCOM_LLCC=m
+CONFIG_QCOM_RMTFS_MEM=m
+CONFIG_QCOM_SMEM=m
+CONFIG_QCOM_SMSM=m
+CONFIG_EXTCON_USB_GPIO=m
+CONFIG_RESET_QCOM_AOSS=m
+CONFIG_RESET_QCOM_PDC=m
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_QCOM_QUSB2=m
+CONFIG_PHY_QCOM_USB_HS=m
+CONFIG_QCOM_QFPROM=m
+CONFIG_INTERCONNECT_QCOM=y
+CONFIG_INTERCONNECT_QCOM_OSM_L3=m
+CONFIG_INTERCONNECT_QCOM_SDM845=m
+CONFIG_QCOM_RPMH=m
+CONFIG_QCOM_RPMHPD=m
+CONFIG_WLAN_VENDOR_ATH=y
+CONFIG_ATH10K_AHB=y
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_ATH10K_SNOC=m
+CONFIG_QRTR_SMD=m
+CONFIG_QCOM_FASTRPC=m
+CONFIG_QCOM_APCS_IPC=m
+CONFIG_QCOM_Q6V5_COMMON=m
+CONFIG_QCOM_RPROC_COMMON=m
+CONFIG_QCOM_Q6V5_ADSP=m
+CONFIG_QCOM_Q6V5_MSS=m
+CONFIG_QCOM_Q6V5_PAS=m
+CONFIG_QCOM_Q6V5_WCSS=m
+CONFIG_QCOM_SYSMON=m
+CONFIG_RPMSG_QCOM_GLINK_SMEM=m
+CONFIG_RPMSG_QCOM_SMD=m
+CONFIG_QCOM_AOSS_QMP=m
+CONFIG_QCOM_SMP2P=m
+CONFIG_QCOM_SOCINFO=m
+CONFIG_QCOM_APR=m
+CONFIG_QCOM_GLINK_SSR=m
+CONFIG_RPMSG_QCOM_GLINK_RPM=m
+CONFIG_QCOM_PDC=m
+CONFIG_QCOM_SCM=m
+CONFIG_ARM_SMMU=m
+CONFIG_ARM_QCOM_CPUFREQ_HW=m
+# Audio bits start here
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_MUX=m
+CONFIG_I2C_MUX_PCA954x=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
+CONFIG_I2C_RK3X=m
+CONFIG_SPI_PL022=m
+CONFIG_SPI_QCOM_QSPI=m
+CONFIG_SPI_QUP=m
+CONFIG_SPI_QCOM_GENI=m
+CONFIG_GPIO_WCD934X=m
+CONFIG_MFD_WCD934X=m
+CONFIG_REGULATOR_GPIO=m
+CONFIG_SND_SOC_QCOM=m
+CONFIG_SND_SOC_QCOM_COMMON=m
+CONFIG_SND_SOC_QDSP6_COMMON=m
+CONFIG_SND_SOC_QDSP6_CORE=m
+CONFIG_SND_SOC_QDSP6_AFE=m
+CONFIG_SND_SOC_QDSP6_AFE_DAI=m
+CONFIG_SND_SOC_QDSP6_ADM=m
+CONFIG_SND_SOC_QDSP6_ROUTING=m
+CONFIG_SND_SOC_QDSP6_ASM=m
+CONFIG_SND_SOC_QDSP6_ASM_DAI=m
+CONFIG_SND_SOC_QDSP6=m
+CONFIG_SND_SOC_SDM845=m
+CONFIG_SND_SOC_DMIC=m
+CONFIG_SND_SOC_WCD9335=m
+CONFIG_SND_SOC_WCD934X=m
+CONFIG_SND_SOC_WSA881X=m
+CONFIG_QCOM_BAM_DMA=m
+CONFIG_SPMI_PMIC_CLKDIV=m
+CONFIG_SOUNDWIRE=m
+CONFIG_SOUNDWIRE_QCOM=m
+CONFIG_SLIMBUS=m
+CONFIG_SLIM_QCOM_NGD_CTRL=m
+CONFIG_DMABUF_HEAPS_SYSTEM=m
+CONFIG_VIDEO_QCOM_VENUS=m
+CONFIG_SDM_VIDEOCC_845=m
+# CONFIG_CXD2880_SPI_DRV is not set
+# CONFIG_MEDIA_TUNER_SIMPLE is not set
+# CONFIG_MEDIA_TUNER_TDA18250 is not set
+# CONFIG_MEDIA_TUNER_TDA8290 is not set
+# CONFIG_MEDIA_TUNER_TDA827X is not set
+# CONFIG_MEDIA_TUNER_TDA18271 is not set
+# CONFIG_MEDIA_TUNER_TDA9887 is not set
+# CONFIG_MEDIA_TUNER_TEA5761 is not set
+# CONFIG_MEDIA_TUNER_TEA5767 is not set
+# CONFIG_MEDIA_TUNER_MSI001 is not set
+# CONFIG_MEDIA_TUNER_MT20XX is not set
+# CONFIG_MEDIA_TUNER_MT2060 is not set
+# CONFIG_MEDIA_TUNER_MT2063 is not set
+# CONFIG_MEDIA_TUNER_MT2266 is not set
+# CONFIG_MEDIA_TUNER_MT2131 is not set
+# CONFIG_MEDIA_TUNER_QT1010 is not set
+# CONFIG_MEDIA_TUNER_XC2028 is not set
+# CONFIG_MEDIA_TUNER_XC5000 is not set
+# CONFIG_MEDIA_TUNER_XC4000 is not set
+# CONFIG_MEDIA_TUNER_MXL5005S is not set
+# CONFIG_MEDIA_TUNER_MXL5007T is not set
+# CONFIG_MEDIA_TUNER_MC44S803 is not set
+# CONFIG_MEDIA_TUNER_MAX2165 is not set
+# CONFIG_MEDIA_TUNER_TDA18218 is not set
+# CONFIG_MEDIA_TUNER_FC0011 is not set
+# CONFIG_MEDIA_TUNER_FC0012 is not set
+# CONFIG_MEDIA_TUNER_FC0013 is not set
+# CONFIG_MEDIA_TUNER_TDA18212 is not set
+# CONFIG_MEDIA_TUNER_E4000 is not set
+# CONFIG_MEDIA_TUNER_FC2580 is not set
+# CONFIG_MEDIA_TUNER_M88RS6000T is not set
+# CONFIG_MEDIA_TUNER_TUA9001 is not set
+# CONFIG_MEDIA_TUNER_SI2157 is not set
+# CONFIG_MEDIA_TUNER_IT913X is not set
+# CONFIG_MEDIA_TUNER_R820T is not set
+# CONFIG_MEDIA_TUNER_MXL301RF is not set
+# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set
+# CONFIG_MEDIA_TUNER_QM1D1B0004 is not set
+# CONFIG_DVB_STB0899 is not set
+# CONFIG_DVB_STB6100 is not set
+# CONFIG_DVB_STV090x is not set
+# CONFIG_DVB_STV0910 is not set
+# CONFIG_DVB_STV6110x is not set
+# CONFIG_DVB_STV6111 is not set
+# CONFIG_DVB_MXL5XX is not set
+# CONFIG_DVB_M88DS3103 is not set
+# CONFIG_DVB_DRXK is not set
+# CONFIG_DVB_TDA18271C2DD is not set
+# CONFIG_DVB_SI2165 is not set
+# CONFIG_DVB_MN88472 is not set
+# CONFIG_DVB_MN88473 is not set
+# CONFIG_DVB_CX24110 is not set
+# CONFIG_DVB_CX24123 is not set
+# CONFIG_DVB_MT312 is not set
+# CONFIG_DVB_ZL10036 is not set
+# CONFIG_DVB_ZL10039 is not set
+# CONFIG_DVB_S5H1420 is not set
+# CONFIG_DVB_STV0288 is not set
+# CONFIG_DVB_STB6000 is not set
+# CONFIG_DVB_STV0299 is not set
+# CONFIG_DVB_STV6110 is not set
+# CONFIG_DVB_STV0900 is not set
+# CONFIG_DVB_TDA8083 is not set
+# CONFIG_DVB_TDA10086 is not set
+# CONFIG_DVB_TDA8261 is not set
+# CONFIG_DVB_VES1X93 is not set
+# CONFIG_DVB_TUNER_ITD1000 is not set
+# CONFIG_DVB_TUNER_CX24113 is not set
+# CONFIG_DVB_TDA826X is not set
+# CONFIG_DVB_TUA6100 is not set
+# CONFIG_DVB_CX24116 is not set
+# CONFIG_DVB_CX24117 is not set
+# CONFIG_DVB_CX24120 is not set
+# CONFIG_DVB_SI21XX is not set
+# CONFIG_DVB_TS2020 is not set
+# CONFIG_DVB_DS3000 is not set
+# CONFIG_DVB_MB86A16 is not set
+# CONFIG_DVB_TDA10071 is not set
+# CONFIG_DVB_SP8870 is not set
+# CONFIG_DVB_SP887X is not set
+# CONFIG_DVB_CX22700 is not set
+# CONFIG_DVB_CX22702 is not set
+# CONFIG_DVB_S5H1432 is not set
+# CONFIG_DVB_DRXD is not set
+# CONFIG_DVB_L64781 is not set
+# CONFIG_DVB_TDA1004X is not set
+# CONFIG_DVB_NXT6000 is not set
+# CONFIG_DVB_MT352 is not set
+# CONFIG_DVB_ZL10353 is not set
+# CONFIG_DVB_DIB3000MB is not set
+# CONFIG_DVB_DIB3000MC is not set
+# CONFIG_DVB_DIB7000M is not set
+# CONFIG_DVB_DIB7000P is not set
+# CONFIG_DVB_DIB9000 is not set
+# CONFIG_DVB_TDA10048 is not set
+# CONFIG_DVB_AF9013 is not set
+# CONFIG_DVB_EC100 is not set
+# CONFIG_DVB_STV0367 is not set
+# CONFIG_DVB_CXD2820R is not set
+# CONFIG_DVB_CXD2841ER is not set
+# CONFIG_DVB_RTL2830 is not set
+# CONFIG_DVB_RTL2832 is not set
+# CONFIG_DVB_RTL2832_SDR is not set
+# CONFIG_DVB_SI2168 is not set
+# CONFIG_DVB_ZD1301_DEMOD is not set
+# CONFIG_DVB_CXD2880 is not set
+# CONFIG_DVB_VES1820 is not set
+# CONFIG_DVB_TDA10021 is not set
+# CONFIG_DVB_TDA10023 is not set
+# CONFIG_DVB_STV0297 is not set
+# CONFIG_DVB_NXT200X is not set
+# CONFIG_DVB_OR51211 is not set
+# CONFIG_DVB_OR51132 is not set
+# CONFIG_DVB_BCM3510 is not set
+# CONFIG_DVB_LGDT330X is not set
+# CONFIG_DVB_LGDT3305 is not set
+# CONFIG_DVB_LGDT3306A is not set
+# CONFIG_DVB_LG2160 is not set
+# CONFIG_DVB_S5H1409 is not set
+# CONFIG_DVB_AU8522_DTV is not set
+# CONFIG_DVB_AU8522_V4L is not set
+# CONFIG_DVB_S5H1411 is not set
+# CONFIG_DVB_S921 is not set
+# CONFIG_DVB_DIB8000 is not set
+# CONFIG_DVB_MB86A20S is not set
+# CONFIG_DVB_TC90522 is not set
+# CONFIG_DVB_MN88443X is not set
+# CONFIG_DVB_PLL is not set
+# CONFIG_DVB_TUNER_DIB0070 is not set
+# CONFIG_DVB_TUNER_DIB0090 is not set
+# CONFIG_DVB_DRX39XYJ is not set
+# CONFIG_DVB_LNBH25 is not set
+# CONFIG_DVB_LNBH29 is not set
+# CONFIG_DVB_LNBP21 is not set
+# CONFIG_DVB_LNBP22 is not set
+# CONFIG_DVB_ISL6405 is not set
+# CONFIG_DVB_ISL6421 is not set
+# CONFIG_DVB_ISL6423 is not set
+# CONFIG_DVB_A8293 is not set
+# CONFIG_DVB_LGS8GL5 is not set
+# CONFIG_DVB_LGS8GXX is not set
+# CONFIG_DVB_ATBM8830 is not set
+# CONFIG_DVB_TDA665x is not set
+# CONFIG_DVB_IX2505V is not set
+# CONFIG_DVB_M88RS2000 is not set
+# CONFIG_DVB_AF9033 is not set
+# CONFIG_DVB_HORUS3A is not set
+# CONFIG_DVB_ASCOT2E is not set
+# CONFIG_DVB_HELENE is not set
+# CONFIG_DVB_CXD2099 is not set
+# CONFIG_DVB_SP2 is not set
+CONFIG_QCOM_COMMAND_DB=m
+CONFIG_QCOM_LMH=m
+# RB5 (SM8250) bits start here
+CONFIG_QCOM_IPCC=m
+CONFIG_QCOM_SPMI_ADC5=m
+CONFIG_QCOM_SPMI_TEMP_ALARM=m
+CONFIG_RPMSG_NS=m
+CONFIG_CAN_MCP251XFD=m
+CONFIG_ATH11K=m
+CONFIG_ATH11K_AHB=m
+CONFIG_ATH11K_PCI=m
+CONFIG_PINCTRL_SM8250=m
+CONFIG_PINCTRL_LPASS_LPI=m
+CONFIG_QCOM_SPMI_ADC_TM5=m
+CONFIG_MFD_QCOM_QCA639X=m
+CONFIG_REGULATOR_QCOM_USB_VBUS=m
+CONFIG_DRM_DISPLAY_CONNECTOR=m
+CONFIG_DRM_LONTIUM_LT9611UXC=m
+CONFIG_SND_SOC_SM8250=m
+CONFIG_SND_SOC_LPASS_WSA_MACRO=m
+CONFIG_SND_SOC_LPASS_VA_MACRO=m
+CONFIG_TYPEC_QCOM_PMIC=m
+CONFIG_LEDS_QCOM_LPG=m
+CONFIG_SM_GPUCC_8250=m
+CONFIG_SM_DISPCC_8250=m
+CONFIG_SM_VIDEOCC_8250=m
+CONFIG_CLK_GFM_LPASS_SM8250=m
+CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=m
+CONFIG_INTERCONNECT_QCOM_SM8250=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_QCOM_CPR=m
+CONFIG_QCOM_SPM=m
diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig
new file mode 100644
index 0000000..1b5b996
--- /dev/null
+++ b/arch/arm64/configs/gki_defconfig
@@ -0,0 +1,681 @@
+CONFIG_LOCALVERSION="-mainline"
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=y
+CONFIG_UCLAMP_TASK=y
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_UCLAMP_TASK_GROUP=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+# CONFIG_PID_NS is not set
+CONFIG_RT_SOFTINT_OPTIMIZATION=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_BOOT_CONFIG=y
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_USERFAULTFD=y
+# CONFIG_RSEQ is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_QCOM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=32
+CONFIG_PARAVIRT=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_COMPAT=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
+CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off cgroup_disable=pressure"
+CONFIG_CMDLINE_EXTEND=y
+# CONFIG_DMI is not set
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_ENERGY_MODEL=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+CONFIG_CPU_IDLE_GOV_TEO=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_ARM_PSCI_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_SCMI_CPUFREQ=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_SHADOW_CALL_STACK=y
+CONFIG_LTO_CLANG_FULL=y
+CONFIG_CFI_CLANG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SCMVERSION=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_GKI_HACKS_TO_FIX=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_AREAS=16
+# CONFIG_ZONE_DMA is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_ESP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_GRE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_DSCP=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_TIPC=y
+CONFIG_L2TP=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
+CONFIG_6LOWPAN=y
+CONFIG_IEEE802154=y
+CONFIG_IEEE802154_6LOWPAN=y
+CONFIG_MAC802154=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_SFQ=y
+CONFIG_NET_SCH_TBF=y
+CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_CODEL=y
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_NET_SCH_FQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_BASIC=y
+CONFIG_NET_CLS_TCINDEX=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_CLS_MATCHALL=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_VSOCKETS=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_CAN=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIBTSDIO=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+# CONFIG_CFG80211_DEFAULT_PS is not set
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_MAC80211=y
+CONFIG_RFKILL=y
+CONFIG_NFC=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIEAER=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_DW_PLAT_EP=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_CACHE is not set
+# CONFIG_SUN50I_DE2_BUS is not set
+# CONFIG_SUNXI_RSB is not set
+CONFIG_ARM_SCMI_PROTOCOL=y
+# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
+CONFIG_ARM_SCPI_PROTOCOL=y
+# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
+# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set
+CONFIG_GNSS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SRAM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PCI=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_DWC_TC_PLATFORM=y
+CONFIG_SCSI_UFS_HISI=y
+CONFIG_SCSI_UFS_BSG=y
+CONFIG_SCSI_UFS_CRYPTO=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=y
+CONFIG_IFB=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_USB_RTL8150=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX88179_178A is not set
+CONFIG_USB_NET_CDC_EEM=y
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_USB_NET_AQC111=y
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+# CONFIG_WLAN_VENDOR_ATH is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+# CONFIG_WLAN_VENDOR_QUANTENNA is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_EXAR is not set
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_QCOM_GENI=y
+CONFIG_SERIAL_QCOM_GENI_CONSOLE=y
+CONFIG_SERIAL_SPRD=y
+CONFIG_SERIAL_SPRD_CONSOLE=y
+CONFIG_HVC_DCC=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_CAVIUM is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVPORT is not set
+# CONFIG_I2C_COMPAT is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I3C=y
+CONFIG_SPI=y
+CONFIG_SPMI=y
+# CONFIG_SPMI_MSM_PMIC_ARB is not set
+# CONFIG_PINCTRL_SUN8I_H3_R is not set
+# CONFIG_PINCTRL_SUN50I_A64 is not set
+# CONFIG_PINCTRL_SUN50I_A64_R is not set
+# CONFIG_PINCTRL_SUN50I_H5 is not set
+# CONFIG_PINCTRL_SUN50I_H6 is not set
+# CONFIG_PINCTRL_SUN50I_H6_R is not set
+CONFIG_GPIO_GENERIC_PLATFORM=y
+CONFIG_POWER_RESET_HISI=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_NETLINK=y
+CONFIG_THERMAL_STATISTICS=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_MFD_ACT8945A=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_RC_CORE=y
+# CONFIG_RC_MAP is not set
+CONFIG_LIRC=y
+CONFIG_BPF_LIRC_MODE2=y
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_MEDIA_CEC_RC=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_SDR_SUPPORT is not set
+# CONFIG_MEDIA_TEST_SUPPORT is not set
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_USB_GSPCA=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+# CONFIG_VGA_ARB is not set
+CONFIG_DRM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NINTENDO=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_PLAYSTATION=y
+CONFIG_PLAYSTATION_FF=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SONY=y
+CONFIG_SONY_FF=y
+CONFIG_HID_STEAM=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_UAC2=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+CONFIG_TYPEC_TCPCI=y
+CONFIG_TYPEC_UCSI=y
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_CRYPTO=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_CLASS_MULTICOLOR=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL030=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_UIO=y
+CONFIG_VHOST_VSOCK=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_DEBUG_KINFO=y
+CONFIG_COMMON_CLK_SCPI=y
+# CONFIG_CLK_SUNXI is not set
+# CONFIG_SUNXI_CCU is not set
+CONFIG_HWSPINLOCK=y
+# CONFIG_SUN50I_ERRATUM_UNKNOWN1 is not set
+CONFIG_MAILBOX=y
+CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y
+CONFIG_REMOTEPROC=y
+CONFIG_REMOTEPROC_CDEV=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_PM_DEVFREQ_EVENT=y
+CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_TRIGGER=y
+CONFIG_PWM=y
+CONFIG_GENERIC_PHY=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDERFS=y
+CONFIG_ANDROID_VENDOR_HOOKS=y
+CONFIG_LIBNVDIMM=y
+# CONFIG_ND_BLK is not set
+CONFIG_INTERCONNECT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_COMPRESSION=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_FS_VERITY=y
+CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
+# CONFIG_DNOTIFY is not set
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_VIRTIO_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_INCREMENTAL_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_EXFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_EFIVAR_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_PMSG=y
+CONFIG_PSTORE_RAM=y
+CONFIG_EROFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_MAC_ROMAN=y
+CONFIG_NLS_MAC_CELTIC=y
+CONFIG_NLS_MAC_CENTEURO=y
+CONFIG_NLS_MAC_CROATIAN=y
+CONFIG_NLS_MAC_CYRILLIC=y
+CONFIG_NLS_MAC_GAELIC=y
+CONFIG_NLS_MAC_GREEK=y
+CONFIG_NLS_MAC_ICELAND=y
+CONFIG_NLS_MAC_INUIT=y
+CONFIG_NLS_MAC_ROMANIAN=y
+CONFIG_NLS_MAC_TURKISH=y
+CONFIG_NLS_UTF8=y
+CONFIG_UNICODE=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_STATIC_USERMODEHELPER=y
+CONFIG_STATIC_USERMODEHELPER_PATH=""
+CONFIG_SECURITY_SELINUX=y
+CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+CONFIG_CRYPTO_CHACHA20POLY1305=y
+CONFIG_CRYPTO_ADIANTUM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_BLAKE2B=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_LZ4=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_XZ_DEC=y
+CONFIG_DMA_CMA=y
+CONFIG_STACK_HASH_ORDER=12
+CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
+CONFIG_DYNAMIC_DEBUG_CORE=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_HEADERS_INSTALL=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_UBSAN=y
+CONFIG_UBSAN_TRAP=y
+CONFIG_UBSAN_LOCAL_BOUNDS=y
+# CONFIG_UBSAN_SHIFT is not set
+# CONFIG_UBSAN_OBJECT_SIZE is not set
+# CONFIG_UBSAN_BOOL is not set
+# CONFIG_UBSAN_ENUM is not set
+CONFIG_PAGE_OWNER=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_KASAN=y
+CONFIG_KASAN_HW_TAGS=y
+CONFIG_KFENCE=y
+CONFIG_KFENCE_SAMPLE_INTERVAL=500
+CONFIG_KFENCE_NUM_OBJECTS=63
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SOFTLOCKUP_DETECTOR=y
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_WQ_WATCHDOG=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_HIST_TRIGGERS=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_KUNIT=y
+CONFIG_KUNIT_DEBUGFS=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/arm64/configs/rockpi4_gki.fragment b/arch/arm64/configs/rockpi4_gki.fragment
new file mode 100644
index 0000000..2af01b8
--- /dev/null
+++ b/arch/arm64/configs/rockpi4_gki.fragment
@@ -0,0 +1,82 @@
+# Core features
+CONFIG_ARCH_ROCKCHIP=y
+# CONFIG_CLK_PX30 is not set
+# CONFIG_CLK_RV110X is not set
+# CONFIG_CLK_RK3036 is not set
+# CONFIG_CLK_RK312X is not set
+# CONFIG_CLK_RK3188 is not set
+# CONFIG_CLK_RK322X is not set
+# CONFIG_CLK_RK3288 is not set
+# CONFIG_CLK_RK3308 is not set
+# CONFIG_CLK_RK3328 is not set
+# CONFIG_CLK_RK3368 is not set
+CONFIG_COMMON_CLK_RK808=m
+CONFIG_CPUFREQ_DT=m
+CONFIG_MFD_RK808=m
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PL330_DMA=m
+CONFIG_PWM_ROCKCHIP=m
+CONFIG_PWRSEQ_SIMPLE=m
+CONFIG_REGULATOR_FAN53555=m
+CONFIG_REGULATOR_PWM=m
+CONFIG_REGULATOR_RK808=m
+CONFIG_ROCKCHIP_EFUSE=m
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_ROCKCHIP_IODOMAIN=m
+CONFIG_ROCKCHIP_MBOX=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ROCKCHIP_THERMAL=m
+
+# Ethernet
+CONFIG_STMMAC_ETH=m
+# CONFIG_DWMAC_GENERIC is not set
+# CONFIG_DWMAC_IPQ806X is not set
+# CONFIG_DWMAC_QCOM_ETHQOS is not set
+# CONFIG_DWMAC_SUNXI is not set
+# CONFIG_DWMAC_SUN8I is not set
+
+# I2C
+CONFIG_I2C_RK3X=m
+
+# Watchdog
+CONFIG_DW_WATCHDOG=m
+
+# Display
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+
+# USB 2.x
+CONFIG_PHY_ROCKCHIP_INNO_USB2=m
+CONFIG_USB_OHCI_HCD=m
+# CONFIG_USB_OHCI_HCD_PCI is not set
+CONFIG_USB_OHCI_HCD_PLATFORM=m
+
+# eMMC / SD-Card
+CONFIG_MMC_SDHCI_OF_ARASAN=m
+CONFIG_MMC_DW=m
+CONFIG_MMC_DW_ROCKCHIP=m
+CONFIG_PHY_ROCKCHIP_EMMC=m
+
+# Real-time clock
+CONFIG_RTC_DRV_RK808=m
+
+# Type-C
+CONFIG_PHY_ROCKCHIP_TYPEC=m
+
+# SAR ADC
+CONFIG_ROCKCHIP_SARADC=m
+
+# Audio
+CONFIG_SND_SOC_ROCKCHIP_I2S=m
+
+# To boot Linux distributions like Debian
+CONFIG_DEVTMPFS=y
+
+# To bootstrap rootfs with QEMU
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_BLK=m
+CONFIG_VIRTIO_NET=m
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 6f41b65..f2a5dcc 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -30,6 +30,7 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
+#include <linux/android_vendor.h>
 
 #include <vdso/processor.h>
 
@@ -150,6 +151,8 @@
 		struct user_fpsimd_state fpsimd_state;
 	} uw;
 
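+	/* Scratch storage reserved for vendor modules (see linux/android_vendor.h). */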
+	ANDROID_VENDOR_DATA(1);
+
 	unsigned int		fpsimd_cpu;
 	void			*sve_state;	/* SVE registers, if any */
 	unsigned int		vl[ARM64_VEC_MAX];	/* vector length */
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index fc55f5a..b936377 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -87,6 +87,8 @@
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+extern int nr_ipi_get(void);
+extern struct irq_desc **ipi_desc_get(void);
 
 #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
 extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 4f3661e..852abc5 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -283,21 +283,25 @@
 {
 	register_debug_hook(&hook->node, &user_break_hook);
 }
+EXPORT_SYMBOL_GPL(register_user_break_hook);
 
 void unregister_user_break_hook(struct break_hook *hook)
 {
 	unregister_debug_hook(&hook->node);
 }
+EXPORT_SYMBOL_GPL(unregister_user_break_hook);
 
 void register_kernel_break_hook(struct break_hook *hook)
 {
 	register_debug_hook(&hook->node, &kernel_break_hook);
 }
+EXPORT_SYMBOL_GPL(register_kernel_break_hook);
 
 void unregister_kernel_break_hook(struct break_hook *hook)
 {
 	unregister_debug_hook(&hook->node);
 }
+EXPORT_SYMBOL_GPL(unregister_kernel_break_hook);
 
 static int call_break_hook(struct pt_regs *regs, unsigned int esr)
 {
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index d8e606f..c2b0bfe 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -214,8 +214,11 @@
 {
 	const u8 *prop = get_bootargs_cmdline();
 
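+	/*
+	 * With CMDLINE_EXTEND the built-in command line supplements the
+	 * bootloader-provided one, so scan it for overrides as well.
+	 */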
-	if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop)
+	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+	    IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+	    !prop) {
 		__parse_cmdline(CONFIG_CMDLINE, true);
+	}
 
 	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop)
 		__parse_cmdline(prop, true);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index aacf2f5..2eb2714 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -40,6 +40,8 @@
 #include <linux/percpu.h>
 #include <linux/thread_info.h>
 #include <linux/prctl.h>
+#include <trace/hooks/fpsimd.h>
+#include <trace/hooks/mpam.h>
 
 #include <asm/alternative.h>
 #include <asm/compat.h>
@@ -245,6 +247,7 @@
 	__show_regs(regs);
 	dump_backtrace(regs, NULL, KERN_DEFAULT);
 }
+EXPORT_SYMBOL_GPL(show_regs);
 
 static void tls_thread_flush(void)
 {
@@ -505,6 +508,12 @@
 	ptrauth_thread_switch_user(next);
 
 	/*
+	 * The MPAM vendor hook must run before the dsb() below,
+	 * because MPAM configuration interacts with cache maintenance.
+	 */
+	trace_android_vh_mpam_set(prev, next);
+
+	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
 	 * the thread migrates to a different CPU.
 	 * This full barrier is also required by the membarrier system
@@ -522,6 +531,8 @@
 	if (prev->thread.sctlr_user != next->thread.sctlr_user)
 		update_sctlr_el1(next->thread.sctlr_user);
 
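+	/* Vendor hook so modules can track FPSIMD state across the switch. */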
+	trace_android_vh_is_fpsimd_save(prev, next);
+
 	/* the actual thread switch */
 	last = cpu_switch_to(prev, next);
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 27df5c1..a54a23b 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -53,9 +53,14 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/debug.h>
 
 DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
 EXPORT_PER_CPU_SYMBOL(cpu_number);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_raise);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_exit);
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -876,6 +881,7 @@
 		break;
 
 	case IPI_CPU_STOP:
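+		/* Give vendor hooks a chance to log register state before stopping. */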
+		trace_android_vh_ipi_stop_rcuidle(get_irq_regs());
 		local_cpu_stop();
 		break;
 
@@ -1104,3 +1110,15 @@
 	return !!cpus_stuck_in_kernel || smp_spin_tables ||
 		is_protected_kvm_enabled();
 }
+
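+/*
+ * Accessors for the IPI bookkeeping above, exported so that vendor
+ * modules can inspect IPI state without reaching into static data.
+ */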
+int nr_ipi_get(void)
+{
+	return nr_ipi;
+}
+EXPORT_SYMBOL_GPL(nr_ipi_get);
+
+struct irq_desc **ipi_desc_get(void)
+{
+	return ipi_desc;
+}
+EXPORT_SYMBOL_GPL(ipi_desc_get);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 94f83cd4..4148a23 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -214,6 +214,7 @@
 
 	put_task_stack(tsk);
 }
+EXPORT_SYMBOL_GPL(dump_backtrace);
 
 void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 {
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 6719f9e..ba8f22d 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -10,6 +10,7 @@
 #include <linux/dma-iommu.h>
 #include <xen/xen.h>
 #include <xen/swiotlb-xen.h>
+#include <trace/hooks/iommu.h>
 
 #include <asm/cacheflush.h>
 
@@ -49,8 +50,10 @@
 		   ARCH_DMA_MINALIGN, cls);
 
 	dev->dma_coherent = coherent;
-	if (iommu)
+	if (iommu) {
 		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
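+		/* Notify vendor modules of the DMA window just configured. */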
+		trace_android_vh_iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
+	}
 
 #ifdef CONFIG_XEN
 	if (xen_swiotlb_detect())
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 4224386..6027eb3 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -240,8 +240,11 @@
 
 PHONY += bzImage $(BOOT_TARGETS)
 
+# Don't build bzImage via the "all" target in a mixed-tree build
+ifndef KBUILD_MIXED_TREE
 # Default kernel to build
 all: bzImage
+endif
 
 # KBUILD_IMAGE specify target image being built
 KBUILD_IMAGE := $(boot)/bzImage
diff --git a/arch/x86/OWNERS b/arch/x86/OWNERS
new file mode 100644
index 0000000..f59fa99
--- /dev/null
+++ b/arch/x86/OWNERS
@@ -0,0 +1,3 @@
+per-file crypto/**=file:/crypto/OWNERS
+per-file mm/**=file:/mm/OWNERS
+per-file net/**=file:/net/OWNERS
diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig
new file mode 100644
index 0000000..d71d793
--- /dev/null
+++ b/arch/x86/configs/gki_defconfig
@@ -0,0 +1,617 @@
+CONFIG_LOCALVERSION="-mainline"
+CONFIG_KERNEL_LZ4=y
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=y
+CONFIG_UCLAMP_TASK=y
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_UCLAMP_TASK_GROUP=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+# CONFIG_TIME_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_BOOT_CONFIG=y
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_USERFAULTFD=y
+# CONFIG_RSEQ is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
+CONFIG_PROFILING=y
+CONFIG_SMP=y
+CONFIG_X86_X2APIC=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+CONFIG_NR_CPUS=32
+# CONFIG_X86_MCE is not set
+CONFIG_EFI=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="stack_depot_disable=on cgroup_disable=pressure"
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_IA32_EMULATION=y
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_LTO_CLANG_FULL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SCMVERSION=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_GKI_HACKS_TO_FIX=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_AREAS=16
+# CONFIG_ZONE_DMA is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_ESP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_GRE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_DSCP=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_TIPC=y
+CONFIG_L2TP=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
+CONFIG_6LOWPAN=y
+CONFIG_IEEE802154=y
+CONFIG_IEEE802154_6LOWPAN=y
+CONFIG_MAC802154=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_SFQ=y
+CONFIG_NET_SCH_TBF=y
+CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_CODEL=y
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_NET_SCH_FQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_BASIC=y
+CONFIG_NET_CLS_TCINDEX=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_CLS_MATCHALL=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_VSOCKETS=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_CAN=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIBTSDIO=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+# CONFIG_CFG80211_DEFAULT_PS is not set
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_MAC80211=y
+CONFIG_RFKILL=y
+CONFIG_NFC=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIEAER=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_IOV=y
+CONFIG_PCIE_DW_PLAT_EP=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_CACHE is not set
+CONFIG_GNSS=y
+CONFIG_OF=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SRAM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PCI=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_DWC_TC_PLATFORM=y
+CONFIG_SCSI_UFS_BSG=y
+CONFIG_SCSI_UFS_CRYPTO=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=y
+CONFIG_IFB=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_USB_RTL8150=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX88179_178A is not set
+CONFIG_USB_NET_CDC_EEM=y
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_USB_NET_AQC111=y
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+# CONFIG_WLAN_VENDOR_ATH is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+# CONFIG_WLAN_VENDOR_QUANTENNA is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_HW_RANDOM=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVPORT is not set
+CONFIG_HPET=y
+# CONFIG_I2C_COMPAT is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I3C=y
+CONFIG_SPI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+# CONFIG_HWMON is not set
+CONFIG_THERMAL_NETLINK=y
+CONFIG_THERMAL_STATISTICS=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+# CONFIG_X86_PKG_TEMP_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_MFD_SYSCON=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_RC_CORE=y
+# CONFIG_RC_MAP is not set
+CONFIG_LIRC=y
+CONFIG_BPF_LIRC_MODE2=y
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_MEDIA_CEC_RC=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_SDR_SUPPORT is not set
+# CONFIG_MEDIA_TEST_SUPPORT is not set
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_USB_GSPCA=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_DRM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NINTENDO=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_PLAYSTATION=y
+CONFIG_PLAYSTATION_FF=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SONY=y
+CONFIG_SONY_FF=y
+CONFIG_HID_STEAM=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+CONFIG_TYPEC_TCPCI=y
+CONFIG_TYPEC_UCSI=y
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_CRYPTO=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_CLASS_MULTICOLOR=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_UIO=y
+# CONFIG_VIRTIO_MEM is not set
+CONFIG_VHOST_VSOCK=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_DEBUG_KINFO=y
+CONFIG_REMOTEPROC=y
+CONFIG_REMOTEPROC_CDEV=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_PM_DEVFREQ_EVENT=y
+CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_TRIGGER=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDERFS=y
+CONFIG_ANDROID_VENDOR_HOOKS=y
+CONFIG_LIBNVDIMM=y
+# CONFIG_ND_BLK is not set
+CONFIG_INTERCONNECT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_COMPRESSION=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_FS_VERITY=y
+CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
+# CONFIG_DNOTIFY is not set
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_VIRTIO_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_INCREMENTAL_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_EXFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_EFIVAR_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_PMSG=y
+CONFIG_PSTORE_RAM=y
+CONFIG_EROFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_MAC_ROMAN=y
+CONFIG_NLS_MAC_CELTIC=y
+CONFIG_NLS_MAC_CENTEURO=y
+CONFIG_NLS_MAC_CROATIAN=y
+CONFIG_NLS_MAC_CYRILLIC=y
+CONFIG_NLS_MAC_GAELIC=y
+CONFIG_NLS_MAC_GREEK=y
+CONFIG_NLS_MAC_ICELAND=y
+CONFIG_NLS_MAC_INUIT=y
+CONFIG_NLS_MAC_ROMANIAN=y
+CONFIG_NLS_MAC_TURKISH=y
+CONFIG_NLS_UTF8=y
+CONFIG_UNICODE=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_STATIC_USERMODEHELPER=y
+CONFIG_STATIC_USERMODEHELPER_PATH=""
+CONFIG_SECURITY_SELINUX=y
+CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+CONFIG_CRYPTO_CHACHA20POLY1305=y
+CONFIG_CRYPTO_ADIANTUM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_BLAKE2B=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA256_SSSE3=y
+CONFIG_CRYPTO_SHA512_SSSE3=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_LZ4=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRC8=y
+CONFIG_XZ_DEC=y
+CONFIG_DMA_CMA=y
+CONFIG_STACK_HASH_ORDER=12
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG_CORE=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_HEADERS_INSTALL=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_UBSAN=y
+CONFIG_UBSAN_TRAP=y
+CONFIG_UBSAN_LOCAL_BOUNDS=y
+# CONFIG_UBSAN_SHIFT is not set
+# CONFIG_UBSAN_OBJECT_SIZE is not set
+# CONFIG_UBSAN_BOOL is not set
+# CONFIG_UBSAN_ENUM is not set
+CONFIG_PAGE_OWNER=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_KFENCE=y
+CONFIG_KFENCE_SAMPLE_INTERVAL=500
+CONFIG_KFENCE_NUM_OBJECTS=63
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SOFTLOCKUP_DETECTOR=y
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_WQ_WATCHDOG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_HIST_TRIGGERS=y
+CONFIG_UNWINDER_FRAME_POINTER=y
+CONFIG_KUNIT=y
+CONFIG_KUNIT_DEBUGFS=y
diff --git a/block/OWNERS b/block/OWNERS
new file mode 100644
index 0000000..2641e06
--- /dev/null
+++ b/block/OWNERS
@@ -0,0 +1,2 @@
+bvanassche@google.com
+jaegeuk@google.com
diff --git a/block/bio.c b/block/bio.c
index 15ab0d6..f509414 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -274,6 +274,9 @@
 #endif
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	bio->bi_crypt_context = NULL;
+#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+	bio->bi_skip_dm_default_key = false;
+#endif
 #endif
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 	bio->bi_integrity = NULL;
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index c87aba8..893641e 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -87,7 +87,7 @@
  * This is the key we set when evicting a keyslot. This *should* be the all 0's
  * key, but AES-XTS rejects that key, so we use some random bytes instead.
  */
-static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];
+static u8 blank_key[BLK_CRYPTO_MAX_STANDARD_KEY_SIZE];
 
 static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
 {
@@ -180,6 +180,8 @@
 	bio_clone_blkg_association(bio, bio_src);
 	blkcg_bio_issue_init(bio);
 
+	bio_clone_skip_dm_default_key(bio, bio_src);
+
 	return bio;
 }
 
@@ -539,7 +541,7 @@
 	if (blk_crypto_fallback_inited)
 		return 0;
 
-	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
+	prandom_bytes(blank_key, BLK_CRYPTO_MAX_STANDARD_KEY_SIZE);
 
 	err = bioset_init(&crypto_bio_split, 64, 0, 0);
 	if (err)
@@ -552,6 +554,7 @@
 
 	profile->ll_ops = blk_crypto_fallback_ll_ops;
 	profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+	profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_STANDARD;
 
 	/* All blk-crypto modes have a crypto API fallback. */
 	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
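
The random "blank key" above follows from a crypto API rule worth spelling out: AES-XTS rejects any key whose two halves are identical (an XTS key is two AES keys concatenated), and the all-zero key trivially has identical halves. A minimal standalone sketch of that check, using a hypothetical helper rather than the kernel's actual xts_verify_key():

/* Sketch: why an all-zero key cannot be used to evict a keyslot. */
static bool xts_halves_differ(const u8 *key, unsigned int keylen)
{
	/* identical halves defeat the XTS tweak construction */
	return memcmp(key, key + keylen / 2, keylen / 2) != 0;
}
/* xts_halves_differ(all_zero_key, 64) is false, so the key is refused. */
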
diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
index 2fb0d65..d36ae71 100644
--- a/block/blk-crypto-internal.h
+++ b/block/blk-crypto-internal.h
@@ -13,6 +13,7 @@
 struct blk_crypto_mode {
 	const char *cipher_str; /* crypto API name (for fallback case) */
 	unsigned int keysize; /* key size in bytes */
+	unsigned int security_strength; /* security strength in bytes */
 	unsigned int ivsize; /* iv size in bytes */
 };
 
diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c
index 605ba06..8fc39d4 100644
--- a/block/blk-crypto-profile.c
+++ b/block/blk-crypto-profile.c
@@ -350,6 +350,8 @@
 		return false;
 	if (profile->max_dun_bytes_supported < cfg->dun_bytes)
 		return false;
+	if (!(profile->key_types_supported & cfg->key_type))
+		return false;
 	return true;
 }
 
@@ -469,6 +471,43 @@
 }
 
 /**
+ * blk_crypto_derive_sw_secret() - Derive software secret from hardware-wrapped
+ *				   key
+ * @profile: the crypto profile of the device the key will be used on
+ * @wrapped_key: the hardware-wrapped key
+ * @wrapped_key_size: size of @wrapped_key in bytes
+ * @sw_secret: (output) the software secret
+ *
+ * Given a hardware-wrapped key, ask the hardware to derive the secret which
+ * software can use for cryptographic tasks other than inline encryption.  This
+ * secret is guaranteed to be cryptographically isolated from the inline
+ * encryption key, i.e. derived with a different KDF context.
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the given @profile doesn't support
+ *	   hardware-wrapped keys (or is NULL), -EBADMSG if the key isn't a valid
+ *	   hardware-wrapped key, or another -errno code.
+ */
+int blk_crypto_derive_sw_secret(struct blk_crypto_profile *profile,
+				const u8 *wrapped_key,
+				unsigned int wrapped_key_size,
+				u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+	int err = -EOPNOTSUPP;
+
+	if (profile &&
+	    (profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED) &&
+	    profile->ll_ops.derive_sw_secret) {
+		blk_crypto_hw_enter(profile);
+		err = profile->ll_ops.derive_sw_secret(profile, wrapped_key,
+						       wrapped_key_size,
+						       sw_secret);
+		blk_crypto_hw_exit(profile);
+	}
+	return err;
+}
+EXPORT_SYMBOL_GPL(blk_crypto_derive_sw_secret);
+
+/**
  * blk_crypto_intersect_capabilities() - restrict supported crypto capabilities
  *					 by child device
  * @parent: the crypto profile for the parent device
@@ -491,10 +530,12 @@
 			    child->max_dun_bytes_supported);
 		for (i = 0; i < ARRAY_SIZE(child->modes_supported); i++)
 			parent->modes_supported[i] &= child->modes_supported[i];
+		parent->key_types_supported &= child->key_types_supported;
 	} else {
 		parent->max_dun_bytes_supported = 0;
 		memset(parent->modes_supported, 0,
 		       sizeof(parent->modes_supported));
+		parent->key_types_supported = 0;
 	}
 }
 EXPORT_SYMBOL_GPL(blk_crypto_intersect_capabilities);
@@ -527,6 +568,9 @@
 	    target->max_dun_bytes_supported)
 		return false;
 
+	if (reference->key_types_supported & ~target->key_types_supported)
+		return false;
+
 	return true;
 }
 EXPORT_SYMBOL_GPL(blk_crypto_has_capabilities);
@@ -561,5 +605,6 @@
 	       sizeof(dst->modes_supported));
 
 	dst->max_dun_bytes_supported = src->max_dun_bytes_supported;
+	dst->key_types_supported = src->key_types_supported;
 }
 EXPORT_SYMBOL_GPL(blk_crypto_update_capabilities);
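
For context on how the new export is consumed, a hedged caller sketch; the request_queue *q and the wrapped-key buffer are assumptions for illustration, not code from this patch:

/* Sketch: an upper layer deriving the software secret from a
 * hardware-wrapped key on a device that advertises support. */
u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE];
int err;

err = blk_crypto_derive_sw_secret(q->crypto_profile, wrapped_key,
				  wrapped_key_size, sw_secret);
if (err == -EOPNOTSUPP) {
	/* profile is NULL or lacks hardware-wrapped key support */
} else if (err == -EBADMSG) {
	/* the wrapped key failed hardware validation */
}
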
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index ec9efee..c490ff0 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -21,16 +21,19 @@
 	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
 		.cipher_str = "xts(aes)",
 		.keysize = 64,
+		.security_strength = 32,
 		.ivsize = 16,
 	},
 	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
 		.cipher_str = "essiv(cbc(aes),sha256)",
 		.keysize = 16,
+		.security_strength = 16,
 		.ivsize = 16,
 	},
 	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
 		.cipher_str = "adiantum(xchacha12,aes)",
 		.keysize = 32,
+		.security_strength = 32,
 		.ivsize = 32,
 	},
 };
@@ -68,7 +71,10 @@
 
 	/* Sanity check that no algorithm exceeds the defined limits. */
 	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
-		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
+		BUG_ON(blk_crypto_modes[i].keysize >
+		       BLK_CRYPTO_MAX_STANDARD_KEY_SIZE);
+		BUG_ON(blk_crypto_modes[i].security_strength >
+		       blk_crypto_modes[i].keysize);
 		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
 	}
 
@@ -96,6 +102,7 @@
 
 	bio->bi_crypt_context = bc;
 }
+EXPORT_SYMBOL_GPL(bio_crypt_set_ctx);
 
 void __bio_crypt_free_ctx(struct bio *bio)
 {
@@ -308,8 +315,9 @@
 /**
  * blk_crypto_init_key() - Prepare a key for use with blk-crypto
  * @blk_key: Pointer to the blk_crypto_key to initialize.
- * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
- *	     @crypto_mode; see blk_crypto_modes[].
+ * @raw_key: the raw bytes of the key
+ * @raw_key_size: size of the raw key in bytes
+ * @key_type: type of the key -- either standard or hardware-wrapped
  * @crypto_mode: identifier for the encryption algorithm to use
  * @dun_bytes: number of bytes that will be used to specify the DUN when this
  *	       key is used
@@ -318,7 +326,9 @@
  * Return: 0 on success, -errno on failure.  The caller is responsible for
  *	   zeroizing both blk_key and raw_key when done with them.
  */
-int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+int blk_crypto_init_key(struct blk_crypto_key *blk_key,
+			const u8 *raw_key, unsigned int raw_key_size,
+			enum blk_crypto_key_type key_type,
 			enum blk_crypto_mode_num crypto_mode,
 			unsigned int dun_bytes,
 			unsigned int data_unit_size)
@@ -331,8 +341,19 @@
 		return -EINVAL;
 
 	mode = &blk_crypto_modes[crypto_mode];
-	if (mode->keysize == 0)
+	switch (key_type) {
+	case BLK_CRYPTO_KEY_TYPE_STANDARD:
+		if (raw_key_size != mode->keysize)
+			return -EINVAL;
+		break;
+	case BLK_CRYPTO_KEY_TYPE_HW_WRAPPED:
+		if (raw_key_size < mode->security_strength ||
+		    raw_key_size > BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE)
+			return -EINVAL;
+		break;
+	default:
 		return -EINVAL;
+	}
 
 	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
 		return -EINVAL;
@@ -343,12 +364,14 @@
 	blk_key->crypto_cfg.crypto_mode = crypto_mode;
 	blk_key->crypto_cfg.dun_bytes = dun_bytes;
 	blk_key->crypto_cfg.data_unit_size = data_unit_size;
+	blk_key->crypto_cfg.key_type = key_type;
 	blk_key->data_unit_size_bits = ilog2(data_unit_size);
-	blk_key->size = mode->keysize;
-	memcpy(blk_key->raw, raw_key, mode->keysize);
+	blk_key->size = raw_key_size;
+	memcpy(blk_key->raw, raw_key, raw_key_size);
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_crypto_init_key);
 
 /*
  * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
@@ -358,8 +381,10 @@
 bool blk_crypto_config_supported(struct request_queue *q,
 				 const struct blk_crypto_config *cfg)
 {
-	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
-	       __blk_crypto_cfg_supported(q->crypto_profile, cfg);
+	if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
+	    cfg->key_type == BLK_CRYPTO_KEY_TYPE_STANDARD)
+		return true;
+	return __blk_crypto_cfg_supported(q->crypto_profile, cfg);
 }
 
 /**
@@ -382,8 +407,13 @@
 {
 	if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
 		return 0;
+	if (key->crypto_cfg.key_type != BLK_CRYPTO_KEY_TYPE_STANDARD) {
+		pr_warn_once("tried to use wrapped key, but hardware doesn't support it\n");
+		return -EOPNOTSUPP;
+	}
 	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
 }
+EXPORT_SYMBOL_GPL(blk_crypto_start_using_key);
 
 /**
  * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
diff --git a/build.config.aarch64 b/build.config.aarch64
new file mode 100644
index 0000000..2bf2c15
--- /dev/null
+++ b/build.config.aarch64
@@ -0,0 +1,14 @@
+ARCH=arm64
+MAKE_GOALS="
+Image
+modules
+"
+
+FILES="
+arch/arm64/boot/Image
+vmlinux
+System.map
+vmlinux.symvers
+modules.builtin
+modules.builtin.modinfo
+"
diff --git a/build.config.allmodconfig b/build.config.allmodconfig
new file mode 100644
index 0000000..8d96095
--- /dev/null
+++ b/build.config.allmodconfig
@@ -0,0 +1,11 @@
+DEFCONFIG=allmodconfig
+
+POST_DEFCONFIG_CMDS="update_config"
+function update_config() {
+    ${KERNEL_DIR}/scripts/config --file ${OUT_DIR}/.config \
+         -e UNWINDER_FRAME_POINTER \
+         -d WERROR
+
+    (cd ${OUT_DIR} && \
+     make O=${OUT_DIR} $archsubarch CROSS_COMPILE=${CROSS_COMPILE} ${TOOL_ARGS} ${MAKE_ARGS} olddefconfig)
+}
diff --git a/build.config.allmodconfig.aarch64 b/build.config.allmodconfig.aarch64
new file mode 100644
index 0000000..2fbe380
--- /dev/null
+++ b/build.config.allmodconfig.aarch64
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig
+
diff --git a/build.config.allmodconfig.arm b/build.config.allmodconfig.arm
new file mode 100644
index 0000000..e92744a
--- /dev/null
+++ b/build.config.allmodconfig.arm
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.arm
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig
+
diff --git a/build.config.allmodconfig.x86_64 b/build.config.allmodconfig.x86_64
new file mode 100644
index 0000000..f06b30c
--- /dev/null
+++ b/build.config.allmodconfig.x86_64
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig
+
diff --git a/build.config.amlogic b/build.config.amlogic
new file mode 100644
index 0000000..e04bd52
--- /dev/null
+++ b/build.config.amlogic
@@ -0,0 +1,34 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.aarch64
+
+DEFCONFIG=amlogic_gki_defconfig
+FRAGMENT_CONFIG=${KERNEL_DIR}/arch/arm64/configs/amlogic_gki.fragment
+
+PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/${KERNEL_DIR}/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${FRAGMENT_CONFIG}"
+POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}"
+
+# needed for DT overlay support
+DTC_FLAGS="-@"
+
+MAKE_GOALS="${MAKE_GOALS}
+dtbs
+"
+
+FILES="${FILES}
+arch/arm64/boot/Image.lz4
+arch/arm64/boot/dts/amlogic/meson-g12a-sei510*.dtb
+arch/arm64/boot/dts/amlogic/meson-sm1-sei610*.dtb
+arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l*.dtb
+arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3*.dtb
+"
+
+#
+# NOTE: Using Image.lz4 in MAKE_GOALS does not work because the
+#   kernel build passes the legacy option (-l) to the lz4 command,
+#   which u-boot then fails to decompress. Instead, add a custom
+#   command that compresses with the same options as the kernel
+#   build, but without -l.
+#
+EXTRA_CMDS="lz4_compress"
+function lz4_compress() {
+        lz4 -f -12 --favor-decSpeed ${OUT_DIR}/arch/arm64/boot/Image ${OUT_DIR}/arch/arm64/boot/Image.lz4
+}
diff --git a/build.config.arm b/build.config.arm
new file mode 100644
index 0000000..832c3c0
--- /dev/null
+++ b/build.config.arm
@@ -0,0 +1,11 @@
+ARCH=arm
+MAKE_GOALS="
+zImage
+modules
+"
+
+FILES="
+arch/arm/boot/zImage
+vmlinux
+System.map
+"
diff --git a/build.config.common b/build.config.common
new file mode 100644
index 0000000..97cd431
--- /dev/null
+++ b/build.config.common
@@ -0,0 +1,17 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.constants
+
+BRANCH=android-mainline
+KMI_GENERATION=0
+
+LLVM=1
+DEPMOD=depmod
+CLANG_PREBUILT_BIN=prebuilts/clang/host/linux-x86/clang-${CLANG_VERSION}/bin
+BUILDTOOLS_PREBUILT_BIN=build/build-tools/path/linux-x86
+DTC=${ROOT_DIR}/${BUILDTOOLS_PREBUILT_BIN}/dtc
+
+EXTRA_CMDS=''
+STOP_SHIP_TRACEPRINTK=1
+IN_KERNEL_MODULES=1
+DO_NOT_STRIP_MODULES=1
+
+HERMETIC_TOOLCHAIN=${HERMETIC_TOOLCHAIN:-1}
diff --git a/build.config.constants b/build.config.constants
new file mode 100644
index 0000000..3ba91bac
--- /dev/null
+++ b/build.config.constants
@@ -0,0 +1 @@
+CLANG_VERSION=r437112
diff --git a/build.config.db845c b/build.config.db845c
new file mode 100644
index 0000000..0878946
--- /dev/null
+++ b/build.config.db845c
@@ -0,0 +1,20 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
+
+BUILD_INITRAMFS=1
+DEFCONFIG=db845c_gki_defconfig
+FRAGMENT_CONFIG=${KERNEL_DIR}/arch/arm64/configs/db845c_gki.fragment
+PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/${KERNEL_DIR}/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${FRAGMENT_CONFIG}"
+POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}"
+
+MAKE_GOALS="${MAKE_GOALS}
+qcom/sdm845-db845c.dtb
+qcom/qrb5165-rb5.dtb
+Image.gz
+"
+
+FILES="${FILES}
+arch/arm64/boot/Image.gz
+arch/arm64/boot/dts/qcom/sdm845-db845c.dtb
+arch/arm64/boot/dts/qcom/qrb5165-rb5.dtb
+"
diff --git a/build.config.gki b/build.config.gki
new file mode 100644
index 0000000..8a13ddd
--- /dev/null
+++ b/build.config.gki
@@ -0,0 +1,6 @@
+DEFCONFIG=gki_defconfig
+POST_DEFCONFIG_CMDS="check_defconfig"
+
+if [ -n "${GKI_BUILD_CONFIG_FRAGMENT}" ]; then
+  source ${GKI_BUILD_CONFIG_FRAGMENT}
+fi
diff --git a/build.config.gki-debug.aarch64 b/build.config.gki-debug.aarch64
new file mode 100644
index 0000000..c1fe2f0
--- /dev/null
+++ b/build.config.gki-debug.aarch64
@@ -0,0 +1,3 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.aarch64
+TRIM_NONLISTED_KMI=""
+KMI_SYMBOL_LIST_STRICT_MODE=""
diff --git a/build.config.gki-debug.x86_64 b/build.config.gki-debug.x86_64
new file mode 100644
index 0000000..d89b7ad
--- /dev/null
+++ b/build.config.gki-debug.x86_64
@@ -0,0 +1,3 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.x86_64
+TRIM_NONLISTED_KMI=""
+KMI_SYMBOL_LIST_STRICT_MODE=""
diff --git a/build.config.gki.aarch64 b/build.config.gki.aarch64
new file mode 100644
index 0000000..0a9066e
--- /dev/null
+++ b/build.config.gki.aarch64
@@ -0,0 +1,11 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki
+
+MAKE_GOALS="${MAKE_GOALS}
+Image.lz4
+"
+
+FILES="${FILES}
+arch/arm64/boot/Image.lz4
+"
diff --git a/build.config.gki.x86_64 b/build.config.gki.x86_64
new file mode 100644
index 0000000..0e04fc6
--- /dev/null
+++ b/build.config.gki.x86_64
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki
+
diff --git a/build.config.gki_kasan b/build.config.gki_kasan
new file mode 100644
index 0000000..6d9de7e
--- /dev/null
+++ b/build.config.gki_kasan
@@ -0,0 +1,22 @@
+DEFCONFIG=gki_defconfig
+POST_DEFCONFIG_CMDS="check_defconfig && update_kasan_config"
+KERNEL_DIR=common
+function update_kasan_config() {
+    ${KERNEL_DIR}/scripts/config --file ${OUT_DIR}/.config \
+         -e CONFIG_KASAN \
+         -e CONFIG_KASAN_INLINE \
+         -e CONFIG_KCOV \
+         -e CONFIG_PANIC_ON_WARN_DEFAULT_ENABLE \
+         -d CONFIG_RANDOMIZE_BASE \
+         -d CONFIG_KASAN_OUTLINE \
+         --set-val CONFIG_FRAME_WARN 0 \
+         -d LTO \
+         -d LTO_CLANG \
+         -d CFI \
+         -d CFI_PERMISSIVE \
+         -d CFI_CLANG \
+         -d SHADOW_CALL_STACK
+    (cd ${OUT_DIR} && \
+     make ${TOOL_ARGS} O=${OUT_DIR} olddefconfig)
+}
+
diff --git a/build.config.gki_kasan.aarch64 b/build.config.gki_kasan.aarch64
new file mode 100644
index 0000000..9fd2560
--- /dev/null
+++ b/build.config.gki_kasan.aarch64
@@ -0,0 +1,3 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki_kasan
diff --git a/build.config.gki_kasan.x86_64 b/build.config.gki_kasan.x86_64
new file mode 100644
index 0000000..eec6458
--- /dev/null
+++ b/build.config.gki_kasan.x86_64
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki_kasan
+
diff --git a/build.config.gki_kprobes b/build.config.gki_kprobes
new file mode 100644
index 0000000..f1c6cf7
--- /dev/null
+++ b/build.config.gki_kprobes
@@ -0,0 +1,20 @@
+DEFCONFIG=gki_defconfig
+POST_DEFCONFIG_CMDS="check_defconfig && update_kprobes_config"
+function update_kprobes_config() {
+    ${KERNEL_DIR}/scripts/config --file ${OUT_DIR}/.config \
+         -d LTO \
+         -d LTO_CLANG_THIN \
+         -d CFI \
+         -d CFI_PERMISSIVE \
+         -d CFI_CLANG \
+         -e CONFIG_DYNAMIC_FTRACE \
+         -e CONFIG_FUNCTION_TRACER \
+         -e CONFIG_IRQSOFF_TRACER \
+         -e CONFIG_FUNCTION_PROFILER \
+         -e CONFIG_PREEMPT_TRACER \
+         -e CONFIG_CHECKPOINT_RESTORE \
+         -d CONFIG_RANDOMIZE_BASE
+    (cd ${OUT_DIR} && \
+     make ${TOOL_ARGS} O=${OUT_DIR} olddefconfig)
+}
+
diff --git a/build.config.gki_kprobes.aarch64 b/build.config.gki_kprobes.aarch64
new file mode 100644
index 0000000..627c217
--- /dev/null
+++ b/build.config.gki_kprobes.aarch64
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki_kprobes
+
diff --git a/build.config.gki_kprobes.x86_64 b/build.config.gki_kprobes.x86_64
new file mode 100644
index 0000000..a1d3da7
--- /dev/null
+++ b/build.config.gki_kprobes.x86_64
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki_kprobes
+
diff --git a/build.config.khwasan b/build.config.khwasan
new file mode 100644
index 0000000..c8e0c5e
--- /dev/null
+++ b/build.config.khwasan
@@ -0,0 +1,17 @@
+append_cmd POST_DEFCONFIG_CMDS update_khwasan_config
+
+function update_khwasan_config() {
+  ${KERNEL_DIR}/scripts/config --file ${OUT_DIR}/.config \
+    -e CONFIG_KASAN \
+    -d CONFIG_KASAN_HW_TAGS \
+    -e CONFIG_KASAN_SW_TAGS \
+    -e CONFIG_KASAN_OUTLINE \
+    -e CONFIG_KASAN_PANIC_ON_WARN \
+    -e CONFIG_KCOV \
+    -e CONFIG_PANIC_ON_WARN_DEFAULT_ENABLE \
+    -d CONFIG_RANDOMIZE_BASE \
+    --set-val CONFIG_FRAME_WARN 0 \
+    -d SHADOW_CALL_STACK
+  (cd ${OUT_DIR} && \
+   make O=${OUT_DIR} "${TOOL_ARGS[@]}" ${MAKE_ARGS} olddefconfig)
+}
diff --git a/build.config.rockpi4 b/build.config.rockpi4
new file mode 100644
index 0000000..c9c9776
--- /dev/null
+++ b/build.config.rockpi4
@@ -0,0 +1,21 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
+TRIM_NONLISTED_KMI=""
+KMI_SYMBOL_LIST_STRICT_MODE=""
+
+BUILD_INITRAMFS=1
+LZ4_RAMDISK=1
+DEFCONFIG=rockpi4_gki_defconfig
+FRAGMENT_CONFIG=${KERNEL_DIR}/arch/arm64/configs/rockpi4_gki.fragment
+PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/${KERNEL_DIR}/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${FRAGMENT_CONFIG}"
+POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}"
+
+DTS_EXT_DIR=common-modules/virtual-device
+DTC_INCLUDE=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/boot/dts/rockchip
+MAKE_GOALS="${MAKE_GOALS}
+rk3399-rock-pi-4b.dtb
+"
+
+FILES="${FILES}
+../common-modules/virtual-device/rk3399-rock-pi-4b.dtb
+"
diff --git a/build.config.x86_64 b/build.config.x86_64
new file mode 100644
index 0000000..8b0633e
--- /dev/null
+++ b/build.config.x86_64
@@ -0,0 +1,14 @@
+ARCH=x86_64
+MAKE_GOALS="
+bzImage
+modules
+"
+
+FILES="
+arch/x86/boot/bzImage
+vmlinux
+System.map
+vmlinux.symvers
+modules.builtin
+modules.builtin.modinfo
+"
diff --git a/crypto/OWNERS b/crypto/OWNERS
new file mode 100644
index 0000000..4ed35a0
--- /dev/null
+++ b/crypto/OWNERS
@@ -0,0 +1 @@
+ardb@google.com
diff --git a/drivers/OWNERS b/drivers/OWNERS
new file mode 100644
index 0000000..c13fa05
--- /dev/null
+++ b/drivers/OWNERS
@@ -0,0 +1,6 @@
+per-file base/**=gregkh@google.com,saravanak@google.com
+per-file block/**=akailash@google.com
+per-file md/**=akailash@google.com,paullawrence@google.com
+per-file net/**=file:/net/OWNERS
+per-file scsi/**=bvanassche@google.com,jaegeuk@google.com
+per-file {tty,usb}/**=gregkh@google.com
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 53b22e2..32fb9e5 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -54,6 +54,15 @@
 	  exhaustively with combinations of various buffer sizes and
 	  alignments.
 
+config ANDROID_VENDOR_HOOKS
+	bool "Android Vendor Hooks"
+	depends on TRACEPOINTS
+	help
+	  Enable vendor hooks implemented as tracepoints.
+
+	  Allow vendor modules to attach to tracepoint "hooks" defined via
+	  DECLARE_HOOK or DECLARE_RESTRICTED_HOOK.
+
 endif # if ANDROID
 
 endmenu
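
For readers unfamiliar with the macros named in the help text, a sketch of how such a hook is typically declared in a header under include/trace/hooks/ (the hook name and arguments here are hypothetical):

#include <trace/hooks/vendor_hooks.h>

DECLARE_HOOK(android_vh_example_adjust,
	TP_PROTO(struct task_struct *task, int *adjust),
	TP_ARGS(task, adjust));
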
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index c9d3d0c9..d488047 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -4,3 +4,4 @@
 obj-$(CONFIG_ANDROID_BINDERFS)		+= binderfs.o
 obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o binder_alloc.o
 obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
+obj-$(CONFIG_ANDROID_VENDOR_HOOKS) += vendor_hooks.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c75fb60..af8ef7b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -66,13 +66,16 @@
 #include <linux/syscalls.h>
 #include <linux/task_work.h>
 #include <linux/sizes.h>
+#include <linux/android_vendor.h>
 
+#include <uapi/linux/sched/types.h>
 #include <uapi/linux/android/binder.h>
 
 #include <asm/cacheflush.h>
 
 #include "binder_internal.h"
 #include "binder_trace.h"
+#include <trace/hooks/binder.h>
 
 static HLIST_HEAD(binder_deferred_list);
 static DEFINE_MUTEX(binder_deferred_lock);
@@ -523,6 +526,7 @@
 		thread = rb_entry(n, struct binder_thread, rb_node);
 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
 		    binder_available_for_proc_work_ilocked(thread)) {
+			trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc);
 			if (sync)
 				wake_up_interruptible_sync(&thread->wait);
 			else
@@ -582,6 +586,7 @@
 	assert_spin_locked(&proc->inner_lock);
 
 	if (thread) {
+		trace_android_vh_binder_wakeup_ilocked(thread->task, sync, proc);
 		if (sync)
 			wake_up_interruptible_sync(&thread->wait);
 		else
@@ -612,22 +617,146 @@
 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
 }
 
-static void binder_set_nice(long nice)
+static bool is_rt_policy(int policy)
 {
-	long min_nice;
+	return policy == SCHED_FIFO || policy == SCHED_RR;
+}
 
-	if (can_nice(current, nice)) {
-		set_user_nice(current, nice);
+static bool is_fair_policy(int policy)
+{
+	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
+}
+
+static bool binder_supported_policy(int policy)
+{
+	return is_fair_policy(policy) || is_rt_policy(policy);
+}
+
+static int to_userspace_prio(int policy, int kernel_priority)
+{
+	if (is_fair_policy(policy))
+		return PRIO_TO_NICE(kernel_priority);
+	else
+		return MAX_RT_PRIO - 1 - kernel_priority;
+}
+
+static int to_kernel_prio(int policy, int user_priority)
+{
+	if (is_fair_policy(policy))
+		return NICE_TO_PRIO(user_priority);
+	else
+		return MAX_RT_PRIO - 1 - user_priority;
+}
+
+static void binder_do_set_priority(struct task_struct *task,
+				   struct binder_priority desired,
+				   bool verify)
+{
+	int priority; /* user-space prio value */
+	bool has_cap_nice;
+	unsigned int policy = desired.sched_policy;
+
+	if (task->policy == policy && task->normal_prio == desired.prio)
 		return;
+
+	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
+
+	priority = to_userspace_prio(policy, desired.prio);
+
+	if (verify && is_rt_policy(policy) && !has_cap_nice) {
+		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
+
+		if (max_rtprio == 0) {
+			policy = SCHED_NORMAL;
+			priority = MIN_NICE;
+		} else if (priority > max_rtprio) {
+			priority = max_rtprio;
+		}
 	}
-	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
-	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
-		     "%d: nice value %ld not allowed use %ld instead\n",
-		      current->pid, nice, min_nice);
-	set_user_nice(current, min_nice);
-	if (min_nice <= MAX_NICE)
+
+	if (verify && is_fair_policy(policy) && !has_cap_nice) {
+		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
+
+		if (min_nice > MAX_NICE) {
+			binder_user_error("%d RLIMIT_NICE not set\n",
+					  task->pid);
+			return;
+		} else if (priority < min_nice) {
+			priority = min_nice;
+		}
+	}
+
+	if (policy != desired.sched_policy ||
+	    to_kernel_prio(policy, priority) != desired.prio)
+		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+			     "%d: priority %d not allowed, using %d instead\n",
+			      task->pid, desired.prio,
+			      to_kernel_prio(policy, priority));
+
+	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
+				  to_kernel_prio(policy, priority),
+				  desired.prio);
+
+	/* Set the actual priority */
+	if (task->policy != policy || is_rt_policy(policy)) {
+		struct sched_param params;
+
+		params.sched_priority = is_rt_policy(policy) ? priority : 0;
+
+		sched_setscheduler_nocheck(task,
+					   policy | SCHED_RESET_ON_FORK,
+					   &params);
+	}
+	if (is_fair_policy(policy))
+		set_user_nice(task, priority);
+}
+
+static void binder_set_priority(struct task_struct *task,
+				struct binder_priority desired)
+{
+	binder_do_set_priority(task, desired, /* verify = */ true);
+}
+
+static void binder_restore_priority(struct task_struct *task,
+				    struct binder_priority desired)
+{
+	binder_do_set_priority(task, desired, /* verify = */ false);
+}
+
+static void binder_transaction_priority(struct task_struct *task,
+					struct binder_transaction *t,
+					struct binder_priority node_prio,
+					bool inherit_rt)
+{
+	struct binder_priority desired_prio = t->priority;
+
+	if (t->set_priority_called)
 		return;
-	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
+
+	t->set_priority_called = true;
+	t->saved_priority.sched_policy = task->policy;
+	t->saved_priority.prio = task->normal_prio;
+
+	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
+		desired_prio.prio = NICE_TO_PRIO(0);
+		desired_prio.sched_policy = SCHED_NORMAL;
+	}
+
+	if (node_prio.prio < t->priority.prio ||
+	    (node_prio.prio == t->priority.prio &&
+	     node_prio.sched_policy == SCHED_FIFO)) {
+		/*
+		 * In case the minimum priority on the node is
+		 * higher (lower value), use that priority. If
+		 * the priority is the same, but the node uses
+		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
+		 * run unbounded, unlike SCHED_RR.
+		 */
+		desired_prio = node_prio;
+	}
+
+	binder_set_priority(task, desired_prio);
+	trace_android_vh_binder_set_priority(t, task);
 }
 
 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
@@ -680,6 +809,7 @@
 	binder_uintptr_t ptr = fp ? fp->binder : 0;
 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
 	__u32 flags = fp ? fp->flags : 0;
+	s8 priority;
 
 	assert_spin_locked(&proc->inner_lock);
 
@@ -712,8 +842,12 @@
 	node->ptr = ptr;
 	node->cookie = cookie;
 	node->work.type = BINDER_WORK_NODE;
-	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
+		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
+	node->min_priority = to_kernel_prio(node->sched_policy, priority);
 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
 	spin_lock_init(&node->lock);
 	INIT_LIST_HEAD(&node->work.entry);
@@ -2354,11 +2488,15 @@
 				    struct binder_thread *thread)
 {
 	struct binder_node *node = t->buffer->target_node;
+	struct binder_priority node_prio;
 	bool oneway = !!(t->flags & TF_ONE_WAY);
 	bool pending_async = false;
 
 	BUG_ON(!node);
 	binder_node_lock(node);
+	node_prio.prio = node->min_priority;
+	node_prio.sched_policy = node->sched_policy;
+
 	if (oneway) {
 		BUG_ON(thread);
 		if (node->has_async_transaction)
@@ -2383,12 +2521,15 @@
 	if (!thread && !pending_async)
 		thread = binder_select_thread_ilocked(proc);
 
-	if (thread)
+	if (thread) {
+		binder_transaction_priority(thread->task, t, node_prio,
+					    node->inherit_rt);
 		binder_enqueue_thread_work_ilocked(thread, &t->work);
-	else if (!pending_async)
+	} else if (!pending_async) {
 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
-	else
+	} else {
 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
+	}
 
 	if (!pending_async)
 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
@@ -2510,7 +2651,6 @@
 		}
 		thread->transaction_stack = in_reply_to->to_parent;
 		binder_inner_proc_unlock(proc);
-		binder_set_nice(in_reply_to->saved_priority);
 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
 		if (target_thread == NULL) {
 			/* annotation for sparse */
@@ -2675,6 +2815,7 @@
 	INIT_LIST_HEAD(&t->fd_fixups);
 	binder_stats_created(BINDER_STAT_TRANSACTION);
 	spin_lock_init(&t->lock);
+	trace_android_vh_binder_transaction_init(t);
 
 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
 	if (tcomplete == NULL) {
@@ -2715,7 +2856,15 @@
 	t->to_thread = target_thread;
 	t->code = tr->code;
 	t->flags = tr->flags;
-	t->priority = task_nice(current);
+	if (!(t->flags & TF_ONE_WAY) &&
+	    binder_supported_policy(current->policy)) {
+		/* Inherit supported policies for synchronous transactions */
+		t->priority.sched_policy = current->policy;
+		t->priority.prio = current->normal_prio;
+	} else {
+		/* Otherwise, fall back to the default priority */
+		t->priority = target_proc->default_priority;
+	}
 
 	if (target_node && target_node->txn_security_ctx) {
 		u32 secid;
@@ -3048,6 +3197,8 @@
 		target_proc->outstanding_txns++;
 		binder_inner_proc_unlock(target_proc);
 		wake_up_interruptible_sync(&target_thread->wait);
+		trace_android_vh_binder_restore_priority(in_reply_to, current);
+		binder_restore_priority(current, in_reply_to->saved_priority);
 		binder_free_transaction(in_reply_to);
 	} else if (!(t->flags & TF_ONE_WAY)) {
 		BUG_ON(t->buffer->async_transaction != 0);
@@ -3162,6 +3313,8 @@
 
 	BUG_ON(thread->return_error.cmd != BR_OK);
 	if (in_reply_to) {
+		trace_android_vh_binder_restore_priority(in_reply_to, current);
+		binder_restore_priority(current, in_reply_to->saved_priority);
 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
 		binder_enqueue_thread_work(thread, &thread->return_error.work);
 		binder_send_failed_reply(in_reply_to, return_error);
@@ -3722,6 +3875,7 @@
 		if (do_proc_work)
 			list_add(&thread->waiting_thread_node,
 				 &proc->waiting_threads);
+		trace_android_vh_binder_wait_for_work(do_proc_work, thread, proc);
 		binder_inner_proc_unlock(proc);
 		schedule();
 		binder_inner_proc_lock(proc);
@@ -3839,7 +3993,8 @@
 			wait_event_interruptible(binder_user_error_wait,
 						 binder_stop_on_user_error < 2);
 		}
-		binder_set_nice(proc->default_priority);
+		trace_android_vh_binder_restore_priority(NULL, current);
+		binder_restore_priority(current, proc->default_priority);
 	}
 
 	if (non_block) {
@@ -4066,16 +4221,14 @@
 		BUG_ON(t->buffer == NULL);
 		if (t->buffer->target_node) {
 			struct binder_node *target_node = t->buffer->target_node;
+			struct binder_priority node_prio;
 
 			trd->target.ptr = target_node->ptr;
 			trd->cookie =  target_node->cookie;
-			t->saved_priority = task_nice(current);
-			if (t->priority < target_node->min_priority &&
-			    !(t->flags & TF_ONE_WAY))
-				binder_set_nice(t->priority);
-			else if (!(t->flags & TF_ONE_WAY) ||
-				 t->saved_priority > target_node->min_priority)
-				binder_set_nice(target_node->min_priority);
+			node_prio.sched_policy = target_node->sched_policy;
+			node_prio.prio = target_node->min_priority;
+			binder_transaction_priority(current, t, node_prio,
+						    target_node->inherit_rt);
 			cmd = BR_TRANSACTION;
 		} else {
 			trd->target.ptr = 0;
@@ -4093,6 +4246,7 @@
 			trd->sender_pid =
 				task_tgid_nr_ns(sender,
 						task_active_pid_ns(current));
+			trace_android_vh_sync_txn_recvd(thread->task, t_from->task);
 		} else {
 			trd->sender_pid = 0;
 		}
@@ -4293,6 +4447,8 @@
 	binder_stats_created(BINDER_STAT_THREAD);
 	thread->proc = proc;
 	thread->pid = current->pid;
+	get_task_struct(current);
+	thread->task = current;
 	atomic_set(&thread->tmp_ref, 0);
 	init_waitqueue_head(&thread->wait);
 	INIT_LIST_HEAD(&thread->todo);
@@ -4354,6 +4510,7 @@
 	BUG_ON(!list_empty(&thread->todo));
 	binder_stats_deleted(BINDER_STAT_THREAD);
 	binder_proc_dec_tmpref(thread->proc);
+	put_task_struct(thread->task);
 	kfree(thread);
 }
 
@@ -5047,7 +5204,14 @@
 	proc->cred = get_cred(filp->f_cred);
 	INIT_LIST_HEAD(&proc->todo);
 	init_waitqueue_head(&proc->freeze_wait);
-	proc->default_priority = task_nice(current);
+	if (binder_supported_policy(current->policy)) {
+		proc->default_priority.sched_policy = current->policy;
+		proc->default_priority.prio = current->normal_prio;
+	} else {
+		proc->default_priority.sched_policy = SCHED_NORMAL;
+		proc->default_priority.prio = NICE_TO_PRIO(0);
+	}
+
 	/* binderfs stashes devices in i_private */
 	if (is_binderfs_device(nodp)) {
 		binder_dev = nodp->i_private;
@@ -5372,13 +5536,14 @@
 	spin_lock(&t->lock);
 	to_proc = t->to_proc;
 	seq_printf(m,
-		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
 		   prefix, t->debug_id, t,
 		   t->from ? t->from->proc->pid : 0,
 		   t->from ? t->from->pid : 0,
 		   to_proc ? to_proc->pid : 0,
 		   t->to_thread ? t->to_thread->pid : 0,
-		   t->code, t->flags, t->priority, t->need_reply);
+		   t->code, t->flags, t->priority.sched_policy,
+		   t->priority.prio, t->need_reply);
 	spin_unlock(&t->lock);
 
 	if (proc != to_proc) {
@@ -5496,8 +5661,9 @@
 	hlist_for_each_entry(ref, &node->refs, node_entry)
 		count++;
 
-	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
+	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
+		   node->sched_policy, node->min_priority,
 		   node->has_strong_ref, node->has_weak_ref,
 		   node->local_strong_refs, node->local_weak_refs,
 		   node->internal_strong_refs, count, node->tmp_refs);
@@ -6021,5 +6187,6 @@
 
 #define CREATE_TRACE_POINTS
 #include "binder_trace.h"
+EXPORT_TRACEPOINT_SYMBOL_GPL(binder_transaction_received);
 
 MODULE_LICENSE("GPL v2");
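
To make the to_userspace_prio()/to_kernel_prio() mappings above concrete, a few worked values, assuming the usual scheduler constants (MAX_RT_PRIO == 100, NICE_TO_PRIO(n) == 120 + n, PRIO_TO_NICE(p) == p - 120):

/*
 * to_userspace_prio(SCHED_NORMAL, 120) == 0    (kernel 120 is nice 0)
 * to_userspace_prio(SCHED_FIFO,     2) == 97   (99 - 2, i.e. rtprio 97)
 * to_kernel_prio(SCHED_NORMAL,    -10) == 110  (nice -10)
 * to_kernel_prio(SCHED_RR,         99) == 0    (highest RT priority)
 */
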
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index d6b6b8c..6af4ad9 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -237,10 +237,13 @@
  *                        and by @lock)
  * @has_async_transaction: async transaction to node in progress
  *                        (protected by @lock)
+ * @sched_policy:         minimum scheduling policy for node
+ *                        (invariant after initialized)
  * @accept_fds:           file descriptor operations supported for node
  *                        (invariant after initialized)
  * @min_priority:         minimum scheduling priority
  *                        (invariant after initialized)
+ * @inherit_rt:           inherit RT scheduling policy from caller
  * @txn_security_ctx:     require sender's security context
  *                        (invariant after initialized)
  * @async_todo:           list of async work items
@@ -278,6 +281,8 @@
 		/*
 		 * invariant after initialization
 		 */
+		u8 sched_policy:2;
+		u8 inherit_rt:1;
 		u8 accept_fds:1;
 		u8 txn_security_ctx:1;
 		u8 min_priority;
@@ -347,6 +352,22 @@
 };
 
 /**
+ * struct binder_priority - scheduler policy and priority
+ * @sched_policy:           scheduler policy
+ * @prio:                   [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
+ *
+ * The binder driver supports inheriting the following scheduler policies:
+ * SCHED_NORMAL
+ * SCHED_BATCH
+ * SCHED_FIFO
+ * SCHED_RR
+ */
+struct binder_priority {
+	unsigned int sched_policy;
+	int prio;
+};
+
+/**
  * struct binder_proc - binder process bookkeeping
  * @proc_node:            element for binder_procs list
  * @threads:              rbtree of binder_threads in this proc
@@ -446,7 +467,7 @@
 	int requested_threads;
 	int requested_threads_started;
 	int tmp_ref;
-	long default_priority;
+	struct binder_priority default_priority;
 	struct dentry *debugfs_entry;
 	struct binder_alloc alloc;
 	struct binder_context *context;
@@ -489,6 +510,7 @@
  * @is_dead:              thread is dead and awaiting free
  *                        when outstanding transactions are cleaned up
  *                        (protected by @proc->inner_lock)
+ * @task:                 struct task_struct for this thread
  *
  * Bookkeeping structure for binder threads.
  */
@@ -508,6 +530,7 @@
 	struct binder_stats stats;
 	atomic_t tmp_ref;
 	bool is_dead;
+	struct task_struct *task;
 };
 
 /**
@@ -541,8 +564,9 @@
 	struct binder_buffer *buffer;
 	unsigned int    code;
 	unsigned int    flags;
-	long    priority;
-	long    saved_priority;
+	struct binder_priority priority;
+	struct binder_priority saved_priority;
+	bool set_priority_called;
 	kuid_t  sender_euid;
 	struct list_head fd_fixups;
 	binder_uintptr_t security_ctx;
@@ -553,6 +577,7 @@
 	 * during thread teardown
 	 */
 	spinlock_t lock;
+	ANDROID_VENDOR_DATA(1);
 };
 
 /**
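
Complementing the decode in binder_init_node_ilocked() shown earlier, a hedged sketch of how userspace packs the same fields into flat_binder_object flags (the mask/shift names come from the Android binder UAPI used by this patch; the values are illustrative):

__u32 flags = 0;

flags |= 97 & FLAT_BINDER_FLAG_PRIORITY_MASK;	/* userspace prio value */
flags |= (SCHED_FIFO << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) &
	 FLAT_BINDER_FLAG_SCHED_POLICY_MASK;
flags |= FLAT_BINDER_FLAG_INHERIT_RT;		/* callers may pass RT */
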
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 8eeccdc..b36edc3 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -76,6 +76,30 @@
 DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
 DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
 
+TRACE_EVENT(binder_set_priority,
+	TP_PROTO(int proc, int thread, unsigned int old_prio,
+		 unsigned int new_prio, unsigned int desired_prio),
+	TP_ARGS(proc, thread, old_prio, new_prio, desired_prio),
+
+	TP_STRUCT__entry(
+		__field(int, proc)
+		__field(int, thread)
+		__field(unsigned int, old_prio)
+		__field(unsigned int, new_prio)
+		__field(unsigned int, desired_prio)
+	),
+	TP_fast_assign(
+		__entry->proc = proc;
+		__entry->thread = thread;
+		__entry->old_prio = old_prio;
+		__entry->new_prio = new_prio;
+		__entry->desired_prio = desired_prio;
+	),
+	TP_printk("proc=%d thread=%d old=%d => new=%d desired=%d",
+		  __entry->proc, __entry->thread, __entry->old_prio,
+		  __entry->new_prio, __entry->desired_prio)
+);
+
 TRACE_EVENT(binder_wait_for_work,
 	TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
 	TP_ARGS(proc_work, transaction_stack, thread_todo),
diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c
new file mode 100644
index 0000000..0b71af5
--- /dev/null
+++ b/drivers/android/vendor_hooks.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* vendor_hooks.c
+ *
+ * Android Vendor Hook Support
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#define CREATE_TRACE_POINTS
+#include <trace/hooks/vendor_hooks.h>
+#include <trace/hooks/sched.h>
+#include <trace/hooks/fpsimd.h>
+#include <trace/hooks/binder.h>
+#include <trace/hooks/dtask.h>
+#include <trace/hooks/cpuidle.h>
+#include <trace/hooks/topology.h>
+#include <trace/hooks/mpam.h>
+#include <trace/hooks/gic.h>
+#include <trace/hooks/wqlockup.h>
+#include <trace/hooks/debug.h>
+#include <trace/hooks/sysrqcrash.h>
+#include <trace/hooks/printk.h>
+#include <trace/hooks/gic_v3.h>
+#include <trace/hooks/epoch.h>
+#include <trace/hooks/cpufreq.h>
+#include <trace/hooks/mm.h>
+#include <trace/hooks/preemptirq.h>
+#include <trace/hooks/ftrace_dump.h>
+#include <trace/hooks/ufshcd.h>
+#include <trace/hooks/cgroup.h>
+#include <trace/hooks/sys.h>
+#include <trace/hooks/iommu.h>
+#include <trace/hooks/net.h>
+#include <trace/hooks/timer.h>
+#include <trace/hooks/pm_domain.h>
+#include <trace/hooks/cpuidle_psci.h>
+#include <trace/hooks/vmscan.h>
+#include <trace/hooks/avc.h>
+#include <trace/hooks/creds.h>
+#include <trace/hooks/memory.h>
+#include <trace/hooks/module.h>
+#include <trace/hooks/selinux.h>
+#include <trace/hooks/syscall_check.h>
+
+/*
+ * Export tracepoints that act as a bare tracehook (i.e. have no trace event
+ * associated with them) to allow external modules to probe them.
+ */
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_select_task_rq_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_select_task_rq_rt);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_select_fallback_rq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_scheduler_tick);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_can_migrate_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_lowest_rq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rtmutex_prepare_setprio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_prepare_prio_fork);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_finish_prio_fork);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_arch_set_freq_scale);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_is_fpsimd_save);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_init);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_set_priority);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_restore_priority);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wakeup_ilocked);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_show_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_enter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mpam_set);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_resume);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wq_lockup_pool);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ipi_stop);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sysrq_crash);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dump_throttled_rt_tasks);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_hotplug);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_jiffies_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_set_affinity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_affinity_init);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_suspend_epoch_val);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_resume_epoch_val);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_table_limits);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_newidle_balance);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_nohz_balancer_kick);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_migrate_queued_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_energy_efficient_cpu);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_sugov_sched_attr);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_iowait);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_sugov_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_setaffinity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpus_allowed);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_skip_swapcache_flags);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_gfp_zone_flags);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_readahead_gfp_mask);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_preempt_disable);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_preempt_enable);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_disable);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_enable);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_task_cpu);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_try_to_wake_up);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_try_to_wake_up_success);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_fork);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_wake_up_new_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_new_task_stats);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_flush_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tick_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_schedule);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_cpu_starting);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_cpu_dying);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_account_irq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_place_entity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_build_perf_domains);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpu_capacity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_misfit_status);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_attach);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_can_attach);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_online);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_fork_init);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ttwu_cond);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_schedule_bug);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_exec);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_map_util_freq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_em_cpu_energy);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_oops_enter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_oops_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_size_check);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_format_check);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_dump_buffer);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_fill_prdt);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_prepare_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sysfs);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_compl_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_set_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_syscall_prctl_finished);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_uic_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_tm_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_check_int_errors);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_attach);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_setup_dma_ops);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_iovad_alloc_iova);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_iovad_free_iova);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ptype_head);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kfree_skb);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_timer_calc_index);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_allow_domain_state);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_enter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cgroup_force_kthread_migration);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_tick);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_wakeup_ignore);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_replace_next_task_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wait_for_work);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sync_txn_recvd);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_build_sched_domains);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_update_topology_flags_workfn);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_balance_rt);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_pick_next_entity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_wakeup);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpufreq_transition);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_balance_anon_file_reclaim);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_show_max_freq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_after_enqueue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_after_dequeue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_entity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_entity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_entity_tick);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_task_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_insert);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_node_delete);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_node_replace);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_lookup);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_commit_creds);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_exit_creds);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_override_creds);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_revert_creds);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_nx);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_rw);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_module_permit_before_init);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_module_permit_after_init);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_is_initialized);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_mmap_file);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_file_open);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_bpf_syscall);
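
A vendor module attaches to one of these exports by registering a probe whose signature is the hook's TP_PROTO with a leading void *data parameter. A sketch against android_vh_binder_set_priority, assuming its prototype is (t, task) as at the binder.c call site:

#include <trace/hooks/binder.h>

static void probe_binder_set_priority(void *data,
				      struct binder_transaction *t,
				      struct task_struct *task)
{
	/* vendor-specific accounting would go here */
}

static int __init vendor_mod_init(void)
{
	return register_trace_android_vh_binder_set_priority(
			probe_binder_set_priority, NULL);
}
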
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index ff16a36..cbecaa3 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
+#include <trace/hooks/topology.h>
 
 static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
 static struct cpumask scale_freq_counters_mask;
@@ -141,6 +142,8 @@
 
 	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
 
+	trace_android_vh_arch_set_freq_scale(cpus, cur_freq, max_freq, &scale);
+
 	for_each_cpu(i, cpus)
 		per_cpu(arch_freq_scale, i) = scale;
 }
@@ -154,6 +157,7 @@
 }
 
 DEFINE_PER_CPU(unsigned long, thermal_pressure);
+EXPORT_PER_CPU_SYMBOL_GPL(thermal_pressure);
 
 void topology_set_thermal_pressure(const struct cpumask *cpus,
 			       unsigned long th_pressure)
@@ -199,6 +203,8 @@
 subsys_initcall(register_cpu_capacity_sysctl);
 
 static int update_topology;
+bool topology_update_done;
+EXPORT_SYMBOL_GPL(topology_update_done);
 
 int topology_update_cpu_topology(void)
 {
@@ -213,6 +219,8 @@
 {
 	update_topology = 1;
 	rebuild_sched_domains();
+	topology_update_done = true;
+	trace_android_vh_update_topology_flags_workfn(NULL);
 	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
 	update_topology = 0;
 }
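
The &scale argument makes this hook an output parameter: a probe may rewrite the computed frequency scale before it is stored per-CPU. A hedged probe sketch (the clamp policy is purely illustrative):

static void probe_freq_scale(void *data, const struct cpumask *cpus,
			     unsigned long cur_freq, unsigned long max_freq,
			     unsigned long *scale)
{
	if (*scale > SCHED_CAPACITY_SCALE)
		*scale = SCHED_CAPACITY_SCALE;
}
/* registered via register_trace_android_vh_arch_set_freq_scale() */
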
diff --git a/drivers/base/core.c b/drivers/base/core.c
index fd034d7..1029147 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1581,7 +1581,7 @@
 }
 early_param("fw_devlink", fw_devlink_setup);
 
-static bool fw_devlink_strict;
+static bool fw_devlink_strict = true;
 static int __init fw_devlink_strict_setup(char *arg)
 {
 	return strtobool(arg, &fw_devlink_strict);
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index cd08c58..22424e7 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -12,6 +12,8 @@
 #include <linux/cpumask.h>
 #include <linux/ktime.h>
 
+#include <trace/hooks/pm_domain.h>
+
 static int dev_update_qos_constraint(struct device *dev, void *data)
 {
 	s64 *constraint_ns_p = data;
@@ -174,6 +176,11 @@
 	struct pm_domain_data *pdd;
 	s64 min_off_time_ns;
 	s64 off_on_time_ns;
+	bool allow = true;
+
+	trace_android_vh_allow_domain_state(genpd, state, &allow);
+	if (!allow)
+		return false;
 
 	off_on_time_ns = genpd->states[state].power_off_latency_ns +
 		genpd->states[state].power_on_latency_ns;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 04ea92c..07a51ad 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -34,6 +34,7 @@
 #include <linux/cpufreq.h>
 #include <linux/devfreq.h>
 #include <linux/timer.h>
+#include <linux/wakeup_reason.h>
 
 #include "../base.h"
 #include "power.h"
@@ -1241,6 +1242,8 @@
 	error = dpm_run_callback(callback, dev, state, info);
 	if (error) {
 		async_error = error;
+		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+					 dev_name(dev), callback, error);
 		goto Complete;
 	}
 
@@ -1435,6 +1438,8 @@
 	error = dpm_run_callback(callback, dev, state, info);
 	if (error) {
 		async_error = error;
+		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+					 dev_name(dev), callback, error);
 		goto Complete;
 	}
 	dpm_propagate_wakeup_to_parent(dev);
@@ -1711,6 +1716,9 @@
 
 		dpm_propagate_wakeup_to_parent(dev);
 		dpm_clear_superiors_direct_complete(dev);
+	} else {
+		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+					 dev_name(dev), callback, error);
 	}
 
 	device_unlock(dev);
@@ -1924,6 +1932,9 @@
 		} else {
 			dev_info(dev, "not prepared for power transition: code %d\n",
 				 error);
+			log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
+						 dev_name(dev), error);
+			dpm_save_failed_dev(dev_name(dev));
 		}
 
 		mutex_unlock(&dpm_list_mtx);
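
log_suspend_abort_reason() records the abort via the wakeup_reason mechanism (linux/wakeup_reason.h, included above) in addition to the normal kernel log. With %pS resolving the callback pointer to a symbol, a failure surfaces roughly as (device name, symbol and error code hypothetical):

    Callback failed on 1-1.2 in usb_dev_suspend+0x0/0x28 returned -16
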
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 8e93167..0489db3 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -137,6 +137,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
 
 /**
  * apply_constraint - Add/modify/remove device PM QoS request.
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 99bda0da..cb97aa5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -15,6 +15,9 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/pm_wakeirq.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/wakeup_reason.h>
 #include <trace/events/power.h>
 
 #include "power.h"
@@ -873,6 +876,37 @@
 }
 EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
 
+void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max)
+{
+	struct wakeup_source *ws, *last_active_ws = NULL;
+	int len = 0;
+	bool active = false;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		if (ws->active && len < max) {
+			if (!active)
+				len += scnprintf(pending_wakeup_source, max,
+						"Pending Wakeup Sources: ");
+			len += scnprintf(pending_wakeup_source + len, max - len,
+				"%s ", ws->name);
+			active = true;
+		} else if (!active &&
+			   (!last_active_ws ||
+			    ktime_to_ns(ws->last_time) >
+			    ktime_to_ns(last_active_ws->last_time))) {
+			last_active_ws = ws;
+		}
+	}
+	if (!active && last_active_ws) {
+		scnprintf(pending_wakeup_source, max,
+				"Last active Wakeup Source: %s",
+				last_active_ws->name);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
+
 void pm_print_active_wakeup_sources(void)
 {
 	struct wakeup_source *ws;
@@ -911,6 +945,7 @@
 {
 	unsigned long flags;
 	bool ret = false;
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 
 	raw_spin_lock_irqsave(&events_lock, flags);
 	if (events_check_enabled) {
@@ -925,6 +960,10 @@
 	if (ret) {
 		pm_pr_dbg("Wakeup pending, aborting suspend\n");
 		pm_print_active_wakeup_sources();
+		pm_get_active_wakeup_sources(suspend_abort,
+					     MAX_SUSPEND_ABORT_LEN);
+		log_suspend_abort_reason(suspend_abort);
+		pr_info("PM: %s\n", suspend_abort);
 	}
 
 	return ret || atomic_read(&pm_abort_suspend) > 0;
@@ -952,6 +991,18 @@
 void pm_system_irq_wakeup(unsigned int irq_number)
 {
 	if (pm_wakeup_irq == 0) {
+		struct irq_desc *desc;
+		const char *name = "null";
+
+		desc = irq_to_desc(irq_number);
+		if (desc == NULL)
+			name = "stray irq";
+		else if (desc->action && desc->action->name)
+			name = desc->action->name;
+
+		log_irq_wakeup_reason(irq_number);
+		pr_warn("%s: %d triggered %s\n", __func__, irq_number, name);
+
 		pm_wakeup_irq = irq_number;
 		pm_system_wakeup();
 	}
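
pm_get_active_wakeup_sources() fills the caller's buffer with the list of currently active wakeup sources, or with the most recently active one when none is active; the pm_wakeup_pending() hunk below is the in-tree caller. The call pattern is simply:

    char reason[MAX_SUSPEND_ABORT_LEN];

    pm_get_active_wakeup_sources(reason, sizeof(reason));
    pr_info("PM: %s\n", reason);
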
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 13db1f78..3a9527d 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <trace/events/power.h>
+#include <linux/wakeup_reason.h>
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);
@@ -73,7 +74,9 @@
 	return 0;
 
  err_out:
-	pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend);
+	log_suspend_abort_reason("System core suspend callback %pS failed",
+		ops->suspend);
+	pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend);
 
 	list_for_each_entry_continue(ops, &syscore_ops_list, node)
 		if (ops->resume)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 566ee2c..f44cb6d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -72,6 +72,8 @@
 	unsigned long		flags;
 	bool			orphan;
 	bool			rpm_enabled;
+	bool			need_sync;
+	bool			boot_enabled;
 	unsigned int		enable_count;
 	unsigned int		prepare_count;
 	unsigned int		protect_count;
@@ -1215,6 +1217,10 @@
 	hlist_for_each_entry(child, &core->children, child_node)
 		clk_unprepare_unused_subtree(child);
 
+	if (dev_has_sync_state(core->dev) &&
+	    !(core->flags & CLK_DONT_HOLD_STATE))
+		return;
+
 	if (core->prepare_count)
 		return;
 
@@ -1246,6 +1252,10 @@
 	hlist_for_each_entry(child, &core->children, child_node)
 		clk_disable_unused_subtree(child);
 
+	if (dev_has_sync_state(core->dev) &&
+	    !(core->flags & CLK_DONT_HOLD_STATE))
+		return;
+
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_prepare_enable(core->parent);
 
@@ -1319,6 +1329,38 @@
 }
 late_initcall_sync(clk_disable_unused);
 
+static void clk_unprepare_disable_dev_subtree(struct clk_core *core,
+					      struct device *dev)
+{
+	struct clk_core *child;
+
+	lockdep_assert_held(&prepare_lock);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		clk_unprepare_disable_dev_subtree(child, dev);
+
+	if (core->dev != dev || !core->need_sync)
+		return;
+
+	clk_core_disable_unprepare(core);
+}
+
+void clk_sync_state(struct device *dev)
+{
+	struct clk_core *core;
+
+	clk_prepare_lock();
+
+	hlist_for_each_entry(core, &clk_root_list, child_node)
+		clk_unprepare_disable_dev_subtree(core, dev);
+
+	hlist_for_each_entry(core, &clk_orphan_list, child_node)
+		clk_unprepare_disable_dev_subtree(core, dev);
+
+	clk_prepare_unlock();
+}
+EXPORT_SYMBOL_GPL(clk_sync_state);
+
 static int clk_core_determine_round_nolock(struct clk_core *core,
 					   struct clk_rate_request *req)
 {
@@ -1725,6 +1767,33 @@
 }
 EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
 
+static void clk_core_hold_state(struct clk_core *core)
+{
+	if (core->need_sync || !core->boot_enabled)
+		return;
+
+	if (core->orphan || !dev_has_sync_state(core->dev))
+		return;
+
+	if (core->flags & CLK_DONT_HOLD_STATE)
+		return;
+
+	core->need_sync = !clk_core_prepare_enable(core);
+}
+
+static void __clk_core_update_orphan_hold_state(struct clk_core *core)
+{
+	struct clk_core *child;
+
+	if (core->orphan)
+		return;
+
+	clk_core_hold_state(core);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		__clk_core_update_orphan_hold_state(child);
+}
+
 /*
  * Update the orphan status of @core and all its children.
  */
@@ -2031,6 +2100,13 @@
 			fail_clk = core;
 	}
 
+	if (core->ops->pre_rate_change) {
+		ret = core->ops->pre_rate_change(core->hw, core->rate,
+						 core->new_rate);
+		if (ret)
+			fail_clk = core;
+	}
+
 	hlist_for_each_entry(child, &core->children, child_node) {
 		/* Skip children who will be reparented to another clock */
 		if (child->new_parent && child->new_parent != core)
@@ -2125,6 +2201,9 @@
 	if (core->flags & CLK_RECALC_NEW_RATES)
 		(void)clk_calc_new_rates(core, core->new_rate);
 
+	if (core->ops->post_rate_change)
+		core->ops->post_rate_change(core->hw, old_rate, core->rate);
+
 	/*
 	 * Use safe iteration, as change_rate can actually swap parents
 	 * for certain clock types.
@@ -3054,7 +3133,7 @@
 }
 DEFINE_SHOW_ATTRIBUTE(clk_dump);
 
-#undef CLOCK_ALLOW_WRITE_DEBUGFS
+#define CLOCK_ALLOW_WRITE_DEBUGFS
 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
 /*
  * This can be dangerous, therefore don't provide any real compile time
@@ -3395,6 +3474,7 @@
 			__clk_set_parent_after(orphan, parent, NULL);
 			__clk_recalc_accuracies(orphan);
 			__clk_recalc_rates(orphan, 0);
+			__clk_core_update_orphan_hold_state(orphan);
 		}
 	}
 }
@@ -3561,6 +3641,8 @@
 		rate = 0;
 	core->rate = core->req_rate = rate;
 
+	core->boot_enabled = clk_core_is_enabled(core);
+
 	/*
 	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
 	 * don't get accidentally disabled when walking the orphan tree and
@@ -3583,6 +3665,7 @@
 		}
 	}
 
+	clk_core_hold_state(core);
 	clk_core_reparent_orphans_nolock();
 
 
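
The sync_state half of this change is wired up concretely in the qcom hunks that follow: each provider sets .sync_state = clk_sync_state, so clocks held enabled for not-yet-probed consumers are released once all consumers have probed. The other half, the pre_rate_change/post_rate_change callbacks, brackets clk_change_rate(); a non-zero return from pre_rate_change flags the clock as fail_clk, causing the core to abort the rate change. A provider-side sketch, with the signatures inferred from the call sites above (the foo_* names and rate ceiling are hypothetical):

    #define FOO_MAX_SAFE_RATE	800000000UL	/* hypothetical ceiling */

    static int foo_pre_rate_change(struct clk_hw *hw, unsigned long rate,
                                   unsigned long new_rate)
    {
            /* Veto switches beyond what the downstream IP tolerates. */
            return new_rate > FOO_MAX_SAFE_RATE ? -EINVAL : 0;
    }

    static int foo_post_rate_change(struct clk_hw *hw, unsigned long old_rate,
                                    unsigned long rate)
    {
            /* e.g. retune dividers once the new rate is live */
            return 0;
    }

    static const struct clk_ops foo_ops = {
            /* other clk_ops members omitted */
            .pre_rate_change  = foo_pre_rate_change,
            .post_rate_change = foo_post_rate_change,
    };
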
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 735adfe..414ffb6 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -3,6 +3,7 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -869,6 +870,7 @@
 	.driver		= {
 		.name	= "disp_cc-sdm845",
 		.of_match_table = disp_cc_sdm845_match_table,
+		.sync_state = clk_sync_state,
 	},
 };
 
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 407e2c5..1310c23 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -3205,6 +3205,7 @@
 	.driver		= {
 		.name	= "gcc-msm8998",
 		.of_match_table = gcc_msm8998_match_table,
+		.sync_state = clk_sync_state,
 	},
 };
 
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 58aa3ec..c2f093f 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -3624,6 +3624,7 @@
 	.driver		= {
 		.name	= "gcc-sdm845",
 		.of_match_table = gcc_sdm845_match_table,
+		.sync_state = clk_sync_state,
 	},
 };
 
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 110b544..8749aab 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -205,6 +205,7 @@
 	.driver = {
 		.name = "sdm845-gpucc",
 		.of_match_table = gpu_cc_sdm845_match_table,
+		.sync_state = clk_sync_state,
 	},
 };
 
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index c77a4dd..f678ade 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -337,6 +337,7 @@
 	.driver		= {
 		.name	= "sdm845-videocc",
 		.of_match_table = video_cc_sdm845_match_table,
+		.sync_state = clk_sync_state,
 	},
 };
 
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f65e31b..b519958 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -128,7 +128,7 @@
 	  Enables the support for the RDA Micro timer driver.
 
 config SUN4I_TIMER
-	bool "Sun4i timer driver" if COMPILE_TEST
+	bool "Sun4i timer driver"
 	depends on HAS_IOMEM
 	select CLKSRC_MMIO
 	select TIMER_OF
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index c3038cd..c63de9e 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -35,6 +35,13 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_TIMES
+	bool "CPU frequency time-in-state statistics"
+	help
+	  Export CPU time-in-state information through procfs.
+
+	  If in doubt, say N.
+
 choice
 	prompt "Default CPUFreq governor"
 	default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
@@ -227,6 +234,15 @@
 
 	  If in doubt, say N.
 
+config CPUFREQ_DUMMY
+	tristate "Dummy CPU frequency driver"
+	help
+	  This option adds a generic dummy CPUfreq driver, which sets a fake
+	  2-frequency table when initializing each policy and otherwise does
+	  nothing.
+
+	  If in doubt, say N.
+
 if X86
 source "drivers/cpufreq/Kconfig.x86"
 endif
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 48ee585..f357aa7 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -5,7 +5,10 @@
 # CPUfreq stats
 obj-$(CONFIG_CPU_FREQ_STAT)             += cpufreq_stats.o
 
-# CPUfreq governors 
+# CPUfreq times
+obj-$(CONFIG_CPU_FREQ_TIMES)		+= cpufreq_times.o
+
+# CPUfreq governors
 obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE)	+= cpufreq_performance.o
 obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)	+= cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)	+= cpufreq_userspace.o
@@ -17,6 +20,8 @@
 obj-$(CONFIG_CPUFREQ_DT)		+= cpufreq-dt.o
 obj-$(CONFIG_CPUFREQ_DT_PLATDEV)	+= cpufreq-dt-platdev.o
 
+obj-$(CONFIG_CPUFREQ_DUMMY)		+= dummy-cpufreq.o
+
 ##################################################################################
 # x86 drivers.
 # Link order matters. K8 is preferred to ACPI because of firmware bugs in early
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 096c384..7007c70 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -16,6 +16,7 @@
 
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
+#include <linux/cpufreq_times.h>
 #include <linux/cpu_cooling.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -29,6 +30,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
+#include <trace/hooks/cpufreq.h>
 
 static LIST_HEAD(cpufreq_policy_list);
 
@@ -386,7 +388,9 @@
 					 CPUFREQ_POSTCHANGE, freqs);
 
 		cpufreq_stats_record_transition(policy, freqs->new);
+		cpufreq_times_record_transition(policy, freqs->new);
 		policy->cur = freqs->new;
+		trace_android_rvh_cpufreq_transition(policy);
 	}
 }
 
@@ -688,8 +692,15 @@
 	return sprintf(buf, "%u\n", policy->object);	\
 }
 
+static ssize_t show_cpuinfo_max_freq(struct cpufreq_policy *policy, char *buf)
+{
+	unsigned int max_freq = policy->cpuinfo.max_freq;
+
+	trace_android_rvh_show_max_freq(policy, &max_freq);
+	return sprintf(buf, "%u\n", max_freq);
+}
+
 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
-show_one(cpuinfo_max_freq, cpuinfo.max_freq);
 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
@@ -1487,6 +1498,7 @@
 			goto out_destroy_policy;
 
 		cpufreq_stats_create_table(policy);
+		cpufreq_times_create_policy(policy);
 
 		write_lock_irqsave(&cpufreq_driver_lock, flags);
 		list_add(&policy->policy_list, &cpufreq_policy_list);
@@ -2100,6 +2112,7 @@
 	arch_set_freq_scale(policy->related_cpus, freq,
 			    policy->cpuinfo.max_freq);
 	cpufreq_stats_record_transition(policy, freq);
+	trace_android_rvh_cpufreq_transition(policy);
 
 	if (trace_cpu_frequency_enabled()) {
 		for_each_cpu(cpu, policy->cpus)
@@ -2576,7 +2589,6 @@
 		ret = cpufreq_start_governor(policy);
 		if (!ret) {
 			pr_debug("governor change\n");
-			sched_cpufreq_governor_change(policy, old_gov);
 			return 0;
 		}
 		cpufreq_exit_governor(policy);
@@ -2594,6 +2606,7 @@
 
 	return ret;
 }
+EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency_limits);
 
 /**
  * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
new file mode 100644
index 0000000..4df55b3
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -0,0 +1,211 @@
+/* drivers/cpufreq/cpufreq_times.c
+ *
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/cpufreq_times.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+
+static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
+
+/**
+ * struct cpu_freqs - per-cpu frequency information
+ * @offset: start of these freqs' stats in task time_in_state array
+ * @max_state: number of entries in freq_table
+ * @last_index: index in freq_table of last frequency switched to
+ * @freq_table: list of available frequencies
+ */
+struct cpu_freqs {
+	unsigned int offset;
+	unsigned int max_state;
+	unsigned int last_index;
+	unsigned int freq_table[];
+};
+
+static struct cpu_freqs *all_freqs[NR_CPUS];
+
+static unsigned int next_offset;
+
+void cpufreq_task_times_init(struct task_struct *p)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	p->time_in_state = NULL;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	p->max_state = 0;
+}
+
+void cpufreq_task_times_alloc(struct task_struct *p)
+{
+	void *temp;
+	unsigned long flags;
+	unsigned int max_state = READ_ONCE(next_offset);
+
+	/* We use one array to avoid multiple allocs per task */
+	temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
+	if (!temp)
+		return;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	p->time_in_state = temp;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	p->max_state = max_state;
+}
+
+/* Caller must hold task_time_in_state_lock */
+static int cpufreq_task_times_realloc_locked(struct task_struct *p)
+{
+	void *temp;
+	unsigned int max_state = READ_ONCE(next_offset);
+
+	temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC);
+	if (!temp)
+		return -ENOMEM;
+	p->time_in_state = temp;
+	memset(p->time_in_state + p->max_state, 0,
+	       (max_state - p->max_state) * sizeof(u64));
+	p->max_state = max_state;
+	return 0;
+}
+
+void cpufreq_task_times_exit(struct task_struct *p)
+{
+	unsigned long flags;
+	void *temp;
+
+	if (!p->time_in_state)
+		return;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	temp = p->time_in_state;
+	p->time_in_state = NULL;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	kfree(temp);
+}
+
+int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
+	struct pid *pid, struct task_struct *p)
+{
+	unsigned int cpu, i;
+	u64 cputime;
+	unsigned long flags;
+	struct cpu_freqs *freqs;
+	struct cpu_freqs *last_freqs = NULL;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	for_each_possible_cpu(cpu) {
+		freqs = all_freqs[cpu];
+		if (!freqs || freqs == last_freqs)
+			continue;
+		last_freqs = freqs;
+
+		seq_printf(m, "cpu%u\n", cpu);
+		for (i = 0; i < freqs->max_state; i++) {
+			cputime = 0;
+			if (freqs->offset + i < p->max_state &&
+			    p->time_in_state)
+				cputime = p->time_in_state[freqs->offset + i];
+			seq_printf(m, "%u %lu\n", freqs->freq_table[i],
+				   (unsigned long)nsec_to_clock_t(cputime));
+		}
+	}
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	return 0;
+}
+
+void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
+{
+	unsigned long flags;
+	unsigned int state;
+	struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+
+	if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
+		return;
+
+	state = freqs->offset + READ_ONCE(freqs->last_index);
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
+	    p->time_in_state)
+		p->time_in_state[state] += cputime;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+}
+
+static int cpufreq_times_get_index(struct cpu_freqs *freqs, unsigned int freq)
+{
+	int index;
+	for (index = 0; index < freqs->max_state; ++index) {
+		if (freqs->freq_table[index] == freq)
+			return index;
+	}
+	return -1;
+}
+
+void cpufreq_times_create_policy(struct cpufreq_policy *policy)
+{
+	int cpu, index = 0;
+	unsigned int count = 0;
+	struct cpufreq_frequency_table *pos, *table;
+	struct cpu_freqs *freqs;
+	void *tmp;
+
+	if (all_freqs[policy->cpu])
+		return;
+
+	table = policy->freq_table;
+	if (!table)
+		return;
+
+	cpufreq_for_each_valid_entry(pos, table)
+		count++;
+
+	tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count,
+		       GFP_KERNEL);
+	if (!tmp)
+		return;
+
+	freqs = tmp;
+	freqs->max_state = count;
+
+	cpufreq_for_each_valid_entry(pos, table)
+		freqs->freq_table[index++] = pos->frequency;
+
+	index = cpufreq_times_get_index(freqs, policy->cur);
+	if (index >= 0)
+		WRITE_ONCE(freqs->last_index, index);
+
+	freqs->offset = next_offset;
+	WRITE_ONCE(next_offset, freqs->offset + count);
+	for_each_cpu(cpu, policy->related_cpus)
+		all_freqs[cpu] = freqs;
+}
+
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+	unsigned int new_freq)
+{
+	int index;
+	struct cpu_freqs *freqs = all_freqs[policy->cpu];
+	if (!freqs)
+		return;
+
+	index = cpufreq_times_get_index(freqs, new_freq);
+	if (index >= 0)
+		WRITE_ONCE(freqs->last_index, index);
+}
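
On the consumer side, the ACK procfs wiring exposes proc_time_in_state_show() as a per-task file (assumed here to be /proc/<pid>/time_in_state). A minimal userspace reader, matching the "cpuN" header plus "<freq> <ticks>" rows the seq_printf() calls above emit (ticks are USER_HZ units via nsec_to_clock_t()):

    #include <stdio.h>

    int main(void)
    {
            char line[64];
            FILE *f = fopen("/proc/self/time_in_state", "r");

            if (!f) {
                    perror("time_in_state");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }
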
diff --git a/drivers/cpufreq/dummy-cpufreq.c b/drivers/cpufreq/dummy-cpufreq.c
new file mode 100644
index 0000000..e74ef67
--- /dev/null
+++ b/drivers/cpufreq/dummy-cpufreq.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Google, Inc.
+ */
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+
+static struct cpufreq_frequency_table freq_table[] = {
+	{ .frequency = 1 },
+	{ .frequency = 2 },
+	{ .frequency = CPUFREQ_TABLE_END },
+};
+
+static int dummy_cpufreq_target_index(struct cpufreq_policy *policy,
+				   unsigned int index)
+{
+	return 0;
+}
+
+static int dummy_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+	policy->freq_table = freq_table;
+	return 0;
+}
+
+static unsigned int dummy_cpufreq_get(unsigned int cpu)
+{
+	return 1;
+}
+
+static int dummy_cpufreq_verify(struct cpufreq_policy_data *data)
+{
+	return 0;
+}
+
+static struct cpufreq_driver dummy_cpufreq_driver = {
+	.name = "dummy",
+	.target_index = dummy_cpufreq_target_index,
+	.init = dummy_cpufreq_driver_init,
+	.get = dummy_cpufreq_get,
+	.verify = dummy_cpufreq_verify,
+	.attr = cpufreq_generic_attr,
+};
+
+static int __init dummy_cpufreq_init(void)
+{
+	return cpufreq_register_driver(&dummy_cpufreq_driver);
+}
+
+static void __exit dummy_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&dummy_cpufreq_driver);
+}
+
+module_init(dummy_cpufreq_init);
+module_exit(dummy_cpufreq_exit);
+
+MODULE_AUTHOR("Connor O'Brien <connoro@google.com>");
+MODULE_DESCRIPTION("dummy cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 67e56cf..24d3115 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -9,6 +9,7 @@
 
 #include <linux/cpufreq.h>
 #include <linux/module.h>
+#include <trace/hooks/cpufreq.h>
 
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
@@ -51,6 +52,7 @@
 			max_freq = freq;
 	}
 
+	trace_android_vh_freq_table_limits(policy, min_freq, max_freq);
 	policy->min = policy->cpuinfo.min_freq = min_freq;
 	policy->max = max_freq;
 	/*
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index b51b5df..efc063f 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -25,6 +25,7 @@
 #include <linux/string.h>
 
 #include <asm/cpuidle.h>
+#include <trace/hooks/cpuidle_psci.h>
 
 #include "cpuidle-psci.h"
 #include "dt_idle_states.h"
@@ -67,6 +68,8 @@
 	if (ret)
 		return -1;
 
+	trace_android_vh_cpuidle_psci_enter(dev, s2idle);
+
 	/* Do runtime PM to manage a hierarchical CPU topology. */
 	rcu_irq_enter_irqson();
 	if (s2idle)
@@ -88,6 +91,8 @@
 		pm_runtime_get_sync(pd_dev);
 	rcu_irq_exit_irqson();
 
+	trace_android_vh_cpuidle_psci_exit(dev, s2idle);
+
 	cpu_pm_exit();
 
 	/* Clear the domain state to start fresh when back from idle. */
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index ef2ea1b..a01f04b 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -24,6 +24,7 @@
 #include <linux/tick.h>
 #include <linux/mmu_context.h>
 #include <trace/events/power.h>
+#include <trace/hooks/cpuidle.h>
 
 #include "cpuidle.h"
 
@@ -202,11 +203,22 @@
 {
 	int entered_state;
 
-	struct cpuidle_state *target_state = &drv->states[index];
-	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
+	struct cpuidle_state *target_state;
+	bool broadcast;
 	ktime_t time_start, time_end;
 
 	/*
+	 * The vendor hook may modify index, which means target_state and
+	 * broadcast must be assigned after the vendor hook.
+	 */
+	trace_android_vh_cpu_idle_enter(&index, dev);
+	if (index < 0)
+		return index;
+
+	target_state = &drv->states[index];
+	broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
+
+	/*
 	 * Tell the time framework to switch to a broadcast timer because our
 	 * local timer will be shut down.  If a local timer is used from another
 	 * CPU as a broadcast timer, this call may fail if it is not available.
@@ -242,6 +254,7 @@
 	sched_clock_idle_wakeup_event();
 	time_end = ns_to_ktime(local_clock());
 	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+	trace_android_vh_cpu_idle_exit(entered_state, dev);
 
 	/* The cpu is no longer idle or about to enter idle. */
 	sched_idle_set_state(NULL);
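
Since the hook runs before target_state is resolved, a probe may retarget the entry, or abort it entirely by driving the index negative. A sketch with the probe shape inferred from trace_android_vh_cpu_idle_enter(&index, dev); the clamp policy is hypothetical:

    /* Never enter states deeper than index 1. */
    static void cap_idle_state(void *unused, int *index,
                               struct cpuidle_device *dev)
    {
            if (*index > 1)
                    *index = 1;
    }

    /* registered from module init, like any vendor hook:
     *   register_trace_android_vh_cpu_idle_enter(cap_idle_state, NULL);
     */
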
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 29acaf4..0e51ed2 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -102,6 +102,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cpuidle_register_governor);
 
 /**
  * cpuidle_governor_latency_req - Compute a latency constraint for CPU
@@ -118,3 +119,4 @@
 
 	return (s64)device_req * NSEC_PER_USEC;
 }
+EXPORT_SYMBOL_GPL(cpuidle_governor_latency_req);
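
These exports are what allow a cpuidle governor to live in a module. A skeletal example against the 5.16 struct cpuidle_governor (the name, rating, and always-shallowest policy are placeholders):

    #include <linux/cpuidle.h>
    #include <linux/module.h>

    static int pick_shallowest(struct cpuidle_driver *drv,
                               struct cpuidle_device *dev, bool *stop_tick)
    {
            *stop_tick = false;     /* keep the tick: we stay shallow */
            return 0;               /* state 0 is the shallowest */
    }

    static struct cpuidle_governor demo_governor = {
            .name   = "demo",
            .rating = 1,            /* low rating: never auto-selected */
            .select = pick_shallowest,
    };

    static int __init demo_gov_init(void)
    {
            return cpuidle_register_governor(&demo_governor);
    }
    module_init(demo_gov_init);
    MODULE_LICENSE("GPL");
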
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 6437b2e..84ec1671 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1185,6 +1185,30 @@
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
 
+int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
+				     enum dma_data_direction direction,
+				     unsigned int offset, unsigned int len)
+{
+	int ret = 0;
+
+	if (WARN_ON(!dmabuf))
+		return -EINVAL;
+
+	if (dmabuf->ops->begin_cpu_access_partial)
+		ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction,
+							    offset, len);
+
+	/* Ensure that all fences are waited upon - but we first allow
+	 * the native handler the chance to do so more efficiently if it
+	 * chooses. A double invocation here will be a reasonably cheap no-op.
+	 */
+	if (ret == 0)
+		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);
+
 /**
  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
@@ -1213,6 +1237,21 @@
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
 
+int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
+				   enum dma_data_direction direction,
+				   unsigned int offset, unsigned int len)
+{
+	int ret = 0;
+
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->end_cpu_access_partial)
+		ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction,
+							  offset, len);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
 
 /**
  * dma_buf_mmap - Setup up a userspace mmap with the given vma
@@ -1333,6 +1372,20 @@
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
 
+int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
+{
+	int ret = 0;
+
+	if (WARN_ON(!dmabuf) || !flags)
+		return -EINVAL;
+
+	if (dmabuf->ops->get_flags)
+		ret = dmabuf->ops->get_flags(dmabuf, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get_flags);
+
 #ifdef CONFIG_DEBUG_FS
 static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 56bf5ad..0114157 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -30,6 +30,7 @@
  * @heap_devt		heap device node
  * @list		list head connecting to list of heaps
  * @heap_cdev		heap char device
+ * @heap_dev		heap device struct
  *
  * Represents a heap of memory from which buffers can be made.
  */
@@ -40,6 +41,8 @@
 	dev_t heap_devt;
 	struct list_head list;
 	struct cdev heap_cdev;
+	struct kref refcount;
+	struct device *heap_dev;
 };
 
 static LIST_HEAD(heap_list);
@@ -48,22 +51,60 @@
 static struct class *dma_heap_class;
 static DEFINE_XARRAY_ALLOC(dma_heap_minors);
 
-static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
-				 unsigned int fd_flags,
-				 unsigned int heap_flags)
+struct dma_heap *dma_heap_find(const char *name)
 {
-	struct dma_buf *dmabuf;
-	int fd;
+	struct dma_heap *h;
 
+	mutex_lock(&heap_list_lock);
+	list_for_each_entry(h, &heap_list, list) {
+		if (!strcmp(h->name, name)) {
+			kref_get(&h->refcount);
+			mutex_unlock(&heap_list_lock);
+			return h;
+		}
+	}
+	mutex_unlock(&heap_list_lock);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_heap_find);
+
+
+void dma_heap_buffer_free(struct dma_buf *dmabuf)
+{
+	dma_buf_put(dmabuf);
+}
+EXPORT_SYMBOL_GPL(dma_heap_buffer_free);
+
+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+				      unsigned int fd_flags,
+				      unsigned int heap_flags)
+{
+	if (fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
+		return ERR_PTR(-EINVAL);
+
+	if (heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
+		return ERR_PTR(-EINVAL);
 	/*
 	 * Allocations from all heaps have to begin
 	 * and end on page boundaries.
 	 */
 	len = PAGE_ALIGN(len);
 	if (!len)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
-	dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
+	return heap->ops->allocate(heap, len, fd_flags, heap_flags);
+}
+EXPORT_SYMBOL_GPL(dma_heap_buffer_alloc);
+
+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
+			    unsigned int fd_flags,
+			    unsigned int heap_flags)
+{
+	struct dma_buf *dmabuf;
+	int fd;
+
+	dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags);
+
 	if (IS_ERR(dmabuf))
 		return PTR_ERR(dmabuf);
 
@@ -73,7 +114,9 @@
 		/* just return, as put will call release and that will free */
 	}
 	return fd;
+
 }
+EXPORT_SYMBOL_GPL(dma_heap_bufferfd_alloc);
 
 static int dma_heap_open(struct inode *inode, struct file *file)
 {
@@ -101,15 +144,9 @@
 	if (heap_allocation->fd)
 		return -EINVAL;
 
-	if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
-		return -EINVAL;
-
-	if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
-		return -EINVAL;
-
-	fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
-				   heap_allocation->fd_flags,
-				   heap_allocation->heap_flags);
+	fd = dma_heap_bufferfd_alloc(heap, heap_allocation->len,
+				     heap_allocation->fd_flags,
+				     heap_allocation->heap_flags);
 	if (fd < 0)
 		return fd;
 
@@ -201,6 +238,47 @@
 {
 	return heap->priv;
 }
+EXPORT_SYMBOL_GPL(dma_heap_get_drvdata);
+
+static void dma_heap_release(struct kref *ref)
+{
+	struct dma_heap *heap = container_of(ref, struct dma_heap, refcount);
+	int minor = MINOR(heap->heap_devt);
+
+	/* Note, we already holding the heap_list_lock here */
+	list_del(&heap->list);
+
+	device_destroy(dma_heap_class, heap->heap_devt);
+	cdev_del(&heap->heap_cdev);
+	xa_erase(&dma_heap_minors, minor);
+
+	kfree(heap);
+}
+
+void dma_heap_put(struct dma_heap *h)
+{
+	/*
+	 * Take the heap_list_lock now to avoid racing with code
+	 * scanning the list and then taking a kref.
+	 */
+	mutex_lock(&heap_list_lock);
+	kref_put(&h->refcount, dma_heap_release);
+	mutex_unlock(&heap_list_lock);
+}
+EXPORT_SYMBOL_GPL(dma_heap_put);
+
+/**
+ * dma_heap_get_dev() - get device struct for the heap
+ * @heap: DMA-Heap to retrieve device struct from
+ *
+ * Returns:
+ * The device struct for the heap.
+ */
+struct device *dma_heap_get_dev(struct dma_heap *heap)
+{
+	return heap->heap_dev;
+}
+EXPORT_SYMBOL_GPL(dma_heap_get_dev);
 
 /**
  * dma_heap_get_name() - get heap name
@@ -213,11 +291,11 @@
 {
 	return heap->name;
 }
+EXPORT_SYMBOL_GPL(dma_heap_get_name);
 
 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 {
-	struct dma_heap *heap, *h, *err_ret;
-	struct device *dev_ret;
+	struct dma_heap *heap, *err_ret;
 	unsigned int minor;
 	int ret;
 
@@ -232,21 +310,19 @@
 	}
 
 	/* check the name is unique */
-	mutex_lock(&heap_list_lock);
-	list_for_each_entry(h, &heap_list, list) {
-		if (!strcmp(h->name, exp_info->name)) {
-			mutex_unlock(&heap_list_lock);
-			pr_err("dma_heap: Already registered heap named %s\n",
-			       exp_info->name);
-			return ERR_PTR(-EINVAL);
-		}
+	heap = dma_heap_find(exp_info->name);
+	if (heap) {
+		pr_err("dma_heap: Already registered heap named %s\n",
+		       exp_info->name);
+		dma_heap_put(heap);
+		return ERR_PTR(-EINVAL);
 	}
-	mutex_unlock(&heap_list_lock);
 
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 	if (!heap)
 		return ERR_PTR(-ENOMEM);
 
+	kref_init(&heap->refcount);
 	heap->name = exp_info->name;
 	heap->ops = exp_info->ops;
 	heap->priv = exp_info->priv;
@@ -271,16 +347,20 @@
 		goto err1;
 	}
 
-	dev_ret = device_create(dma_heap_class,
-				NULL,
-				heap->heap_devt,
-				NULL,
-				heap->name);
-	if (IS_ERR(dev_ret)) {
+	heap->heap_dev = device_create(dma_heap_class,
+				       NULL,
+				       heap->heap_devt,
+				       NULL,
+				       heap->name);
+	if (IS_ERR(heap->heap_dev)) {
 		pr_err("dma_heap: Unable to create device\n");
-		err_ret = ERR_CAST(dev_ret);
+		err_ret = ERR_CAST(heap->heap_dev);
 		goto err2;
 	}
+
+	/* Make sure it doesn't disappear on us */
+	heap->heap_dev = get_device(heap->heap_dev);
+
 	/* Add heap to the list */
 	mutex_lock(&heap_list_lock);
 	list_add(&heap->list, &heap_list);
@@ -296,6 +376,7 @@
 	kfree(heap);
 	return err_ret;
 }
+EXPORT_SYMBOL_GPL(dma_heap_add);
 
 static char *dma_heap_devnode(struct device *dev, umode_t *mode)
 {
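
With the refcount and the exported find/alloc/put helpers, kernel code can now allocate from a named heap without going through the ioctl path. An in-kernel sketch ("system" is the heap registered by system_heap.c below; error handling abbreviated):

    struct dma_heap *heap = dma_heap_find("system");
    struct dma_buf *buf;

    if (!heap)
            return -ENODEV;

    buf = dma_heap_buffer_alloc(heap, SZ_1M, O_RDWR | O_CLOEXEC, 0);
    if (IS_ERR(buf)) {
            dma_heap_put(heap);
            return PTR_ERR(buf);
    }
    /* ... attach/map the buffer ... */
    dma_heap_buffer_free(buf);
    dma_heap_put(heap);
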
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06..e273fb1 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -1,12 +1,12 @@
 config DMABUF_HEAPS_SYSTEM
-	bool "DMA-BUF System Heap"
+	tristate "DMA-BUF System Heap"
 	depends on DMABUF_HEAPS
 	help
 	  Choose this option to enable the system dmabuf heap. The system heap
 	  is backed by pages from the buddy allocator. If in doubt, say Y.
 
 config DMABUF_HEAPS_CMA
-	bool "DMA-BUF CMA Heap"
+	tristate "DMA-BUF CMA Heap"
 	depends on DMABUF_HEAPS && DMA_CMA
 	help
 	  Choose this option to enable dma-buf CMA heap. This heap is backed
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 0c05b79..5905795 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -403,3 +403,4 @@
 module_init(add_default_cma_heap);
 MODULE_DESCRIPTION("DMA-BUF CMA Heap");
 MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(DMA_BUF);
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index ab7fd89..9ac8ac5 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -22,6 +22,7 @@
 #include <linux/vmalloc.h>
 
 static struct dma_heap *sys_heap;
+static struct dma_heap *sys_uncached_heap;
 
 struct system_heap_buffer {
 	struct dma_heap *heap;
@@ -31,6 +32,8 @@
 	struct sg_table sg_table;
 	int vmap_cnt;
 	void *vaddr;
+
+	bool uncached;
 };
 
 struct dma_heap_attachment {
@@ -38,6 +41,8 @@
 	struct sg_table *table;
 	struct list_head list;
 	bool mapped;
+
+	bool uncached;
 };
 
 #define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
@@ -101,7 +106,7 @@
 	a->dev = attachment->dev;
 	INIT_LIST_HEAD(&a->list);
 	a->mapped = false;
-
+	a->uncached = buffer->uncached;
 	attachment->priv = a;
 
 	mutex_lock(&buffer->lock);
@@ -131,9 +136,13 @@
 {
 	struct dma_heap_attachment *a = attachment->priv;
 	struct sg_table *table = a->table;
+	int attr = 0;
 	int ret;
 
-	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+	if (a->uncached)
+		attr = DMA_ATTR_SKIP_CPU_SYNC;
+
+	ret = dma_map_sgtable(attachment->dev, table, direction, attr);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -146,9 +155,12 @@
 				      enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
+	int attr = 0;
 
+	if (a->uncached)
+		attr = DMA_ATTR_SKIP_CPU_SYNC;
 	a->mapped = false;
-	dma_unmap_sgtable(attachment->dev, table, direction, 0);
+	dma_unmap_sgtable(attachment->dev, table, direction, attr);
 }
 
 static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
@@ -162,10 +174,12 @@
 	if (buffer->vmap_cnt)
 		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	list_for_each_entry(a, &buffer->attachments, list) {
-		if (!a->mapped)
-			continue;
-		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+	if (!buffer->uncached) {
+		list_for_each_entry(a, &buffer->attachments, list) {
+			if (!a->mapped)
+				continue;
+			dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+		}
 	}
 	mutex_unlock(&buffer->lock);
 
@@ -183,10 +197,12 @@
 	if (buffer->vmap_cnt)
 		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	list_for_each_entry(a, &buffer->attachments, list) {
-		if (!a->mapped)
-			continue;
-		dma_sync_sgtable_for_device(a->dev, a->table, direction);
+	if (!buffer->uncached) {
+		list_for_each_entry(a, &buffer->attachments, list) {
+			if (!a->mapped)
+				continue;
+			dma_sync_sgtable_for_device(a->dev, a->table, direction);
+		}
 	}
 	mutex_unlock(&buffer->lock);
 
@@ -201,6 +217,9 @@
 	struct sg_page_iter piter;
 	int ret;
 
+	if (buffer->uncached)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
 	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
 		struct page *page = sg_page_iter_page(&piter);
 
@@ -222,17 +241,21 @@
 	struct page **pages = vmalloc(sizeof(struct page *) * npages);
 	struct page **tmp = pages;
 	struct sg_page_iter piter;
+	pgprot_t pgprot = PAGE_KERNEL;
 	void *vaddr;
 
 	if (!pages)
 		return ERR_PTR(-ENOMEM);
 
+	if (buffer->uncached)
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
+
 	for_each_sgtable_page(table, &piter, 0) {
 		WARN_ON(tmp - pages >= npages);
 		*tmp++ = sg_page_iter_page(&piter);
 	}
 
-	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+	vaddr = vmap(pages, npages, VM_MAP, pgprot);
 	vfree(pages);
 
 	if (!vaddr)
@@ -332,10 +355,11 @@
 	return NULL;
 }
 
-static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
-					    unsigned long len,
-					    unsigned long fd_flags,
-					    unsigned long heap_flags)
+static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap,
+					       unsigned long len,
+					       unsigned long fd_flags,
+					       unsigned long heap_flags,
+					       bool uncached)
 {
 	struct system_heap_buffer *buffer;
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -356,6 +380,7 @@
 	mutex_init(&buffer->lock);
 	buffer->heap = heap;
 	buffer->len = len;
+	buffer->uncached = uncached;
 
 	INIT_LIST_HEAD(&pages);
 	i = 0;
@@ -401,6 +426,18 @@
 		ret = PTR_ERR(dmabuf);
 		goto free_pages;
 	}
+
+	/*
+	 * For uncached buffers, we need to initially flush cpu cache, since
+	 * the __GFP_ZERO on the allocation means the zeroing was done by the
+	 * cpu and thus it is likely cached. Map (and implicitly flush) and
+	 * unmap it now so we don't get corruption later on.
+	 */
+	if (buffer->uncached) {
+		dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
+		dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
+	}
+
 	return dmabuf;
 
 free_pages:
@@ -418,10 +455,40 @@
 	return ERR_PTR(ret);
 }
 
+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
+					    unsigned long len,
+					    unsigned long fd_flags,
+					    unsigned long heap_flags)
+{
+	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
+}
+
 static const struct dma_heap_ops system_heap_ops = {
 	.allocate = system_heap_allocate,
 };
 
+static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap,
+						     unsigned long len,
+						     unsigned long fd_flags,
+						     unsigned long heap_flags)
+{
+	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
+}
+
+/* Dummy function to be used until we can call dma_coerce_mask_and_coherent */
+static struct dma_buf *system_uncached_heap_not_initialized(struct dma_heap *heap,
+							    unsigned long len,
+							    unsigned long fd_flags,
+							    unsigned long heap_flags)
+{
+	return ERR_PTR(-EBUSY);
+}
+
+static struct dma_heap_ops system_uncached_heap_ops = {
+	/* After system_heap_create is complete, we will swap this */
+	.allocate = system_uncached_heap_not_initialized,
+};
+
 static int system_heap_create(void)
 {
 	struct dma_heap_export_info exp_info;
@@ -434,7 +501,20 @@
 	if (IS_ERR(sys_heap))
 		return PTR_ERR(sys_heap);
 
+	exp_info.name = "system-uncached";
+	exp_info.ops = &system_uncached_heap_ops;
+	exp_info.priv = NULL;
+
+	sys_uncached_heap = dma_heap_add(&exp_info);
+	if (IS_ERR(sys_uncached_heap))
+		return PTR_ERR(sys_uncached_heap);
+
+	dma_coerce_mask_and_coherent(dma_heap_get_dev(sys_uncached_heap), DMA_BIT_MASK(64));
+	mb(); /* make sure we only set allocate after dma_mask is set */
+	system_uncached_heap_ops.allocate = system_uncached_heap_allocate;
+
 	return 0;
 }
 module_init(system_heap_create);
 MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(DMA_BUF);
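
The uncached heap registers alongside the cached one under the name "system-uncached". Its buffers skip CPU-side sync on map/unmap (DMA_ATTR_SKIP_CPU_SYNC), and both userspace mmap()s and kernel vmap()s are set up write-combined; the one-time dma_map/unmap_sgtable at allocation is what pushes the CPU-zeroed pages out of the cache before the buffer is handed out as uncached.
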
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 8b8744d..7f4ee6d 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -672,9 +672,9 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER),
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 1c72208..3c4b651 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1938,6 +1938,7 @@
 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 }
+EXPORT_SYMBOL_GPL(drm_mode_convert_to_umode);
 
 /**
  * drm_mode_convert_umode - convert a modeinfo into a drm_display_mode
@@ -2014,6 +2015,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(drm_mode_convert_umode);
 
 /**
  * drm_mode_is_420_only - if a given videomode can be only supported in YCBCR420
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 5b00310a..3997b35 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -298,10 +298,6 @@
 	struct virtio_gpu_framebuffer *virtio_gpu_fb;
 	int ret;
 
-	if (mode_cmd->pixel_format != DRM_FORMAT_HOST_XRGB8888 &&
-	    mode_cmd->pixel_format != DRM_FORMAT_HOST_ARGB8888)
-		return ERR_PTR(-ENOENT);
-
 	/* lookup object associated with res handle */
 	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
 	if (!obj)
@@ -337,7 +333,6 @@
 	if (ret)
 		return ret;
 
-	vgdev->ddev->mode_config.quirk_addfb_prefer_host_byte_order = true;
 	vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
 
 	/* modes will be validated against the framebuffer size */
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 6d3cc9e..e2fc62d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -31,7 +31,14 @@
 #include "virtgpu_drv.h"
 
 static const uint32_t virtio_gpu_formats[] = {
-	DRM_FORMAT_HOST_XRGB8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_BGRX8888,
+	DRM_FORMAT_BGRA8888,
+	DRM_FORMAT_RGBX8888,
+	DRM_FORMAT_RGBA8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ABGR8888,
 };
 
 static const uint32_t virtio_gpu_cursor_formats[] = {
@@ -43,6 +50,32 @@
 	uint32_t format;
 
 	switch (drm_fourcc) {
+#ifdef __BIG_ENDIAN
+	case DRM_FORMAT_XRGB8888:
+		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
+		break;
+	case DRM_FORMAT_BGRX8888:
+		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
+		break;
+	case DRM_FORMAT_BGRA8888:
+		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
+		break;
+	case DRM_FORMAT_RGBX8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+		break;
+	case DRM_FORMAT_RGBA8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+		break;
+	case DRM_FORMAT_ABGR8888:
+		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+		break;
+#else
 	case DRM_FORMAT_XRGB8888:
 		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
 		break;
@@ -55,6 +88,19 @@
 	case DRM_FORMAT_BGRA8888:
 		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
 		break;
+	case DRM_FORMAT_RGBX8888:
+		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+		break;
+	case DRM_FORMAT_RGBA8888:
+		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+		break;
+	case DRM_FORMAT_ABGR8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+		break;
+#endif
 	default:
 		/*
 		 * This should not happen, we handle everything listed
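
The mapping table follows from the two naming conventions: DRM fourccs name channels in little-endian packed order, while the virtio-gpu enums name bytes in memory order. On a little-endian host DRM_FORMAT_XRGB8888 lays out bytes B, G, R, X, hence VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; under __BIG_ENDIAN the same fourcc maps to X8R8G8B8 because the in-memory byte order flips.
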
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a7c78ac..70d209a 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -741,9 +741,6 @@
 config HID_NINTENDO
 	tristate "Nintendo Joy-Con and Pro Controller support"
 	depends on HID
-	depends on NEW_LEDS
-	depends on LEDS_CLASS
-	select POWER_SUPPLY
 	help
 	Adds support for the Nintendo Switch Joy-Cons and Pro Controller.
 	All controllers support bluetooth, and the Pro Controller also supports
@@ -752,16 +749,6 @@
 	To compile this driver as a module, choose M here: the
 	module will be called hid-nintendo.
 
-config NINTENDO_FF
-	bool "Nintendo Switch controller force feedback support"
-	depends on HID_NINTENDO
-	select INPUT_FF_MEMLESS
-	help
-	Say Y here if you have a Nintendo Switch controller and want to enable
-	force feedback support for it. This works for both joy-cons and the pro
-	controller. For the pro controller, both rumble motors can be controlled
-	individually.
-
 config HID_NTI
 	tristate "NTI keyboard adapters"
 	help
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 19da077..292a46d 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -928,7 +928,6 @@
 #define USB_DEVICE_ID_NINTENDO_JOYCONL	0x2006
 #define USB_DEVICE_ID_NINTENDO_JOYCONR	0x2007
 #define USB_DEVICE_ID_NINTENDO_PROCON	0x2009
-#define USB_DEVICE_ID_NINTENDO_CHRGGRIP	0x200E
 
 #define USB_VENDOR_ID_NOVATEK		0x0603
 #define USB_DEVICE_ID_NOVATEK_PCT	0x0600
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index b6a9a0f..3695b96 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -2,17 +2,15 @@
 /*
  * HID driver for Nintendo Switch Joy-Cons and Pro Controllers
  *
- * Copyright (c) 2019-2021 Daniel J. Ogorchock <djogorchock@gmail.com>
+ * Copyright (c) 2019 Daniel J. Ogorchock <djogorchock@gmail.com>
  *
  * The following resources/projects were referenced for this driver:
  *   https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering
  *   https://gitlab.com/pjranki/joycon-linux-kernel (Peter Rankin)
  *   https://github.com/FrotBot/SwitchProConLinuxUSB
  *   https://github.com/MTCKC/ProconXInput
- *   https://github.com/Davidobot/BetterJoyForCemu
  *   hid-wiimote kernel hid driver
  *   hid-logitech-hidpp driver
- *   hid-sony driver
  *
  * This driver supports the Nintendo Switch Joy-Cons and Pro Controllers. The
  * Pro Controllers can either be used over USB or Bluetooth.
@@ -23,16 +21,11 @@
  */
 
 #include "hid-ids.h"
-#include <asm/unaligned.h>
 #include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/kernel.h>
 #include <linux/hid.h>
 #include <linux/input.h>
-#include <linux/jiffies.h>
-#include <linux/leds.h>
 #include <linux/module.h>
-#include <linux/power_supply.h>
 #include <linux/spinlock.h>
 
 /*
@@ -41,272 +34,78 @@
  */
 
 /* Output Reports */
-#define JC_OUTPUT_RUMBLE_AND_SUBCMD	 0x01
-#define JC_OUTPUT_FW_UPDATE_PKT		 0x03
-#define JC_OUTPUT_RUMBLE_ONLY		 0x10
-#define JC_OUTPUT_MCU_DATA		 0x11
-#define JC_OUTPUT_USB_CMD		 0x80
+static const u8 JC_OUTPUT_RUMBLE_AND_SUBCMD	= 0x01;
+static const u8 JC_OUTPUT_FW_UPDATE_PKT		= 0x03;
+static const u8 JC_OUTPUT_RUMBLE_ONLY		= 0x10;
+static const u8 JC_OUTPUT_MCU_DATA		= 0x11;
+static const u8 JC_OUTPUT_USB_CMD		= 0x80;
 
 /* Subcommand IDs */
-#define JC_SUBCMD_STATE			 0x00
-#define JC_SUBCMD_MANUAL_BT_PAIRING	 0x01
-#define JC_SUBCMD_REQ_DEV_INFO		 0x02
-#define JC_SUBCMD_SET_REPORT_MODE	 0x03
-#define JC_SUBCMD_TRIGGERS_ELAPSED	 0x04
-#define JC_SUBCMD_GET_PAGE_LIST_STATE	 0x05
-#define JC_SUBCMD_SET_HCI_STATE		 0x06
-#define JC_SUBCMD_RESET_PAIRING_INFO	 0x07
-#define JC_SUBCMD_LOW_POWER_MODE	 0x08
-#define JC_SUBCMD_SPI_FLASH_READ	 0x10
-#define JC_SUBCMD_SPI_FLASH_WRITE	 0x11
-#define JC_SUBCMD_RESET_MCU		 0x20
-#define JC_SUBCMD_SET_MCU_CONFIG	 0x21
-#define JC_SUBCMD_SET_MCU_STATE		 0x22
-#define JC_SUBCMD_SET_PLAYER_LIGHTS	 0x30
-#define JC_SUBCMD_GET_PLAYER_LIGHTS	 0x31
-#define JC_SUBCMD_SET_HOME_LIGHT	 0x38
-#define JC_SUBCMD_ENABLE_IMU		 0x40
-#define JC_SUBCMD_SET_IMU_SENSITIVITY	 0x41
-#define JC_SUBCMD_WRITE_IMU_REG		 0x42
-#define JC_SUBCMD_READ_IMU_REG		 0x43
-#define JC_SUBCMD_ENABLE_VIBRATION	 0x48
-#define JC_SUBCMD_GET_REGULATED_VOLTAGE	 0x50
+static const u8 JC_SUBCMD_STATE			/*= 0x00*/;
+static const u8 JC_SUBCMD_MANUAL_BT_PAIRING	= 0x01;
+static const u8 JC_SUBCMD_REQ_DEV_INFO		= 0x02;
+static const u8 JC_SUBCMD_SET_REPORT_MODE	= 0x03;
+static const u8 JC_SUBCMD_TRIGGERS_ELAPSED	= 0x04;
+static const u8 JC_SUBCMD_GET_PAGE_LIST_STATE	= 0x05;
+static const u8 JC_SUBCMD_SET_HCI_STATE		= 0x06;
+static const u8 JC_SUBCMD_RESET_PAIRING_INFO	= 0x07;
+static const u8 JC_SUBCMD_LOW_POWER_MODE	= 0x08;
+static const u8 JC_SUBCMD_SPI_FLASH_READ	= 0x10;
+static const u8 JC_SUBCMD_SPI_FLASH_WRITE	= 0x11;
+static const u8 JC_SUBCMD_RESET_MCU		= 0x20;
+static const u8 JC_SUBCMD_SET_MCU_CONFIG	= 0x21;
+static const u8 JC_SUBCMD_SET_MCU_STATE		= 0x22;
+static const u8 JC_SUBCMD_SET_PLAYER_LIGHTS	= 0x30;
+static const u8 JC_SUBCMD_GET_PLAYER_LIGHTS	= 0x31;
+static const u8 JC_SUBCMD_SET_HOME_LIGHT	= 0x38;
+static const u8 JC_SUBCMD_ENABLE_IMU		= 0x40;
+static const u8 JC_SUBCMD_SET_IMU_SENSITIVITY	= 0x41;
+static const u8 JC_SUBCMD_WRITE_IMU_REG		= 0x42;
+static const u8 JC_SUBCMD_READ_IMU_REG		= 0x43;
+static const u8 JC_SUBCMD_ENABLE_VIBRATION	= 0x48;
+static const u8 JC_SUBCMD_GET_REGULATED_VOLTAGE	= 0x50;
 
 /* Input Reports */
-#define JC_INPUT_BUTTON_EVENT		 0x3F
-#define JC_INPUT_SUBCMD_REPLY		 0x21
-#define JC_INPUT_IMU_DATA		 0x30
-#define JC_INPUT_MCU_DATA		 0x31
-#define JC_INPUT_USB_RESPONSE		 0x81
+static const u8 JC_INPUT_BUTTON_EVENT		= 0x3F;
+static const u8 JC_INPUT_SUBCMD_REPLY		= 0x21;
+static const u8 JC_INPUT_IMU_DATA		= 0x30;
+static const u8 JC_INPUT_MCU_DATA		= 0x31;
+static const u8 JC_INPUT_USB_RESPONSE		= 0x81;
 
 /* Feature Reports */
-#define JC_FEATURE_LAST_SUBCMD		 0x02
-#define JC_FEATURE_OTA_FW_UPGRADE	 0x70
-#define JC_FEATURE_SETUP_MEM_READ	 0x71
-#define JC_FEATURE_MEM_READ		 0x72
-#define JC_FEATURE_ERASE_MEM_SECTOR	 0x73
-#define JC_FEATURE_MEM_WRITE		 0x74
-#define JC_FEATURE_LAUNCH		 0x75
+static const u8 JC_FEATURE_LAST_SUBCMD		= 0x02;
+static const u8 JC_FEATURE_OTA_FW_UPGRADE	= 0x70;
+static const u8 JC_FEATURE_SETUP_MEM_READ	= 0x71;
+static const u8 JC_FEATURE_MEM_READ		= 0x72;
+static const u8 JC_FEATURE_ERASE_MEM_SECTOR	= 0x73;
+static const u8 JC_FEATURE_MEM_WRITE		= 0x74;
+static const u8 JC_FEATURE_LAUNCH		= 0x75;
 
 /* USB Commands */
-#define JC_USB_CMD_CONN_STATUS		 0x01
-#define JC_USB_CMD_HANDSHAKE		 0x02
-#define JC_USB_CMD_BAUDRATE_3M		 0x03
-#define JC_USB_CMD_NO_TIMEOUT		 0x04
-#define JC_USB_CMD_EN_TIMEOUT		 0x05
-#define JC_USB_RESET			 0x06
-#define JC_USB_PRE_HANDSHAKE		 0x91
-#define JC_USB_SEND_UART		 0x92
-
-/* Magic value denoting presence of user calibration */
-#define JC_CAL_USR_MAGIC_0		 0xB2
-#define JC_CAL_USR_MAGIC_1		 0xA1
-#define JC_CAL_USR_MAGIC_SIZE		 2
-
-/* SPI storage addresses of user calibration data */
-#define JC_CAL_USR_LEFT_MAGIC_ADDR	 0x8010
-#define JC_CAL_USR_LEFT_DATA_ADDR	 0x8012
-#define JC_CAL_USR_LEFT_DATA_END	 0x801A
-#define JC_CAL_USR_RIGHT_MAGIC_ADDR	 0x801B
-#define JC_CAL_USR_RIGHT_DATA_ADDR	 0x801D
-#define JC_CAL_STICK_DATA_SIZE \
-	(JC_CAL_USR_LEFT_DATA_END - JC_CAL_USR_LEFT_DATA_ADDR + 1)
+static const u8 JC_USB_CMD_CONN_STATUS		= 0x01;
+static const u8 JC_USB_CMD_HANDSHAKE		= 0x02;
+static const u8 JC_USB_CMD_BAUDRATE_3M		= 0x03;
+static const u8 JC_USB_CMD_NO_TIMEOUT		= 0x04;
+static const u8 JC_USB_CMD_EN_TIMEOUT		= 0x05;
+static const u8 JC_USB_RESET			= 0x06;
+static const u8 JC_USB_PRE_HANDSHAKE		= 0x91;
+static const u8 JC_USB_SEND_UART		= 0x92;
 
 /* SPI storage addresses of factory calibration data */
-#define JC_CAL_FCT_DATA_LEFT_ADDR	 0x603d
-#define JC_CAL_FCT_DATA_RIGHT_ADDR	 0x6046
+static const u16 JC_CAL_DATA_START		= 0x603d;
+static const u16 JC_CAL_DATA_END		= 0x604e;
+#define JC_CAL_DATA_SIZE	(JC_CAL_DATA_END - JC_CAL_DATA_START + 1)
 
-/* SPI storage addresses of IMU factory calibration data */
-#define JC_IMU_CAL_FCT_DATA_ADDR	 0x6020
-#define JC_IMU_CAL_FCT_DATA_END	 0x6037
-#define JC_IMU_CAL_DATA_SIZE \
-	(JC_IMU_CAL_FCT_DATA_END - JC_IMU_CAL_FCT_DATA_ADDR + 1)
-/* SPI storage addresses of IMU user calibration data */
-#define JC_IMU_CAL_USR_MAGIC_ADDR	 0x8026
-#define JC_IMU_CAL_USR_DATA_ADDR	 0x8028
 
 /* The raw analog joystick values will be mapped in terms of this magnitude */
-#define JC_MAX_STICK_MAG		 32767
-#define JC_STICK_FUZZ			 250
-#define JC_STICK_FLAT			 500
-
-/* Hat values for pro controller's d-pad */
-#define JC_MAX_DPAD_MAG		1
-#define JC_DPAD_FUZZ		0
-#define JC_DPAD_FLAT		0
-
-/* Under most circumstances IMU reports are pushed every 15ms; use as default */
-#define JC_IMU_DFLT_AVG_DELTA_MS	15
-/* How many samples to sum before calculating average IMU report delta */
-#define JC_IMU_SAMPLES_PER_DELTA_AVG	300
-/* Controls how many dropped IMU packets at once trigger a warning message */
-#define JC_IMU_DROPPED_PKT_WARNING	3
-
-/*
- * The controller's accelerometer has a sensor resolution of 16bits and is
- * configured with a range of +-8000 milliGs. Therefore, the resolution can be
- * calculated thus: (2^16-1)/(8000 * 2) = 4.096 digits per milliG
- * Resolution per G (rather than per millliG): 4.096 * 1000 = 4096 digits per G
- * Alternatively: 1/4096 = .0002441 Gs per digit
- */
-#define JC_IMU_MAX_ACCEL_MAG		32767
-#define JC_IMU_ACCEL_RES_PER_G		4096
-#define JC_IMU_ACCEL_FUZZ		10
-#define JC_IMU_ACCEL_FLAT		0
-
-/*
- * The controller's gyroscope has a sensor resolution of 16bits and is
- * configured with a range of +-2000 degrees/second.
- * Digits per dps: (2^16 -1)/(2000*2) = 16.38375
- * dps per digit: 16.38375E-1 = .0610
- *
- * STMicro recommends in the datasheet to add 15% to the dps/digit. This allows
- * the full sensitivity range to be saturated without clipping. This yields more
- * accurate results, so it's the technique this driver uses.
- * dps per digit (corrected): .0610 * 1.15 = .0702
- * digits per dps (corrected): .0702E-1 = 14.247
- *
- * Now, 14.247 truncating to 14 loses a lot of precision, so we rescale the
- * min/max range by 1000.
- */
-#define JC_IMU_PREC_RANGE_SCALE	1000
-/* Note: change mag and res_per_dps if prec_range_scale is ever altered */
-#define JC_IMU_MAX_GYRO_MAG		32767000 /* (2^16-1)*1000 */
-#define JC_IMU_GYRO_RES_PER_DPS		14247 /* (14.247*1000) */
-#define JC_IMU_GYRO_FUZZ		10
-#define JC_IMU_GYRO_FLAT		0
-
-/* frequency/amplitude tables for rumble */
-struct joycon_rumble_freq_data {
-	u16 high;
-	u8 low;
-	u16 freq; /* Hz*/
-};
-
-struct joycon_rumble_amp_data {
-	u8 high;
-	u16 low;
-	u16 amp;
-};
-
-#if IS_ENABLED(CONFIG_NINTENDO_FF)
-/*
- * These tables are from
- * https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering/blob/master/rumble_data_table.md
- */
-static const struct joycon_rumble_freq_data joycon_rumble_frequencies[] = {
-	/* high, low, freq */
-	{ 0x0000, 0x01,   41 }, { 0x0000, 0x02,   42 }, { 0x0000, 0x03,   43 },
-	{ 0x0000, 0x04,   44 }, { 0x0000, 0x05,   45 }, { 0x0000, 0x06,   46 },
-	{ 0x0000, 0x07,   47 }, { 0x0000, 0x08,   48 }, { 0x0000, 0x09,   49 },
-	{ 0x0000, 0x0A,   50 }, { 0x0000, 0x0B,   51 }, { 0x0000, 0x0C,   52 },
-	{ 0x0000, 0x0D,   53 }, { 0x0000, 0x0E,   54 }, { 0x0000, 0x0F,   55 },
-	{ 0x0000, 0x10,   57 }, { 0x0000, 0x11,   58 }, { 0x0000, 0x12,   59 },
-	{ 0x0000, 0x13,   60 }, { 0x0000, 0x14,   62 }, { 0x0000, 0x15,   63 },
-	{ 0x0000, 0x16,   64 }, { 0x0000, 0x17,   66 }, { 0x0000, 0x18,   67 },
-	{ 0x0000, 0x19,   69 }, { 0x0000, 0x1A,   70 }, { 0x0000, 0x1B,   72 },
-	{ 0x0000, 0x1C,   73 }, { 0x0000, 0x1D,   75 }, { 0x0000, 0x1e,   77 },
-	{ 0x0000, 0x1f,   78 }, { 0x0000, 0x20,   80 }, { 0x0400, 0x21,   82 },
-	{ 0x0800, 0x22,   84 }, { 0x0c00, 0x23,   85 }, { 0x1000, 0x24,   87 },
-	{ 0x1400, 0x25,   89 }, { 0x1800, 0x26,   91 }, { 0x1c00, 0x27,   93 },
-	{ 0x2000, 0x28,   95 }, { 0x2400, 0x29,   97 }, { 0x2800, 0x2a,   99 },
-	{ 0x2c00, 0x2b,  102 }, { 0x3000, 0x2c,  104 }, { 0x3400, 0x2d,  106 },
-	{ 0x3800, 0x2e,  108 }, { 0x3c00, 0x2f,  111 }, { 0x4000, 0x30,  113 },
-	{ 0x4400, 0x31,  116 }, { 0x4800, 0x32,  118 }, { 0x4c00, 0x33,  121 },
-	{ 0x5000, 0x34,  123 }, { 0x5400, 0x35,  126 }, { 0x5800, 0x36,  129 },
-	{ 0x5c00, 0x37,  132 }, { 0x6000, 0x38,  135 }, { 0x6400, 0x39,  137 },
-	{ 0x6800, 0x3a,  141 }, { 0x6c00, 0x3b,  144 }, { 0x7000, 0x3c,  147 },
-	{ 0x7400, 0x3d,  150 }, { 0x7800, 0x3e,  153 }, { 0x7c00, 0x3f,  157 },
-	{ 0x8000, 0x40,  160 }, { 0x8400, 0x41,  164 }, { 0x8800, 0x42,  167 },
-	{ 0x8c00, 0x43,  171 }, { 0x9000, 0x44,  174 }, { 0x9400, 0x45,  178 },
-	{ 0x9800, 0x46,  182 }, { 0x9c00, 0x47,  186 }, { 0xa000, 0x48,  190 },
-	{ 0xa400, 0x49,  194 }, { 0xa800, 0x4a,  199 }, { 0xac00, 0x4b,  203 },
-	{ 0xb000, 0x4c,  207 }, { 0xb400, 0x4d,  212 }, { 0xb800, 0x4e,  217 },
-	{ 0xbc00, 0x4f,  221 }, { 0xc000, 0x50,  226 }, { 0xc400, 0x51,  231 },
-	{ 0xc800, 0x52,  236 }, { 0xcc00, 0x53,  241 }, { 0xd000, 0x54,  247 },
-	{ 0xd400, 0x55,  252 }, { 0xd800, 0x56,  258 }, { 0xdc00, 0x57,  263 },
-	{ 0xe000, 0x58,  269 }, { 0xe400, 0x59,  275 }, { 0xe800, 0x5a,  281 },
-	{ 0xec00, 0x5b,  287 }, { 0xf000, 0x5c,  293 }, { 0xf400, 0x5d,  300 },
-	{ 0xf800, 0x5e,  306 }, { 0xfc00, 0x5f,  313 }, { 0x0001, 0x60,  320 },
-	{ 0x0401, 0x61,  327 }, { 0x0801, 0x62,  334 }, { 0x0c01, 0x63,  341 },
-	{ 0x1001, 0x64,  349 }, { 0x1401, 0x65,  357 }, { 0x1801, 0x66,  364 },
-	{ 0x1c01, 0x67,  372 }, { 0x2001, 0x68,  381 }, { 0x2401, 0x69,  389 },
-	{ 0x2801, 0x6a,  397 }, { 0x2c01, 0x6b,  406 }, { 0x3001, 0x6c,  415 },
-	{ 0x3401, 0x6d,  424 }, { 0x3801, 0x6e,  433 }, { 0x3c01, 0x6f,  443 },
-	{ 0x4001, 0x70,  453 }, { 0x4401, 0x71,  462 }, { 0x4801, 0x72,  473 },
-	{ 0x4c01, 0x73,  483 }, { 0x5001, 0x74,  494 }, { 0x5401, 0x75,  504 },
-	{ 0x5801, 0x76,  515 }, { 0x5c01, 0x77,  527 }, { 0x6001, 0x78,  538 },
-	{ 0x6401, 0x79,  550 }, { 0x6801, 0x7a,  562 }, { 0x6c01, 0x7b,  574 },
-	{ 0x7001, 0x7c,  587 }, { 0x7401, 0x7d,  600 }, { 0x7801, 0x7e,  613 },
-	{ 0x7c01, 0x7f,  626 }, { 0x8001, 0x00,  640 }, { 0x8401, 0x00,  654 },
-	{ 0x8801, 0x00,  668 }, { 0x8c01, 0x00,  683 }, { 0x9001, 0x00,  698 },
-	{ 0x9401, 0x00,  713 }, { 0x9801, 0x00,  729 }, { 0x9c01, 0x00,  745 },
-	{ 0xa001, 0x00,  761 }, { 0xa401, 0x00,  778 }, { 0xa801, 0x00,  795 },
-	{ 0xac01, 0x00,  812 }, { 0xb001, 0x00,  830 }, { 0xb401, 0x00,  848 },
-	{ 0xb801, 0x00,  867 }, { 0xbc01, 0x00,  886 }, { 0xc001, 0x00,  905 },
-	{ 0xc401, 0x00,  925 }, { 0xc801, 0x00,  945 }, { 0xcc01, 0x00,  966 },
-	{ 0xd001, 0x00,  987 }, { 0xd401, 0x00, 1009 }, { 0xd801, 0x00, 1031 },
-	{ 0xdc01, 0x00, 1053 }, { 0xe001, 0x00, 1076 }, { 0xe401, 0x00, 1100 },
-	{ 0xe801, 0x00, 1124 }, { 0xec01, 0x00, 1149 }, { 0xf001, 0x00, 1174 },
-	{ 0xf401, 0x00, 1199 }, { 0xf801, 0x00, 1226 }, { 0xfc01, 0x00, 1253 }
-};
-
-#define joycon_max_rumble_amp	(1003)
-static const struct joycon_rumble_amp_data joycon_rumble_amplitudes[] = {
-	/* high, low, amp */
-	{ 0x00, 0x0040,    0 },
-	{ 0x02, 0x8040,   10 }, { 0x04, 0x0041,   12 }, { 0x06, 0x8041,   14 },
-	{ 0x08, 0x0042,   17 }, { 0x0a, 0x8042,   20 }, { 0x0c, 0x0043,   24 },
-	{ 0x0e, 0x8043,   28 }, { 0x10, 0x0044,   33 }, { 0x12, 0x8044,   40 },
-	{ 0x14, 0x0045,   47 }, { 0x16, 0x8045,   56 }, { 0x18, 0x0046,   67 },
-	{ 0x1a, 0x8046,   80 }, { 0x1c, 0x0047,   95 }, { 0x1e, 0x8047,  112 },
-	{ 0x20, 0x0048,  117 }, { 0x22, 0x8048,  123 }, { 0x24, 0x0049,  128 },
-	{ 0x26, 0x8049,  134 }, { 0x28, 0x004a,  140 }, { 0x2a, 0x804a,  146 },
-	{ 0x2c, 0x004b,  152 }, { 0x2e, 0x804b,  159 }, { 0x30, 0x004c,  166 },
-	{ 0x32, 0x804c,  173 }, { 0x34, 0x004d,  181 }, { 0x36, 0x804d,  189 },
-	{ 0x38, 0x004e,  198 }, { 0x3a, 0x804e,  206 }, { 0x3c, 0x004f,  215 },
-	{ 0x3e, 0x804f,  225 }, { 0x40, 0x0050,  230 }, { 0x42, 0x8050,  235 },
-	{ 0x44, 0x0051,  240 }, { 0x46, 0x8051,  245 }, { 0x48, 0x0052,  251 },
-	{ 0x4a, 0x8052,  256 }, { 0x4c, 0x0053,  262 }, { 0x4e, 0x8053,  268 },
-	{ 0x50, 0x0054,  273 }, { 0x52, 0x8054,  279 }, { 0x54, 0x0055,  286 },
-	{ 0x56, 0x8055,  292 }, { 0x58, 0x0056,  298 }, { 0x5a, 0x8056,  305 },
-	{ 0x5c, 0x0057,  311 }, { 0x5e, 0x8057,  318 }, { 0x60, 0x0058,  325 },
-	{ 0x62, 0x8058,  332 }, { 0x64, 0x0059,  340 }, { 0x66, 0x8059,  347 },
-	{ 0x68, 0x005a,  355 }, { 0x6a, 0x805a,  362 }, { 0x6c, 0x005b,  370 },
-	{ 0x6e, 0x805b,  378 }, { 0x70, 0x005c,  387 }, { 0x72, 0x805c,  395 },
-	{ 0x74, 0x005d,  404 }, { 0x76, 0x805d,  413 }, { 0x78, 0x005e,  422 },
-	{ 0x7a, 0x805e,  431 }, { 0x7c, 0x005f,  440 }, { 0x7e, 0x805f,  450 },
-	{ 0x80, 0x0060,  460 }, { 0x82, 0x8060,  470 }, { 0x84, 0x0061,  480 },
-	{ 0x86, 0x8061,  491 }, { 0x88, 0x0062,  501 }, { 0x8a, 0x8062,  512 },
-	{ 0x8c, 0x0063,  524 }, { 0x8e, 0x8063,  535 }, { 0x90, 0x0064,  547 },
-	{ 0x92, 0x8064,  559 }, { 0x94, 0x0065,  571 }, { 0x96, 0x8065,  584 },
-	{ 0x98, 0x0066,  596 }, { 0x9a, 0x8066,  609 }, { 0x9c, 0x0067,  623 },
-	{ 0x9e, 0x8067,  636 }, { 0xa0, 0x0068,  650 }, { 0xa2, 0x8068,  665 },
-	{ 0xa4, 0x0069,  679 }, { 0xa6, 0x8069,  694 }, { 0xa8, 0x006a,  709 },
-	{ 0xaa, 0x806a,  725 }, { 0xac, 0x006b,  741 }, { 0xae, 0x806b,  757 },
-	{ 0xb0, 0x006c,  773 }, { 0xb2, 0x806c,  790 }, { 0xb4, 0x006d,  808 },
-	{ 0xb6, 0x806d,  825 }, { 0xb8, 0x006e,  843 }, { 0xba, 0x806e,  862 },
-	{ 0xbc, 0x006f,  881 }, { 0xbe, 0x806f,  900 }, { 0xc0, 0x0070,  920 },
-	{ 0xc2, 0x8070,  940 }, { 0xc4, 0x0071,  960 }, { 0xc6, 0x8071,  981 },
-	{ 0xc8, 0x0072, joycon_max_rumble_amp }
-};
-static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160;
-static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320;
-#endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */
-static const u16 JC_RUMBLE_PERIOD_MS = 50;
+static const u16 JC_MAX_STICK_MAG		= 32767;
+static const u16 JC_STICK_FUZZ			= 250;
+static const u16 JC_STICK_FLAT			= 500;
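+/*
+ * Editorial note: fuzz and flat are interpreted by the input core, not by
+ * this driver -- deltas smaller than JC_STICK_FUZZ are filtered as noise,
+ * and values within JC_STICK_FLAT of center are flattened to a dead zone.
+ */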
 
 /* States for controller state machine */
 enum joycon_ctlr_state {
 	JOYCON_CTLR_STATE_INIT,
 	JOYCON_CTLR_STATE_READ,
-	JOYCON_CTLR_STATE_REMOVED,
-};
-
-/* Controller type received as part of device info */
-enum joycon_ctlr_type {
-	JOYCON_CTLR_TYPE_JCL = 0x01,
-	JOYCON_CTLR_TYPE_JCR = 0x02,
-	JOYCON_CTLR_TYPE_PRO = 0x03,
 };
 
 struct joycon_stick_cal {
@@ -315,11 +114,6 @@
 	s32 center;
 };
 
-struct joycon_imu_cal {
-	s16 offset[3];
-	s16 scale[3];
-};
-
 /*
  * All the controller's button values are stored in a u32.
  * They can be accessed with bitwise ANDs.
@@ -353,33 +147,18 @@
 	JOYCON_MSG_TYPE_SUBCMD,
 };
 
-struct joycon_rumble_output {
-	u8 output_id;
-	u8 packet_num;
-	u8 rumble_data[8];
-} __packed;
-
 struct joycon_subcmd_request {
 	u8 output_id; /* must be 0x01 for subcommand, 0x10 for rumble only */
 	u8 packet_num; /* incremented every send */
 	u8 rumble_data[8];
 	u8 subcmd_id;
-	u8 data[]; /* length depends on the subcommand */
+	u8 data[0]; /* length depends on the subcommand */
 } __packed;
 
 struct joycon_subcmd_reply {
 	u8 ack; /* MSB 1 for ACK, 0 for NACK */
 	u8 id; /* id of requested subcmd */
-	u8 data[]; /* will be at most 35 bytes */
-} __packed;
-
-struct joycon_imu_data {
-	s16 accel_x;
-	s16 accel_y;
-	s16 accel_z;
-	s16 gyro_x;
-	s16 gyro_y;
-	s16 gyro_z;
+	u8 data[0]; /* will be at most 35 bytes */
 } __packed;
 
 struct joycon_input_report {
@@ -391,38 +170,20 @@
 	u8 right_stick[3];
 	u8 vibrator_report;
 
-	union {
-		struct joycon_subcmd_reply subcmd_reply;
-		/* IMU input reports contain 3 samples */
-		u8 imu_raw_bytes[sizeof(struct joycon_imu_data) * 3];
-	};
+	/*
+	 * If support for firmware updates, gyroscope data, and/or NFC/IR
+	 * is added in the future, this can be swapped for a union.
+	 */
+	struct joycon_subcmd_reply reply;
 } __packed;
 
 #define JC_MAX_RESP_SIZE	(sizeof(struct joycon_input_report) + 35)
-#define JC_RUMBLE_DATA_SIZE	8
-#define JC_RUMBLE_QUEUE_SIZE	8
-
-static const unsigned short JC_RUMBLE_ZERO_AMP_PKT_CNT = 5;
-
-static const char * const joycon_player_led_names[] = {
-	LED_FUNCTION_PLAYER1,
-	LED_FUNCTION_PLAYER2,
-	LED_FUNCTION_PLAYER3,
-	LED_FUNCTION_PLAYER4,
-};
-#define JC_NUM_LEDS		ARRAY_SIZE(joycon_player_led_names)
 
 /* Each physical controller is associated with a joycon_ctlr struct */
 struct joycon_ctlr {
 	struct hid_device *hdev;
 	struct input_dev *input;
-	struct led_classdev leds[JC_NUM_LEDS]; /* player leds */
-	struct led_classdev home_led;
 	enum joycon_ctlr_state ctlr_state;
-	spinlock_t lock;
-	u8 mac_addr[6];
-	char *mac_addr_str;
-	enum joycon_ctlr_type ctlr_type;
 
 	/* The following members are used for synchronous sends/receives */
 	enum joycon_msg_type msg_type;
@@ -433,8 +194,6 @@
 	bool received_resp;
 	u8 usb_ack_match;
 	u8 subcmd_ack_match;
-	bool received_input_report;
-	unsigned int last_subcmd_sent_msecs;
 
 	/* factory calibration data */
 	struct joycon_stick_cal left_stick_cal_x;
@@ -442,64 +201,8 @@
 	struct joycon_stick_cal right_stick_cal_x;
 	struct joycon_stick_cal right_stick_cal_y;
 
-	struct joycon_imu_cal accel_cal;
-	struct joycon_imu_cal gyro_cal;
-
-	/* prevents needlessly recalculating these divisors every sample */
-	s32 imu_cal_accel_divisor[3];
-	s32 imu_cal_gyro_divisor[3];
-
-	/* power supply data */
-	struct power_supply *battery;
-	struct power_supply_desc battery_desc;
-	u8 battery_capacity;
-	bool battery_charging;
-	bool host_powered;
-
-	/* rumble */
-	u8 rumble_data[JC_RUMBLE_QUEUE_SIZE][JC_RUMBLE_DATA_SIZE];
-	int rumble_queue_head;
-	int rumble_queue_tail;
-	struct workqueue_struct *rumble_queue;
-	struct work_struct rumble_worker;
-	unsigned int rumble_msecs;
-	u16 rumble_ll_freq;
-	u16 rumble_lh_freq;
-	u16 rumble_rl_freq;
-	u16 rumble_rh_freq;
-	unsigned short rumble_zero_countdown;
-
-	/* imu */
-	struct input_dev *imu_input;
-	bool imu_first_packet_received; /* helps in initiating timestamp */
-	unsigned int imu_timestamp_us; /* timestamp we report to userspace */
-	unsigned int imu_last_pkt_ms; /* used to calc imu report delta */
-	/* the following are used to track the average imu report time delta */
-	unsigned int imu_delta_samples_count;
-	unsigned int imu_delta_samples_sum;
-	unsigned int imu_avg_delta_ms;
 };
 
-/* Helper macros for checking controller type */
-#define jc_type_is_joycon(ctlr) \
-	(ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONL || \
-	 ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONR || \
-	 ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_CHRGGRIP)
-#define jc_type_is_procon(ctlr) \
-	(ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_PROCON)
-#define jc_type_is_chrggrip(ctlr) \
-	(ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_CHRGGRIP)
-
-/* Does this controller have inputs associated with left joycon? */
-#define jc_type_has_left(ctlr) \
-	(ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCL || \
-	 ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO)
-
-/* Does this controller have inputs associated with right joycon? */
-#define jc_type_has_right(ctlr) \
-	(ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCR || \
-	 ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO)
-
 static int __joycon_hid_send(struct hid_device *hdev, u8 *data, size_t len)
 {
 	u8 *buf;
@@ -515,91 +218,27 @@
 	return ret;
 }
 
-static void joycon_wait_for_input_report(struct joycon_ctlr *ctlr)
+static int joycon_hid_send_sync(struct joycon_ctlr *ctlr, u8 *data, size_t len)
 {
 	int ret;
 
-	/*
-	 * If we are in the proper reporting mode, wait for an input
-	 * report prior to sending the subcommand. This improves
-	 * reliability considerably.
-	 */
-	if (ctlr->ctlr_state == JOYCON_CTLR_STATE_READ) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&ctlr->lock, flags);
-		ctlr->received_input_report = false;
-		spin_unlock_irqrestore(&ctlr->lock, flags);
-		ret = wait_event_timeout(ctlr->wait,
-					 ctlr->received_input_report,
-					 HZ / 4);
-		/* We will still proceed, even with a timeout here */
-		if (!ret)
-			hid_warn(ctlr->hdev,
-				 "timeout waiting for input report\n");
+	ret = __joycon_hid_send(ctlr->hdev, data, len);
+	if (ret < 0) {
+		memset(ctlr->input_buf, 0, JC_MAX_RESP_SIZE);
+		return ret;
 	}
-}
 
-/*
- * Sending subcommands and/or rumble data at too high a rate can cause bluetooth
- * controller disconnections.
- */
-static void joycon_enforce_subcmd_rate(struct joycon_ctlr *ctlr)
-{
-	static const unsigned int max_subcmd_rate_ms = 25;
-	unsigned int current_ms = jiffies_to_msecs(jiffies);
-	unsigned int delta_ms = current_ms - ctlr->last_subcmd_sent_msecs;
-
-	while (delta_ms < max_subcmd_rate_ms &&
-	       ctlr->ctlr_state == JOYCON_CTLR_STATE_READ) {
-		joycon_wait_for_input_report(ctlr);
-		current_ms = jiffies_to_msecs(jiffies);
-		delta_ms = current_ms - ctlr->last_subcmd_sent_msecs;
-	}
-	ctlr->last_subcmd_sent_msecs = current_ms;
-}
-
-static int joycon_hid_send_sync(struct joycon_ctlr *ctlr, u8 *data, size_t len,
-				u32 timeout)
-{
-	int ret;
-	int tries = 2;
-
-	/*
-	 * The controller occasionally seems to drop subcommands. In testing,
-	 * doing one retry after a timeout appears to always work.
-	 */
-	while (tries--) {
-		joycon_enforce_subcmd_rate(ctlr);
-
-		ret = __joycon_hid_send(ctlr->hdev, data, len);
-		if (ret < 0) {
-			memset(ctlr->input_buf, 0, JC_MAX_RESP_SIZE);
-			return ret;
-		}
-
-		ret = wait_event_timeout(ctlr->wait, ctlr->received_resp,
-					 timeout);
-		if (!ret) {
-			hid_dbg(ctlr->hdev,
-				"synchronous send/receive timed out\n");
-			if (tries) {
-				hid_dbg(ctlr->hdev,
-					"retrying sync send after timeout\n");
-			}
-			memset(ctlr->input_buf, 0, JC_MAX_RESP_SIZE);
-			ret = -ETIMEDOUT;
-		} else {
-			ret = 0;
-			break;
-		}
+	if (!wait_event_timeout(ctlr->wait, ctlr->received_resp, HZ)) {
+		hid_dbg(ctlr->hdev, "synchronous send/receive timed out\n");
+		memset(ctlr->input_buf, 0, JC_MAX_RESP_SIZE);
+		return -ETIMEDOUT;
 	}
 
 	ctlr->received_resp = false;
-	return ret;
+	return 0;
 }
 
-static int joycon_send_usb(struct joycon_ctlr *ctlr, u8 cmd, u32 timeout)
+static int joycon_send_usb(struct joycon_ctlr *ctlr, u8 cmd)
 {
 	int ret;
 	u8 buf[2] = {JC_OUTPUT_USB_CMD};
@@ -607,7 +246,7 @@
 	buf[1] = cmd;
 	ctlr->usb_ack_match = cmd;
 	ctlr->msg_type = JOYCON_MSG_TYPE_USB;
-	ret = joycon_hid_send_sync(ctlr, buf, sizeof(buf), timeout);
+	ret = joycon_hid_send_sync(ctlr, buf, sizeof(buf));
 	if (ret)
 		hid_dbg(ctlr->hdev, "send usb command failed; ret=%d\n", ret);
 	return ret;
@@ -615,23 +254,9 @@
 
 static int joycon_send_subcmd(struct joycon_ctlr *ctlr,
 			      struct joycon_subcmd_request *subcmd,
-			      size_t data_len, u32 timeout)
+			      size_t data_len)
 {
 	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ctlr->lock, flags);
-	/*
-	 * If the controller has been removed, just return ENODEV so the LED
-	 * subsystem doesn't print invalid errors on removal.
-	 */
-	if (ctlr->ctlr_state == JOYCON_CTLR_STATE_REMOVED) {
-		spin_unlock_irqrestore(&ctlr->lock, flags);
-		return -ENODEV;
-	}
-	memcpy(subcmd->rumble_data, ctlr->rumble_data[ctlr->rumble_queue_tail],
-	       JC_RUMBLE_DATA_SIZE);
-	spin_unlock_irqrestore(&ctlr->lock, flags);
 
 	subcmd->output_id = JC_OUTPUT_RUMBLE_AND_SUBCMD;
 	subcmd->packet_num = ctlr->subcmd_num;
@@ -641,7 +266,7 @@
 	ctlr->msg_type = JOYCON_MSG_TYPE_SUBCMD;
 
 	ret = joycon_hid_send_sync(ctlr, (u8 *)subcmd,
-				   sizeof(*subcmd) + data_len, timeout);
+				   sizeof(*subcmd) + data_len);
 	if (ret < 0)
 		hid_dbg(ctlr->hdev, "send subcommand failed; ret=%d\n", ret);
 	else
@@ -660,108 +285,7 @@
 	req->data[0] = (flash << 4) | on;
 
 	hid_dbg(ctlr->hdev, "setting player leds\n");
-	return joycon_send_subcmd(ctlr, req, 1, HZ/4);
-}
-
-static int joycon_request_spi_flash_read(struct joycon_ctlr *ctlr,
-					 u32 start_addr, u8 size, u8 **reply)
-{
-	struct joycon_subcmd_request *req;
-	struct joycon_input_report *report;
-	u8 buffer[sizeof(*req) + 5] = { 0 };
-	u8 *data;
-	int ret;
-
-	if (!reply)
-		return -EINVAL;
-
-	req = (struct joycon_subcmd_request *)buffer;
-	req->subcmd_id = JC_SUBCMD_SPI_FLASH_READ;
-	data = req->data;
-	put_unaligned_le32(start_addr, data);
-	data[4] = size;
-
-	hid_dbg(ctlr->hdev, "requesting SPI flash data\n");
-	ret = joycon_send_subcmd(ctlr, req, 5, HZ);
-	if (ret) {
-		hid_err(ctlr->hdev, "failed reading SPI flash; ret=%d\n", ret);
-	} else {
-		report = (struct joycon_input_report *)ctlr->input_buf;
-		/* The read data starts at the 6th byte */
-		*reply = &report->subcmd_reply.data[5];
-	}
-	return ret;
-}
-
-/*
- * User calibration's presence is denoted by a two-byte magic value
- * preceding it.
- * Returns 0 if the magic value is present, 1 if not present, < 0 on error.
- */
-static int joycon_check_for_cal_magic(struct joycon_ctlr *ctlr, u32 flash_addr)
-{
-	int ret;
-	u8 *reply;
-
-	ret = joycon_request_spi_flash_read(ctlr, flash_addr,
-					    JC_CAL_USR_MAGIC_SIZE, &reply);
-	if (ret)
-		return ret;
-
-	return reply[0] != JC_CAL_USR_MAGIC_0 || reply[1] != JC_CAL_USR_MAGIC_1;
-}
-
-static int joycon_read_stick_calibration(struct joycon_ctlr *ctlr, u16 cal_addr,
-					 struct joycon_stick_cal *cal_x,
-					 struct joycon_stick_cal *cal_y,
-					 bool left_stick)
-{
-	s32 x_max_above;
-	s32 x_min_below;
-	s32 y_max_above;
-	s32 y_min_below;
-	u8 *raw_cal;
-	int ret;
-
-	ret = joycon_request_spi_flash_read(ctlr, cal_addr,
-					    JC_CAL_STICK_DATA_SIZE, &raw_cal);
-	if (ret)
-		return ret;
-
-	/* stick calibration parsing: note the order differs based on stick */
-	if (left_stick) {
-		x_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 0), 0,
-						12);
-		y_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 1), 4,
-						12);
-		cal_x->center = hid_field_extract(ctlr->hdev, (raw_cal + 3), 0,
-						  12);
-		cal_y->center = hid_field_extract(ctlr->hdev, (raw_cal + 4), 4,
-						  12);
-		x_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 6), 0,
-						12);
-		y_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 7), 4,
-						12);
-	} else {
-		cal_x->center = hid_field_extract(ctlr->hdev, (raw_cal + 0), 0,
-						  12);
-		cal_y->center = hid_field_extract(ctlr->hdev, (raw_cal + 1), 4,
-						  12);
-		x_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 3), 0,
-						12);
-		y_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 4), 4,
-						12);
-		x_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 6), 0,
-						12);
-		y_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 7), 4,
-						12);
-	}
-
-	cal_x->max = cal_x->center + x_max_above;
-	cal_x->min = cal_x->center - x_min_below;
-	cal_y->max = cal_y->center + y_max_above;
-	cal_y->min = cal_y->center - y_min_below;
-
-	return 0;
+	return joycon_send_subcmd(ctlr, req, 1);
 }
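 /*
  * Usage sketch (editorial, based on the (flash << 4) | on packing above):
  * joycon_set_player_leds(ctlr, 0, 0x1) would light only the player-1 LED
  * steadily, while bits in the upper nibble select flashing LEDs instead.
  */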
 
 static const u16 DFLT_STICK_CAL_CEN = 2000;
@@ -769,34 +293,33 @@
 static const u16 DFLT_STICK_CAL_MIN = 500;
 static int joycon_request_calibration(struct joycon_ctlr *ctlr)
 {
-	u16 left_stick_addr = JC_CAL_FCT_DATA_LEFT_ADDR;
-	u16 right_stick_addr = JC_CAL_FCT_DATA_RIGHT_ADDR;
+	struct joycon_subcmd_request *req;
+	u8 buffer[sizeof(*req) + 5] = { 0 };
+	struct joycon_input_report *report;
+	struct joycon_stick_cal *cal_x;
+	struct joycon_stick_cal *cal_y;
+	s32 x_max_above;
+	s32 x_min_below;
+	s32 y_max_above;
+	s32 y_min_below;
+	u8 *data;
+	u8 *raw_cal;
 	int ret;
 
+	req = (struct joycon_subcmd_request *)buffer;
+	req->subcmd_id = JC_SUBCMD_SPI_FLASH_READ;
+	data = req->data;
+	data[0] = 0xFF & JC_CAL_DATA_START;
+	data[1] = 0xFF & (JC_CAL_DATA_START >> 8);
+	data[2] = 0xFF & (JC_CAL_DATA_START >> 16);
+	data[3] = 0xFF & (JC_CAL_DATA_START >> 24);
+	data[4] = JC_CAL_DATA_SIZE;
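+	/*
+	 * Editorial note: the four stores above are an open-coded
+	 * little-endian write of the 32-bit flash address;
+	 * put_unaligned_le32(JC_CAL_DATA_START, data) would be equivalent.
+	 */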
+
 	hid_dbg(ctlr->hdev, "requesting cal data\n");
-
-	/* check if user stick calibrations are present */
-	if (!joycon_check_for_cal_magic(ctlr, JC_CAL_USR_LEFT_MAGIC_ADDR)) {
-		left_stick_addr = JC_CAL_USR_LEFT_DATA_ADDR;
-		hid_info(ctlr->hdev, "using user cal for left stick\n");
-	} else {
-		hid_info(ctlr->hdev, "using factory cal for left stick\n");
-	}
-	if (!joycon_check_for_cal_magic(ctlr, JC_CAL_USR_RIGHT_MAGIC_ADDR)) {
-		right_stick_addr = JC_CAL_USR_RIGHT_DATA_ADDR;
-		hid_info(ctlr->hdev, "using user cal for right stick\n");
-	} else {
-		hid_info(ctlr->hdev, "using factory cal for right stick\n");
-	}
-
-	/* read the left stick calibration data */
-	ret = joycon_read_stick_calibration(ctlr, left_stick_addr,
-					    &ctlr->left_stick_cal_x,
-					    &ctlr->left_stick_cal_y,
-					    true);
+	ret = joycon_send_subcmd(ctlr, req, 5);
 	if (ret) {
 		hid_warn(ctlr->hdev,
-			 "Failed to read left stick cal, using dflts; e=%d\n",
+			 "Failed to read stick cal, using defaults; ret=%d\n",
 			 ret);
 
 		ctlr->left_stick_cal_x.center = DFLT_STICK_CAL_CEN;
@@ -806,17 +329,6 @@
 		ctlr->left_stick_cal_y.center = DFLT_STICK_CAL_CEN;
 		ctlr->left_stick_cal_y.max = DFLT_STICK_CAL_MAX;
 		ctlr->left_stick_cal_y.min = DFLT_STICK_CAL_MIN;
-	}
-
-	/* read the right stick calibration data */
-	ret = joycon_read_stick_calibration(ctlr, right_stick_addr,
-					    &ctlr->right_stick_cal_x,
-					    &ctlr->right_stick_cal_y,
-					    false);
-	if (ret) {
-		hid_warn(ctlr->hdev,
-			 "Failed to read right stick cal, using dflts; e=%d\n",
-			 ret);
 
 		ctlr->right_stick_cal_x.center = DFLT_STICK_CAL_CEN;
 		ctlr->right_stick_cal_x.max = DFLT_STICK_CAL_MAX;
@@ -825,8 +337,44 @@
 		ctlr->right_stick_cal_y.center = DFLT_STICK_CAL_CEN;
 		ctlr->right_stick_cal_y.max = DFLT_STICK_CAL_MAX;
 		ctlr->right_stick_cal_y.min = DFLT_STICK_CAL_MIN;
+
+		return ret;
 	}
 
+	report = (struct joycon_input_report *)ctlr->input_buf;
+	raw_cal = &report->reply.data[5];
+
+	/* left stick calibration parsing */
+	cal_x = &ctlr->left_stick_cal_x;
+	cal_y = &ctlr->left_stick_cal_y;
+
+	x_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 0), 0, 12);
+	y_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 1), 4, 12);
+	cal_x->center = hid_field_extract(ctlr->hdev, (raw_cal + 3), 0, 12);
+	cal_y->center = hid_field_extract(ctlr->hdev, (raw_cal + 4), 4, 12);
+	x_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 6), 0, 12);
+	y_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 7), 4, 12);
+	cal_x->max = cal_x->center + x_max_above;
+	cal_x->min = cal_x->center - x_min_below;
+	cal_y->max = cal_y->center + y_max_above;
+	cal_y->min = cal_y->center - y_min_below;
+
+	/* right stick calibration parsing */
+	raw_cal += 9;
+	cal_x = &ctlr->right_stick_cal_x;
+	cal_y = &ctlr->right_stick_cal_y;
+
+	cal_x->center = hid_field_extract(ctlr->hdev, (raw_cal + 0), 0, 12);
+	cal_y->center = hid_field_extract(ctlr->hdev, (raw_cal + 1), 4, 12);
+	x_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 3), 0, 12);
+	y_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 4), 4, 12);
+	x_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 6), 0, 12);
+	y_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 7), 4, 12);
+	cal_x->max = cal_x->center + x_max_above;
+	cal_x->min = cal_x->center - x_min_below;
+	cal_y->max = cal_y->center + y_max_above;
+	cal_y->min = cal_y->center - y_min_below;
+
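+	/*
+	 * Worked example (editorial): each 9-byte blob packs six 12-bit
+	 * values. For bytes b0 b1 b2, the extraction at bit offset 0 yields
+	 * b0 | ((b1 & 0x0f) << 8) and the one at offset 4 yields
+	 * (b1 >> 4) | (b2 << 4), matching the hid_field_extract() calls.
+	 */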
 	hid_dbg(ctlr->hdev, "calibration:\n"
 			    "l_x_c=%d l_x_max=%d l_x_min=%d\n"
 			    "l_y_c=%d l_y_max=%d l_y_min=%d\n"
@@ -848,94 +396,6 @@
 	return 0;
 }
 
-/*
- * These divisors are calculated once rather than for each sample. They are only
- * dependent on the IMU calibration values. They are used when processing the
- * IMU input reports.
- */
-static void joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr)
-{
-	int i;
-
-	for (i = 0; i < 3; i++) {
-		ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] -
-						ctlr->accel_cal.offset[i];
-		ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] -
-						ctlr->gyro_cal.offset[i];
-	}
-}
-
-static const s16 DFLT_ACCEL_OFFSET /*= 0*/;
-static const s16 DFLT_ACCEL_SCALE = 16384;
-static const s16 DFLT_GYRO_OFFSET /*= 0*/;
-static const s16 DFLT_GYRO_SCALE  = 13371;
-static int joycon_request_imu_calibration(struct joycon_ctlr *ctlr)
-{
-	u16 imu_cal_addr = JC_IMU_CAL_FCT_DATA_ADDR;
-	u8 *raw_cal;
-	int ret;
-	int i;
-
-	/* check if user calibration exists */
-	if (!joycon_check_for_cal_magic(ctlr, JC_IMU_CAL_USR_MAGIC_ADDR)) {
-		imu_cal_addr = JC_IMU_CAL_USR_DATA_ADDR;
-		hid_info(ctlr->hdev, "using user cal for IMU\n");
-	} else {
-		hid_info(ctlr->hdev, "using factory cal for IMU\n");
-	}
-
-	/* request IMU calibration data */
-	hid_dbg(ctlr->hdev, "requesting IMU cal data\n");
-	ret = joycon_request_spi_flash_read(ctlr, imu_cal_addr,
-					    JC_IMU_CAL_DATA_SIZE, &raw_cal);
-	if (ret) {
-		hid_warn(ctlr->hdev,
-			 "Failed to read IMU cal, using defaults; ret=%d\n",
-			 ret);
-
-		for (i = 0; i < 3; i++) {
-			ctlr->accel_cal.offset[i] = DFLT_ACCEL_OFFSET;
-			ctlr->accel_cal.scale[i] = DFLT_ACCEL_SCALE;
-			ctlr->gyro_cal.offset[i] = DFLT_GYRO_OFFSET;
-			ctlr->gyro_cal.scale[i] = DFLT_GYRO_SCALE;
-		}
-		joycon_calc_imu_cal_divisors(ctlr);
-		return ret;
-	}
-
-	/* IMU calibration parsing */
-	for (i = 0; i < 3; i++) {
-		int j = i * 2;
-
-		ctlr->accel_cal.offset[i] = get_unaligned_le16(raw_cal + j);
-		ctlr->accel_cal.scale[i] = get_unaligned_le16(raw_cal + j + 6);
-		ctlr->gyro_cal.offset[i] = get_unaligned_le16(raw_cal + j + 12);
-		ctlr->gyro_cal.scale[i] = get_unaligned_le16(raw_cal + j + 18);
-	}
-
-	joycon_calc_imu_cal_divisors(ctlr);
-
-	hid_dbg(ctlr->hdev, "IMU calibration:\n"
-			    "a_o[0]=%d a_o[1]=%d a_o[2]=%d\n"
-			    "a_s[0]=%d a_s[1]=%d a_s[2]=%d\n"
-			    "g_o[0]=%d g_o[1]=%d g_o[2]=%d\n"
-			    "g_s[0]=%d g_s[1]=%d g_s[2]=%d\n",
-			    ctlr->accel_cal.offset[0],
-			    ctlr->accel_cal.offset[1],
-			    ctlr->accel_cal.offset[2],
-			    ctlr->accel_cal.scale[0],
-			    ctlr->accel_cal.scale[1],
-			    ctlr->accel_cal.scale[2],
-			    ctlr->gyro_cal.offset[0],
-			    ctlr->gyro_cal.offset[1],
-			    ctlr->gyro_cal.offset[2],
-			    ctlr->gyro_cal.scale[0],
-			    ctlr->gyro_cal.scale[1],
-			    ctlr->gyro_cal.scale[2]);
-
-	return 0;
-}
-
 static int joycon_set_report_mode(struct joycon_ctlr *ctlr)
 {
 	struct joycon_subcmd_request *req;
@@ -946,33 +406,7 @@
 	req->data[0] = 0x30; /* standard, full report mode */
 
 	hid_dbg(ctlr->hdev, "setting controller report mode\n");
-	return joycon_send_subcmd(ctlr, req, 1, HZ);
-}
-
-static int joycon_enable_rumble(struct joycon_ctlr *ctlr)
-{
-	struct joycon_subcmd_request *req;
-	u8 buffer[sizeof(*req) + 1] = { 0 };
-
-	req = (struct joycon_subcmd_request *)buffer;
-	req->subcmd_id = JC_SUBCMD_ENABLE_VIBRATION;
-	req->data[0] = 0x01; /* note: 0x00 would disable */
-
-	hid_dbg(ctlr->hdev, "enabling rumble\n");
-	return joycon_send_subcmd(ctlr, req, 1, HZ/4);
-}
-
-static int joycon_enable_imu(struct joycon_ctlr *ctlr)
-{
-	struct joycon_subcmd_request *req;
-	u8 buffer[sizeof(*req) + 1] = { 0 };
-
-	req = (struct joycon_subcmd_request *)buffer;
-	req->subcmd_id = JC_SUBCMD_ENABLE_IMU;
-	req->data[0] = 0x01; /* note: 0x00 would disable */
-
-	hid_dbg(ctlr->hdev, "enabling IMU\n");
-	return joycon_send_subcmd(ctlr, req, 1, HZ);
+	return joycon_send_subcmd(ctlr, req, 1);
 }
 
 static s32 joycon_map_stick_val(struct joycon_stick_cal *cal, s32 val)
@@ -993,281 +427,16 @@
 	return new_val;
 }
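 /*
  * Editorial sketch (the function body is elided by the hunk above):
  * calibrated sticks are presumably mapped piecewise-linearly, e.g.
  * (val - cal->center) * JC_MAX_STICK_MAG / (cal->max - cal->center)
  * when val is above center, symmetrically against cal->min below it,
  * and clamped to +/-JC_MAX_STICK_MAG.
  */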
 
-static void joycon_input_report_parse_imu_data(struct joycon_ctlr *ctlr,
-					       struct joycon_input_report *rep,
-					       struct joycon_imu_data *imu_data)
-{
-	u8 *raw = rep->imu_raw_bytes;
-	int i;
-
-	for (i = 0; i < 3; i++) {
-		struct joycon_imu_data *data = &imu_data[i];
-
-		data->accel_x = get_unaligned_le16(raw + 0);
-		data->accel_y = get_unaligned_le16(raw + 2);
-		data->accel_z = get_unaligned_le16(raw + 4);
-		data->gyro_x = get_unaligned_le16(raw + 6);
-		data->gyro_y = get_unaligned_le16(raw + 8);
-		data->gyro_z = get_unaligned_le16(raw + 10);
-		/* point to next imu sample */
-		raw += sizeof(struct joycon_imu_data);
-	}
-}
-
-static void joycon_parse_imu_report(struct joycon_ctlr *ctlr,
-				    struct joycon_input_report *rep)
-{
-	struct joycon_imu_data imu_data[3] = {0}; /* 3 samples per report */
-	struct input_dev *idev = ctlr->imu_input;
-	unsigned int msecs = jiffies_to_msecs(jiffies);
-	unsigned int last_msecs = ctlr->imu_last_pkt_ms;
-	int i;
-	int value[6];
-
-	joycon_input_report_parse_imu_data(ctlr, rep, imu_data);
-
-	/*
-	 * There are complexities surrounding how we determine the timestamps we
-	 * associate with the samples we pass to userspace. The IMU input
-	 * reports do not provide us with a good timestamp. There's a quickly
-	 * incrementing 8-bit counter per input report, but it is not very
-	 * useful for this purpose (it is not entirely clear what rate it
-	 * increments at or if it varies based on packet push rate - more on
-	 * the push rate below...).
-	 *
-	 * The reverse engineering work done on the joy-cons and pro controllers
-	 * by the community seems to indicate the following:
-	 * - The controller samples the IMU every 1.35ms. It then does some of
-	 *   its own processing, probably averaging the samples out.
- * - Each IMU input report contains 3 IMU samples (usually 5 ms apart).
-	 * - In the standard reporting mode (which this driver uses exclusively)
-	 *   input reports are pushed from the controller as follows:
-	 *      * joy-con (bluetooth): every 15 ms
-	 *      * joy-cons (in charging grip via USB): every 15 ms
-	 *      * pro controller (USB): every 15 ms
-	 *      * pro controller (bluetooth): every 8 ms (this is the wildcard)
-	 *
-	 * Further complicating matters is that some bluetooth stacks are known
-	 * to alter the controller's packet rate by hardcoding the bluetooth
-	 * SSR for the switch controllers (android's stack currently sets the
-	 * SSR to 11ms for both the joy-cons and pro controllers).
-	 *
-	 * In my own testing, I've discovered that my pro controller either
-	 * reports IMU sample batches every 11ms or every 15ms. This rate is
-	 * stable after connecting. It isn't 100% clear what determines this
-	 * rate. Importantly, even when sending every 11ms, none of the samples
-	 * are duplicates. This seems to indicate that the time deltas between
-	 * reported samples can vary based on the input report rate.
-	 *
-	 * The solution employed in this driver is to keep track of the average
-	 * time delta between IMU input reports. In testing, this value has
-	 * proven to be stable, staying at 15ms or 11ms, though other hardware
-	 * configurations and bluetooth stacks could potentially see other rates
-	 * (hopefully this will become more clear as more people use the
-	 * driver).
-	 *
-	 * Keeping track of the average report delta allows us to submit our
-	 * timestamps to userspace based on that. Each report contains 3
-	 * samples, so the IMU sampling rate should be avg_time_delta/3. We can
-	 * also use this average to detect events where we have dropped a
-	 * packet. The userspace timestamp for the samples will be adjusted
- * accordingly to prevent unwanted behavior.
-	 */
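-	/*
-	 * Worked example (editorial): with avg_delta = 15 ms and 3 samples
-	 * per report, each sample advances the timestamp by 15000 / 3 =
-	 * 5000 us; a report arriving 40 ms after the previous one (dropped
-	 * threshold 15 * 3 / 2 = 22 ms) counts (40 - 22) / 15 = 1 dropped
-	 * report.
-	 */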
-	if (!ctlr->imu_first_packet_received) {
-		ctlr->imu_timestamp_us = 0;
-		ctlr->imu_delta_samples_count = 0;
-		ctlr->imu_delta_samples_sum = 0;
-		ctlr->imu_avg_delta_ms = JC_IMU_DFLT_AVG_DELTA_MS;
-		ctlr->imu_first_packet_received = true;
-	} else {
-		unsigned int delta = msecs - last_msecs;
-		unsigned int dropped_pkts;
-		unsigned int dropped_threshold;
-
-		/* avg imu report delta housekeeping */
-		ctlr->imu_delta_samples_sum += delta;
-		ctlr->imu_delta_samples_count++;
-		if (ctlr->imu_delta_samples_count >=
-		    JC_IMU_SAMPLES_PER_DELTA_AVG) {
-			ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum /
-						 ctlr->imu_delta_samples_count;
-			/* don't ever want divide by zero shenanigans */
-			if (ctlr->imu_avg_delta_ms == 0) {
-				ctlr->imu_avg_delta_ms = 1;
-				hid_warn(ctlr->hdev,
-					 "calculated avg imu delta of 0\n");
-			}
-			ctlr->imu_delta_samples_count = 0;
-			ctlr->imu_delta_samples_sum = 0;
-		}
-
-		/* useful for debugging IMU sample rate */
-		hid_dbg(ctlr->hdev,
-			"imu_report: ms=%u last_ms=%u delta=%u avg_delta=%u\n",
-			msecs, last_msecs, delta, ctlr->imu_avg_delta_ms);
-
-		/* check if any packets have been dropped */
-		dropped_threshold = ctlr->imu_avg_delta_ms * 3 / 2;
-		dropped_pkts = (delta - min(delta, dropped_threshold)) /
-				ctlr->imu_avg_delta_ms;
-		ctlr->imu_timestamp_us += 1000 * ctlr->imu_avg_delta_ms;
-		if (dropped_pkts > JC_IMU_DROPPED_PKT_WARNING) {
-			hid_warn(ctlr->hdev,
-				 "compensating for %u dropped IMU reports\n",
-				 dropped_pkts);
-			hid_warn(ctlr->hdev,
-				 "delta=%u avg_delta=%u\n",
-				 delta, ctlr->imu_avg_delta_ms);
-		}
-	}
-	ctlr->imu_last_pkt_ms = msecs;
-
-	/* Each IMU input report contains three samples */
-	for (i = 0; i < 3; i++) {
-		input_event(idev, EV_MSC, MSC_TIMESTAMP,
-			    ctlr->imu_timestamp_us);
-
-		/*
-		 * These calculations (which use the controller's calibration
-		 * settings to improve the final values) are based on those
-		 * found in the community's reverse-engineering repo (linked at
-		 * top of driver). For hid-nintendo, we make sure that the final
-		 * value given to userspace is always in terms of the axis
-		 * resolution we provided.
-		 *
-		 * Currently only the gyro calculations subtract the calibration
-		 * offsets from the raw value itself. In testing, doing the same
-		 * for the accelerometer raw values decreased accuracy.
-		 *
-		 * Note that the gyro values are multiplied by the
-		 * precision-saving scaling factor to prevent large inaccuracies
-		 * due to truncation of the resolution value which would
-		 * otherwise occur. To prevent overflow (without resorting to 64
-		 * bit integer math), the mult_frac macro is used.
-		 */
-		value[0] = mult_frac((JC_IMU_PREC_RANGE_SCALE *
-				      (imu_data[i].gyro_x -
-				       ctlr->gyro_cal.offset[0])),
-				     ctlr->gyro_cal.scale[0],
-				     ctlr->imu_cal_gyro_divisor[0]);
-		value[1] = mult_frac((JC_IMU_PREC_RANGE_SCALE *
-				      (imu_data[i].gyro_y -
-				       ctlr->gyro_cal.offset[1])),
-				     ctlr->gyro_cal.scale[1],
-				     ctlr->imu_cal_gyro_divisor[1]);
-		value[2] = mult_frac((JC_IMU_PREC_RANGE_SCALE *
-				      (imu_data[i].gyro_z -
-				       ctlr->gyro_cal.offset[2])),
-				     ctlr->gyro_cal.scale[2],
-				     ctlr->imu_cal_gyro_divisor[2]);
-
-		value[3] = ((s32)imu_data[i].accel_x *
-			    ctlr->accel_cal.scale[0]) /
-			    ctlr->imu_cal_accel_divisor[0];
-		value[4] = ((s32)imu_data[i].accel_y *
-			    ctlr->accel_cal.scale[1]) /
-			    ctlr->imu_cal_accel_divisor[1];
-		value[5] = ((s32)imu_data[i].accel_z *
-			    ctlr->accel_cal.scale[2]) /
-			    ctlr->imu_cal_accel_divisor[2];
-
-		hid_dbg(ctlr->hdev, "raw_gyro: g_x=%d g_y=%d g_z=%d\n",
-			imu_data[i].gyro_x, imu_data[i].gyro_y,
-			imu_data[i].gyro_z);
-		hid_dbg(ctlr->hdev, "raw_accel: a_x=%d a_y=%d a_z=%d\n",
-			imu_data[i].accel_x, imu_data[i].accel_y,
-			imu_data[i].accel_z);
-
-		/*
-		 * The right joy-con has 2 axes negated, Y and Z. This is due to
-		 * the orientation of the IMU in the controller. We negate those
-		 * axes' values in order to be consistent with the left joy-con
-		 * and the pro controller:
-		 *   X: positive is pointing toward the triggers
-		 *   Y: positive is pointing to the left
-		 *   Z: positive is pointing up (out of the buttons/sticks)
-		 * The axes follow the right-hand rule.
-		 */
-		if (jc_type_is_joycon(ctlr) && jc_type_has_right(ctlr)) {
-			int j;
-
-			/* negate all but x axis */
-			for (j = 1; j < 6; ++j) {
-				if (j == 3)
-					continue;
-				value[j] *= -1;
-			}
-		}
-
-		input_report_abs(idev, ABS_RX, value[0]);
-		input_report_abs(idev, ABS_RY, value[1]);
-		input_report_abs(idev, ABS_RZ, value[2]);
-		input_report_abs(idev, ABS_X, value[3]);
-		input_report_abs(idev, ABS_Y, value[4]);
-		input_report_abs(idev, ABS_Z, value[5]);
-		input_sync(idev);
-		/* convert to micros and divide by 3 (3 samples per report). */
-		ctlr->imu_timestamp_us += ctlr->imu_avg_delta_ms * 1000 / 3;
-	}
-}
-
 static void joycon_parse_report(struct joycon_ctlr *ctlr,
 				struct joycon_input_report *rep)
 {
 	struct input_dev *dev = ctlr->input;
-	unsigned long flags;
-	u8 tmp;
 	u32 btns;
-	unsigned long msecs = jiffies_to_msecs(jiffies);
+	u32 id = ctlr->hdev->product;
 
-	spin_lock_irqsave(&ctlr->lock, flags);
-	if (IS_ENABLED(CONFIG_NINTENDO_FF) && rep->vibrator_report &&
-	    (msecs - ctlr->rumble_msecs) >= JC_RUMBLE_PERIOD_MS &&
-	    (ctlr->rumble_queue_head != ctlr->rumble_queue_tail ||
-	     ctlr->rumble_zero_countdown > 0)) {
-		/*
-		 * When this value reaches 0, we know we've sent multiple
-		 * packets to the controller instructing it to disable rumble.
-		 * We can safely stop sending periodic rumble packets until the
-		 * next ff effect.
-		 */
-		if (ctlr->rumble_zero_countdown > 0)
-			ctlr->rumble_zero_countdown--;
-		queue_work(ctlr->rumble_queue, &ctlr->rumble_worker);
-	}
-
-	/* Parse the battery status */
-	tmp = rep->bat_con;
-	ctlr->host_powered = tmp & BIT(0);
-	ctlr->battery_charging = tmp & BIT(4);
-	tmp = tmp >> 5;
-	switch (tmp) {
-	case 0: /* empty */
-		ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
-		break;
-	case 1: /* low */
-		ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
-		break;
-	case 2: /* medium */
-		ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
-		break;
-	case 3: /* high */
-		ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
-		break;
-	case 4: /* full */
-		ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
-		break;
-	default:
-		ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
-		hid_warn(ctlr->hdev, "Invalid battery status\n");
-		break;
-	}
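-	/*
-	 * Worked example (editorial): bat_con = 0x91 decodes as host-powered
-	 * (bit 0) and charging (bit 4), with tmp >> 5 == 4, i.e.
-	 * POWER_SUPPLY_CAPACITY_LEVEL_FULL.
-	 */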
-	spin_unlock_irqrestore(&ctlr->lock, flags);
-
-	/* Parse the buttons and sticks */
 	btns = hid_field_extract(ctlr->hdev, rep->button_status, 0, 24);
 
-	if (jc_type_has_left(ctlr)) {
+	if (id != USB_DEVICE_ID_NINTENDO_JOYCONR) {
 		u16 raw_x;
 		u16 raw_y;
 		s32 x;
@@ -1287,43 +456,20 @@
 		/* report buttons */
 		input_report_key(dev, BTN_TL, btns & JC_BTN_L);
 		input_report_key(dev, BTN_TL2, btns & JC_BTN_ZL);
-		input_report_key(dev, BTN_SELECT, btns & JC_BTN_MINUS);
-		input_report_key(dev, BTN_THUMBL, btns & JC_BTN_LSTICK);
-		input_report_key(dev, BTN_Z, btns & JC_BTN_CAP);
-
-		if (jc_type_is_joycon(ctlr)) {
+		if (id != USB_DEVICE_ID_NINTENDO_PROCON) {
 			/* Report the S buttons as the non-existent triggers */
 			input_report_key(dev, BTN_TR, btns & JC_BTN_SL_L);
 			input_report_key(dev, BTN_TR2, btns & JC_BTN_SR_L);
-
-			/* Report d-pad as digital buttons for the joy-cons */
-			input_report_key(dev, BTN_DPAD_DOWN,
-					 btns & JC_BTN_DOWN);
-			input_report_key(dev, BTN_DPAD_UP, btns & JC_BTN_UP);
-			input_report_key(dev, BTN_DPAD_RIGHT,
-					 btns & JC_BTN_RIGHT);
-			input_report_key(dev, BTN_DPAD_LEFT,
-					 btns & JC_BTN_LEFT);
-		} else {
-			int hatx = 0;
-			int haty = 0;
-
-			/* d-pad x */
-			if (btns & JC_BTN_LEFT)
-				hatx = -1;
-			else if (btns & JC_BTN_RIGHT)
-				hatx = 1;
-			input_report_abs(dev, ABS_HAT0X, hatx);
-
-			/* d-pad y */
-			if (btns & JC_BTN_UP)
-				haty = -1;
-			else if (btns & JC_BTN_DOWN)
-				haty = 1;
-			input_report_abs(dev, ABS_HAT0Y, haty);
 		}
+		input_report_key(dev, BTN_SELECT, btns & JC_BTN_MINUS);
+		input_report_key(dev, BTN_THUMBL, btns & JC_BTN_LSTICK);
+		input_report_key(dev, BTN_Z, btns & JC_BTN_CAP);
+		input_report_key(dev, BTN_DPAD_DOWN, btns & JC_BTN_DOWN);
+		input_report_key(dev, BTN_DPAD_UP, btns & JC_BTN_UP);
+		input_report_key(dev, BTN_DPAD_RIGHT, btns & JC_BTN_RIGHT);
+		input_report_key(dev, BTN_DPAD_LEFT, btns & JC_BTN_LEFT);
 	}
-	if (jc_type_has_right(ctlr)) {
+	if (id != USB_DEVICE_ID_NINTENDO_JOYCONL) {
 		u16 raw_x;
 		u16 raw_y;
 		s32 x;
@@ -1343,7 +489,7 @@
 		/* report buttons */
 		input_report_key(dev, BTN_TR, btns & JC_BTN_R);
 		input_report_key(dev, BTN_TR2, btns & JC_BTN_ZR);
-		if (jc_type_is_joycon(ctlr)) {
+		if (id != USB_DEVICE_ID_NINTENDO_PROCON) {
 			/* Report the S buttons as the non-existent triggers */
 			input_report_key(dev, BTN_TL, btns & JC_BTN_SL_R);
 			input_report_key(dev, BTN_TL2, btns & JC_BTN_SR_R);
@@ -1358,220 +504,12 @@
 	}
 
 	input_sync(dev);
-
-	/*
-	 * Immediately after receiving a report is the most reliable time to
-	 * send a subcommand to the controller. Wake any subcommand senders
-	 * waiting for a report.
-	 */
-	if (unlikely(mutex_is_locked(&ctlr->output_mutex))) {
-		spin_lock_irqsave(&ctlr->lock, flags);
-		ctlr->received_input_report = true;
-		spin_unlock_irqrestore(&ctlr->lock, flags);
-		wake_up(&ctlr->wait);
-	}
-
-	/* parse IMU data if present */
-	if (rep->id == JC_INPUT_IMU_DATA)
-		joycon_parse_imu_report(ctlr, rep);
 }
 
-static int joycon_send_rumble_data(struct joycon_ctlr *ctlr)
-{
-	int ret;
-	unsigned long flags;
-	struct joycon_rumble_output rumble_output = { 0 };
-
-	spin_lock_irqsave(&ctlr->lock, flags);
-	/*
-	 * If the controller has been removed, just return ENODEV so the LED
-	 * subsystem doesn't print invalid errors on removal.
-	 */
-	if (ctlr->ctlr_state == JOYCON_CTLR_STATE_REMOVED) {
-		spin_unlock_irqrestore(&ctlr->lock, flags);
-		return -ENODEV;
-	}
-	memcpy(rumble_output.rumble_data,
-	       ctlr->rumble_data[ctlr->rumble_queue_tail],
-	       JC_RUMBLE_DATA_SIZE);
-	spin_unlock_irqrestore(&ctlr->lock, flags);
-
-	rumble_output.output_id = JC_OUTPUT_RUMBLE_ONLY;
-	rumble_output.packet_num = ctlr->subcmd_num;
-	if (++ctlr->subcmd_num > 0xF)
-		ctlr->subcmd_num = 0;
-
-	joycon_enforce_subcmd_rate(ctlr);
-
-	ret = __joycon_hid_send(ctlr->hdev, (u8 *)&rumble_output,
-				sizeof(rumble_output));
-	return ret;
-}
-
-static void joycon_rumble_worker(struct work_struct *work)
-{
-	struct joycon_ctlr *ctlr = container_of(work, struct joycon_ctlr,
-							rumble_worker);
-	unsigned long flags;
-	bool again = true;
-	int ret;
-
-	while (again) {
-		mutex_lock(&ctlr->output_mutex);
-		ret = joycon_send_rumble_data(ctlr);
-		mutex_unlock(&ctlr->output_mutex);
-
-		/* -ENODEV means the controller was just unplugged */
-		spin_lock_irqsave(&ctlr->lock, flags);
-		if (ret < 0 && ret != -ENODEV &&
-		    ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED)
-			hid_warn(ctlr->hdev, "Failed to set rumble; e=%d", ret);
-
-		ctlr->rumble_msecs = jiffies_to_msecs(jiffies);
-		if (ctlr->rumble_queue_tail != ctlr->rumble_queue_head) {
-			if (++ctlr->rumble_queue_tail >= JC_RUMBLE_QUEUE_SIZE)
-				ctlr->rumble_queue_tail = 0;
-		} else {
-			again = false;
-		}
-		spin_unlock_irqrestore(&ctlr->lock, flags);
-	}
-}
-
-#if IS_ENABLED(CONFIG_NINTENDO_FF)
-static struct joycon_rumble_freq_data joycon_find_rumble_freq(u16 freq)
-{
-	const size_t length = ARRAY_SIZE(joycon_rumble_frequencies);
-	const struct joycon_rumble_freq_data *data = joycon_rumble_frequencies;
-	int i = 0;
-
-	if (freq > data[0].freq) {
-		for (i = 1; i < length - 1; i++) {
-			if (freq > data[i - 1].freq && freq <= data[i].freq)
-				break;
-		}
-	}
-
-	return data[i];
-}
-
-static struct joycon_rumble_amp_data joycon_find_rumble_amp(u16 amp)
-{
-	const size_t length = ARRAY_SIZE(joycon_rumble_amplitudes);
-	const struct joycon_rumble_amp_data *data = joycon_rumble_amplitudes;
-	int i = 0;
-
-	if (amp > data[0].amp) {
-		for (i = 1; i < length - 1; i++) {
-			if (amp > data[i - 1].amp && amp <= data[i].amp)
-				break;
-		}
-	}
-
-	return data[i];
-}
-
-static void joycon_encode_rumble(u8 *data, u16 freq_low, u16 freq_high, u16 amp)
-{
-	struct joycon_rumble_freq_data freq_data_low;
-	struct joycon_rumble_freq_data freq_data_high;
-	struct joycon_rumble_amp_data amp_data;
-
-	freq_data_low = joycon_find_rumble_freq(freq_low);
-	freq_data_high = joycon_find_rumble_freq(freq_high);
-	amp_data = joycon_find_rumble_amp(amp);
-
-	data[0] = (freq_data_high.high >> 8) & 0xFF;
-	data[1] = (freq_data_high.high & 0xFF) + amp_data.high;
-	data[2] = freq_data_low.low + ((amp_data.low >> 8) & 0xFF);
-	data[3] = amp_data.low & 0xFF;
-}
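-/*
- * Worked example (editorial): with the driver defaults freq_low = 160 Hz
- * ({0x8000, 0x40}) and freq_high = 320 Hz ({0x0001, 0x60}), and amp = 501
- * ({0x88, 0x0062}), the four encoded bytes are 0x00 0x89 0x40 0x62.
- */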
-
-static const u16 JOYCON_MAX_RUMBLE_HIGH_FREQ	= 1253;
-static const u16 JOYCON_MIN_RUMBLE_HIGH_FREQ	= 82;
-static const u16 JOYCON_MAX_RUMBLE_LOW_FREQ	= 626;
-static const u16 JOYCON_MIN_RUMBLE_LOW_FREQ	= 41;
-
-static void joycon_clamp_rumble_freqs(struct joycon_ctlr *ctlr)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ctlr->lock, flags);
-	ctlr->rumble_ll_freq = clamp(ctlr->rumble_ll_freq,
-				     JOYCON_MIN_RUMBLE_LOW_FREQ,
-				     JOYCON_MAX_RUMBLE_LOW_FREQ);
-	ctlr->rumble_lh_freq = clamp(ctlr->rumble_lh_freq,
-				     JOYCON_MIN_RUMBLE_HIGH_FREQ,
-				     JOYCON_MAX_RUMBLE_HIGH_FREQ);
-	ctlr->rumble_rl_freq = clamp(ctlr->rumble_rl_freq,
-				     JOYCON_MIN_RUMBLE_LOW_FREQ,
-				     JOYCON_MAX_RUMBLE_LOW_FREQ);
-	ctlr->rumble_rh_freq = clamp(ctlr->rumble_rh_freq,
-				     JOYCON_MIN_RUMBLE_HIGH_FREQ,
-				     JOYCON_MAX_RUMBLE_HIGH_FREQ);
-	spin_unlock_irqrestore(&ctlr->lock, flags);
-}
-
-static int joycon_set_rumble(struct joycon_ctlr *ctlr, u16 amp_r, u16 amp_l,
-			     bool schedule_now)
-{
-	u8 data[JC_RUMBLE_DATA_SIZE];
-	u16 amp;
-	u16 freq_r_low;
-	u16 freq_r_high;
-	u16 freq_l_low;
-	u16 freq_l_high;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ctlr->lock, flags);
-	freq_r_low = ctlr->rumble_rl_freq;
-	freq_r_high = ctlr->rumble_rh_freq;
-	freq_l_low = ctlr->rumble_ll_freq;
-	freq_l_high = ctlr->rumble_lh_freq;
-	/* limit number of silent rumble packets to reduce traffic */
-	if (amp_l != 0 || amp_r != 0)
-		ctlr->rumble_zero_countdown = JC_RUMBLE_ZERO_AMP_PKT_CNT;
-	spin_unlock_irqrestore(&ctlr->lock, flags);
-
-	/* right joy-con */
-	amp = amp_r * (u32)joycon_max_rumble_amp / 65535;
-	joycon_encode_rumble(data + 4, freq_r_low, freq_r_high, amp);
-
-	/* left joy-con */
-	amp = amp_l * (u32)joycon_max_rumble_amp / 65535;
-	joycon_encode_rumble(data, freq_l_low, freq_l_high, amp);
-
-	spin_lock_irqsave(&ctlr->lock, flags);
-	if (++ctlr->rumble_queue_head >= JC_RUMBLE_QUEUE_SIZE)
-		ctlr->rumble_queue_head = 0;
-	memcpy(ctlr->rumble_data[ctlr->rumble_queue_head], data,
-	       JC_RUMBLE_DATA_SIZE);
-	spin_unlock_irqrestore(&ctlr->lock, flags);
-
-	/* don't wait for the periodic send (reduces latency) */
-	if (schedule_now)
-		queue_work(ctlr->rumble_queue, &ctlr->rumble_worker);
-
-	return 0;
-}
-
-static int joycon_play_effect(struct input_dev *dev, void *data,
-						     struct ff_effect *effect)
-{
-	struct joycon_ctlr *ctlr = input_get_drvdata(dev);
-
-	if (effect->type != FF_RUMBLE)
-		return 0;
-
-	return joycon_set_rumble(ctlr,
-				 effect->u.rumble.weak_magnitude,
-				 effect->u.rumble.strong_magnitude,
-				 true);
-}
-#endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */
 
 static const unsigned int joycon_button_inputs_l[] = {
 	BTN_SELECT, BTN_Z, BTN_THUMBL,
+	BTN_DPAD_UP, BTN_DPAD_DOWN, BTN_DPAD_LEFT, BTN_DPAD_RIGHT,
 	BTN_TL, BTN_TL2,
 	0 /* 0 signals end of array */
 };
@@ -1583,16 +521,12 @@
 	0 /* 0 signals end of array */
 };
 
-/* We report joy-con d-pad inputs as buttons and pro controller as a hat. */
-static const unsigned int joycon_dpad_inputs_jc[] = {
-	BTN_DPAD_UP, BTN_DPAD_DOWN, BTN_DPAD_LEFT, BTN_DPAD_RIGHT,
-};
-
+static DEFINE_MUTEX(joycon_input_num_mutex);
 static int joycon_input_create(struct joycon_ctlr *ctlr)
 {
 	struct hid_device *hdev;
+	static int input_num = 1;
 	const char *name;
-	const char *imu_name;
 	int ret;
 	int i;
 
@@ -1601,24 +535,12 @@
 	switch (hdev->product) {
 	case USB_DEVICE_ID_NINTENDO_PROCON:
 		name = "Nintendo Switch Pro Controller";
-		imu_name = "Nintendo Switch Pro Controller IMU";
-		break;
-	case USB_DEVICE_ID_NINTENDO_CHRGGRIP:
-		if (jc_type_has_left(ctlr)) {
-			name = "Nintendo Switch Left Joy-Con (Grip)";
-			imu_name = "Nintendo Switch Left Joy-Con IMU (Grip)";
-		} else {
-			name = "Nintendo Switch Right Joy-Con (Grip)";
-			imu_name = "Nintendo Switch Right Joy-Con IMU (Grip)";
-		}
 		break;
 	case USB_DEVICE_ID_NINTENDO_JOYCONL:
 		name = "Nintendo Switch Left Joy-Con";
-		imu_name = "Nintendo Switch Left Joy-Con IMU";
 		break;
 	case USB_DEVICE_ID_NINTENDO_JOYCONR:
 		name = "Nintendo Switch Right Joy-Con";
-		imu_name = "Nintendo Switch Right Joy-Con IMU";
 		break;
 	default: /* Should be impossible */
 		hid_err(hdev, "Invalid hid product\n");
@@ -1632,212 +554,44 @@
 	ctlr->input->id.vendor = hdev->vendor;
 	ctlr->input->id.product = hdev->product;
 	ctlr->input->id.version = hdev->version;
-	ctlr->input->uniq = ctlr->mac_addr_str;
 	ctlr->input->name = name;
 	input_set_drvdata(ctlr->input, ctlr);
 
-	/* set up sticks and buttons */
-	if (jc_type_has_left(ctlr)) {
+	/* set up sticks */
+	if (hdev->product != USB_DEVICE_ID_NINTENDO_JOYCONR) {
 		input_set_abs_params(ctlr->input, ABS_X,
 				     -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
 				     JC_STICK_FUZZ, JC_STICK_FLAT);
 		input_set_abs_params(ctlr->input, ABS_Y,
 				     -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
 				     JC_STICK_FUZZ, JC_STICK_FLAT);
-
-		for (i = 0; joycon_button_inputs_l[i] > 0; i++)
-			input_set_capability(ctlr->input, EV_KEY,
-					     joycon_button_inputs_l[i]);
-
-		/* configure d-pad differently for joy-con vs pro controller */
-		if (hdev->product != USB_DEVICE_ID_NINTENDO_PROCON) {
-			for (i = 0; joycon_dpad_inputs_jc[i] > 0; i++)
-				input_set_capability(ctlr->input, EV_KEY,
-						     joycon_dpad_inputs_jc[i]);
-		} else {
-			input_set_abs_params(ctlr->input, ABS_HAT0X,
-					     -JC_MAX_DPAD_MAG, JC_MAX_DPAD_MAG,
-					     JC_DPAD_FUZZ, JC_DPAD_FLAT);
-			input_set_abs_params(ctlr->input, ABS_HAT0Y,
-					     -JC_MAX_DPAD_MAG, JC_MAX_DPAD_MAG,
-					     JC_DPAD_FUZZ, JC_DPAD_FLAT);
-		}
 	}
-	if (jc_type_has_right(ctlr)) {
+	if (hdev->product != USB_DEVICE_ID_NINTENDO_JOYCONL) {
 		input_set_abs_params(ctlr->input, ABS_RX,
 				     -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
 				     JC_STICK_FUZZ, JC_STICK_FLAT);
 		input_set_abs_params(ctlr->input, ABS_RY,
 				     -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
 				     JC_STICK_FUZZ, JC_STICK_FLAT);
+	}
 
+	/* set up buttons */
+	if (hdev->product != USB_DEVICE_ID_NINTENDO_JOYCONR) {
+		for (i = 0; joycon_button_inputs_l[i] > 0; i++)
+			input_set_capability(ctlr->input, EV_KEY,
+					     joycon_button_inputs_l[i]);
+	}
+	if (hdev->product != USB_DEVICE_ID_NINTENDO_JOYCONL) {
 		for (i = 0; joycon_button_inputs_r[i] > 0; i++)
 			input_set_capability(ctlr->input, EV_KEY,
 					     joycon_button_inputs_r[i]);
 	}
 
-	/* Let's report joy-con S triggers separately */
-	if (hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONL) {
-		input_set_capability(ctlr->input, EV_KEY, BTN_TR);
-		input_set_capability(ctlr->input, EV_KEY, BTN_TR2);
-	} else if (hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONR) {
-		input_set_capability(ctlr->input, EV_KEY, BTN_TL);
-		input_set_capability(ctlr->input, EV_KEY, BTN_TL2);
-	}
-
-#if IS_ENABLED(CONFIG_NINTENDO_FF)
-	/* set up rumble */
-	input_set_capability(ctlr->input, EV_FF, FF_RUMBLE);
-	input_ff_create_memless(ctlr->input, NULL, joycon_play_effect);
-	ctlr->rumble_ll_freq = JC_RUMBLE_DFLT_LOW_FREQ;
-	ctlr->rumble_lh_freq = JC_RUMBLE_DFLT_HIGH_FREQ;
-	ctlr->rumble_rl_freq = JC_RUMBLE_DFLT_LOW_FREQ;
-	ctlr->rumble_rh_freq = JC_RUMBLE_DFLT_HIGH_FREQ;
-	joycon_clamp_rumble_freqs(ctlr);
-	joycon_set_rumble(ctlr, 0, 0, false);
-	ctlr->rumble_msecs = jiffies_to_msecs(jiffies);
-#endif
-
 	ret = input_register_device(ctlr->input);
 	if (ret)
 		return ret;
 
-	/* configure the imu input device */
-	ctlr->imu_input = devm_input_allocate_device(&hdev->dev);
-	if (!ctlr->imu_input)
-		return -ENOMEM;
-
-	ctlr->imu_input->id.bustype = hdev->bus;
-	ctlr->imu_input->id.vendor = hdev->vendor;
-	ctlr->imu_input->id.product = hdev->product;
-	ctlr->imu_input->id.version = hdev->version;
-	ctlr->imu_input->uniq = ctlr->mac_addr_str;
-	ctlr->imu_input->name = imu_name;
-	input_set_drvdata(ctlr->imu_input, ctlr);
-
-	/* configure imu axes */
-	input_set_abs_params(ctlr->imu_input, ABS_X,
-			     -JC_IMU_MAX_ACCEL_MAG, JC_IMU_MAX_ACCEL_MAG,
-			     JC_IMU_ACCEL_FUZZ, JC_IMU_ACCEL_FLAT);
-	input_set_abs_params(ctlr->imu_input, ABS_Y,
-			     -JC_IMU_MAX_ACCEL_MAG, JC_IMU_MAX_ACCEL_MAG,
-			     JC_IMU_ACCEL_FUZZ, JC_IMU_ACCEL_FLAT);
-	input_set_abs_params(ctlr->imu_input, ABS_Z,
-			     -JC_IMU_MAX_ACCEL_MAG, JC_IMU_MAX_ACCEL_MAG,
-			     JC_IMU_ACCEL_FUZZ, JC_IMU_ACCEL_FLAT);
-	input_abs_set_res(ctlr->imu_input, ABS_X, JC_IMU_ACCEL_RES_PER_G);
-	input_abs_set_res(ctlr->imu_input, ABS_Y, JC_IMU_ACCEL_RES_PER_G);
-	input_abs_set_res(ctlr->imu_input, ABS_Z, JC_IMU_ACCEL_RES_PER_G);
-
-	input_set_abs_params(ctlr->imu_input, ABS_RX,
-			     -JC_IMU_MAX_GYRO_MAG, JC_IMU_MAX_GYRO_MAG,
-			     JC_IMU_GYRO_FUZZ, JC_IMU_GYRO_FLAT);
-	input_set_abs_params(ctlr->imu_input, ABS_RY,
-			     -JC_IMU_MAX_GYRO_MAG, JC_IMU_MAX_GYRO_MAG,
-			     JC_IMU_GYRO_FUZZ, JC_IMU_GYRO_FLAT);
-	input_set_abs_params(ctlr->imu_input, ABS_RZ,
-			     -JC_IMU_MAX_GYRO_MAG, JC_IMU_MAX_GYRO_MAG,
-			     JC_IMU_GYRO_FUZZ, JC_IMU_GYRO_FLAT);
-
-	input_abs_set_res(ctlr->imu_input, ABS_RX, JC_IMU_GYRO_RES_PER_DPS);
-	input_abs_set_res(ctlr->imu_input, ABS_RY, JC_IMU_GYRO_RES_PER_DPS);
-	input_abs_set_res(ctlr->imu_input, ABS_RZ, JC_IMU_GYRO_RES_PER_DPS);
-
-	__set_bit(EV_MSC, ctlr->imu_input->evbit);
-	__set_bit(MSC_TIMESTAMP, ctlr->imu_input->mscbit);
-	__set_bit(INPUT_PROP_ACCELEROMETER, ctlr->imu_input->propbit);
-
-	ret = input_register_device(ctlr->imu_input);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int joycon_player_led_brightness_set(struct led_classdev *led,
-					    enum led_brightness brightness)
-{
-	struct device *dev = led->dev->parent;
-	struct hid_device *hdev = to_hid_device(dev);
-	struct joycon_ctlr *ctlr;
-	int val = 0;
-	int i;
-	int ret;
-	int num;
-
-	ctlr = hid_get_drvdata(hdev);
-	if (!ctlr) {
-		hid_err(hdev, "No controller data\n");
-		return -ENODEV;
-	}
-
-	/* determine which player led this is */
-	for (num = 0; num < JC_NUM_LEDS; num++) {
-		if (&ctlr->leds[num] == led)
-			break;
-	}
-	if (num >= JC_NUM_LEDS)
-		return -EINVAL;
-
-	mutex_lock(&ctlr->output_mutex);
-	for (i = 0; i < JC_NUM_LEDS; i++) {
-		if (i == num)
-			val |= brightness << i;
-		else
-			val |= ctlr->leds[i].brightness << i;
-	}
-	ret = joycon_set_player_leds(ctlr, 0, val);
-	mutex_unlock(&ctlr->output_mutex);
-
-	return ret;
-}
-
-static int joycon_home_led_brightness_set(struct led_classdev *led,
-					  enum led_brightness brightness)
-{
-	struct device *dev = led->dev->parent;
-	struct hid_device *hdev = to_hid_device(dev);
-	struct joycon_ctlr *ctlr;
-	struct joycon_subcmd_request *req;
-	u8 buffer[sizeof(*req) + 5] = { 0 };
-	u8 *data;
-	int ret;
-
-	ctlr = hid_get_drvdata(hdev);
-	if (!ctlr) {
-		hid_err(hdev, "No controller data\n");
-		return -ENODEV;
-	}
-
-	req = (struct joycon_subcmd_request *)buffer;
-	req->subcmd_id = JC_SUBCMD_SET_HOME_LIGHT;
-	data = req->data;
-	data[0] = 0x01;
-	data[1] = brightness << 4;
-	data[2] = brightness | (brightness << 4);
-	data[3] = 0x11;
-	data[4] = 0x11;
-
-	hid_dbg(hdev, "setting home led brightness\n");
-	mutex_lock(&ctlr->output_mutex);
-	ret = joycon_send_subcmd(ctlr, req, 5, HZ/4);
-	mutex_unlock(&ctlr->output_mutex);
-
-	return ret;
-}
-
-static DEFINE_MUTEX(joycon_input_num_mutex);
-static int joycon_leds_create(struct joycon_ctlr *ctlr)
-{
-	struct hid_device *hdev = ctlr->hdev;
-	struct device *dev = &hdev->dev;
-	const char *d_name = dev_name(dev);
-	struct led_classdev *led;
-	char *name;
-	int ret = 0;
-	int i;
-	static int input_num = 1;
-
 	/* Set the default controller player leds based on controller number */
 	mutex_lock(&joycon_input_num_mutex);
 	mutex_lock(&ctlr->output_mutex);
@@ -1845,190 +599,10 @@
 	if (ret)
 		hid_warn(ctlr->hdev, "Failed to set leds; ret=%d\n", ret);
 	mutex_unlock(&ctlr->output_mutex);
-
-	/* configure the player LEDs */
-	for (i = 0; i < JC_NUM_LEDS; i++) {
-		name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s:%s",
-				      d_name,
-				      "green",
-				      joycon_player_led_names[i]);
-		if (!name) {
-			mutex_unlock(&joycon_input_num_mutex);
-			return -ENOMEM;
-		}
-
-		led = &ctlr->leds[i];
-		led->name = name;
-		led->brightness = ((i + 1) <= input_num) ? 1 : 0;
-		led->max_brightness = 1;
-		led->brightness_set_blocking =
-					joycon_player_led_brightness_set;
-		led->flags = LED_CORE_SUSPENDRESUME | LED_HW_PLUGGABLE;
-
-		ret = devm_led_classdev_register(&hdev->dev, led);
-		if (ret) {
-			hid_err(hdev, "Failed registering %s LED\n", led->name);
-			mutex_unlock(&joycon_input_num_mutex);
-			return ret;
-		}
-	}
-
 	if (++input_num > 4)
 		input_num = 1;
 	mutex_unlock(&joycon_input_num_mutex);
 
-	/* configure the home LED */
-	if (jc_type_has_right(ctlr)) {
-		name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s:%s",
-				      d_name,
-				      "blue",
-				      LED_FUNCTION_PLAYER5);
-		if (!name)
-			return -ENOMEM;
-
-		led = &ctlr->home_led;
-		led->name = name;
-		led->brightness = 0;
-		led->max_brightness = 0xF;
-		led->brightness_set_blocking = joycon_home_led_brightness_set;
-		led->flags = LED_CORE_SUSPENDRESUME | LED_HW_PLUGGABLE;
-		ret = devm_led_classdev_register(&hdev->dev, led);
-		if (ret) {
-			hid_err(hdev, "Failed registering home led\n");
-			return ret;
-		}
-		/* Set the home LED to 0 as default state */
-		ret = joycon_home_led_brightness_set(led, 0);
-		if (ret) {
-			hid_err(hdev, "Failed to set home LED dflt; ret=%d\n",
-									ret);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static int joycon_battery_get_property(struct power_supply *supply,
-				       enum power_supply_property prop,
-				       union power_supply_propval *val)
-{
-	struct joycon_ctlr *ctlr = power_supply_get_drvdata(supply);
-	unsigned long flags;
-	int ret = 0;
-	u8 capacity;
-	bool charging;
-	bool powered;
-
-	spin_lock_irqsave(&ctlr->lock, flags);
-	capacity = ctlr->battery_capacity;
-	charging = ctlr->battery_charging;
-	powered = ctlr->host_powered;
-	spin_unlock_irqrestore(&ctlr->lock, flags);
-
-	switch (prop) {
-	case POWER_SUPPLY_PROP_PRESENT:
-		val->intval = 1;
-		break;
-	case POWER_SUPPLY_PROP_SCOPE:
-		val->intval = POWER_SUPPLY_SCOPE_DEVICE;
-		break;
-	case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
-		val->intval = capacity;
-		break;
-	case POWER_SUPPLY_PROP_STATUS:
-		if (charging)
-			val->intval = POWER_SUPPLY_STATUS_CHARGING;
-		else if (capacity == POWER_SUPPLY_CAPACITY_LEVEL_FULL &&
-			 powered)
-			val->intval = POWER_SUPPLY_STATUS_FULL;
-		else
-			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
-}
-
-static enum power_supply_property joycon_battery_props[] = {
-	POWER_SUPPLY_PROP_PRESENT,
-	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
-	POWER_SUPPLY_PROP_SCOPE,
-	POWER_SUPPLY_PROP_STATUS,
-};
-
-static int joycon_power_supply_create(struct joycon_ctlr *ctlr)
-{
-	struct hid_device *hdev = ctlr->hdev;
-	struct power_supply_config supply_config = { .drv_data = ctlr, };
-	const char * const name_fmt = "nintendo_switch_controller_battery_%s";
-	int ret = 0;
-
-	/* Set initially to unknown before receiving first input report */
-	ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
-
-	/* Configure the battery's description */
-	ctlr->battery_desc.properties = joycon_battery_props;
-	ctlr->battery_desc.num_properties =
-					ARRAY_SIZE(joycon_battery_props);
-	ctlr->battery_desc.get_property = joycon_battery_get_property;
-	ctlr->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY;
-	ctlr->battery_desc.use_for_apm = 0;
-	ctlr->battery_desc.name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
-						 name_fmt,
-						 dev_name(&hdev->dev));
-	if (!ctlr->battery_desc.name)
-		return -ENOMEM;
-
-	ctlr->battery = devm_power_supply_register(&hdev->dev,
-						   &ctlr->battery_desc,
-						   &supply_config);
-	if (IS_ERR(ctlr->battery)) {
-		ret = PTR_ERR(ctlr->battery);
-		hid_err(hdev, "Failed to register battery; ret=%d\n", ret);
-		return ret;
-	}
-
-	return power_supply_powers(ctlr->battery, &hdev->dev);
-}
-
-static int joycon_read_info(struct joycon_ctlr *ctlr)
-{
-	int ret;
-	int i;
-	int j;
-	struct joycon_subcmd_request req = { 0 };
-	struct joycon_input_report *report;
-
-	req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO;
-	ret = joycon_send_subcmd(ctlr, &req, 0, HZ);
-	if (ret) {
-		hid_err(ctlr->hdev, "Failed to get joycon info; ret=%d\n", ret);
-		return ret;
-	}
-
-	report = (struct joycon_input_report *)ctlr->input_buf;
-
-	for (i = 4, j = 0; j < 6; i++, j++)
-		ctlr->mac_addr[j] = report->subcmd_reply.data[i];
-
-	ctlr->mac_addr_str = devm_kasprintf(&ctlr->hdev->dev, GFP_KERNEL,
-					    "%02X:%02X:%02X:%02X:%02X:%02X",
-					    ctlr->mac_addr[0],
-					    ctlr->mac_addr[1],
-					    ctlr->mac_addr[2],
-					    ctlr->mac_addr[3],
-					    ctlr->mac_addr[4],
-					    ctlr->mac_addr[5]);
-	if (!ctlr->mac_addr_str)
-		return -ENOMEM;
-	hid_info(ctlr->hdev, "controller MAC = %s\n", ctlr->mac_addr_str);
-
-	/* Retrieve the type so we can distinguish for charging grip */
-	ctlr->ctlr_type = report->subcmd_reply.data[2];
-
 	return 0;
 }
 
@@ -2036,6 +610,8 @@
 static int joycon_ctlr_read_handler(struct joycon_ctlr *ctlr, u8 *data,
 							      int size)
 {
+	int ret = 0;
+
 	if (data[0] == JC_INPUT_SUBCMD_REPLY || data[0] == JC_INPUT_IMU_DATA ||
 	    data[0] == JC_INPUT_MCU_DATA) {
 		if (size >= 12) /* make sure it contains the input report */
@@ -2043,7 +619,7 @@
 					    (struct joycon_input_report *)data);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int joycon_ctlr_handle_event(struct joycon_ctlr *ctlr, u8 *data,
@@ -2068,7 +644,7 @@
 			    data[0] != JC_INPUT_SUBCMD_REPLY)
 				break;
 			report = (struct joycon_input_report *)data;
-			if (report->subcmd_reply.id == ctlr->subcmd_ack_match)
+			if (report->reply.id == ctlr->subcmd_ack_match)
 				match = true;
 			break;
 		default:
@@ -2120,35 +696,20 @@
 
 	ctlr->hdev = hdev;
 	ctlr->ctlr_state = JOYCON_CTLR_STATE_INIT;
-	ctlr->rumble_queue_head = JC_RUMBLE_QUEUE_SIZE - 1;
-	ctlr->rumble_queue_tail = 0;
 	hid_set_drvdata(hdev, ctlr);
 	mutex_init(&ctlr->output_mutex);
 	init_waitqueue_head(&ctlr->wait);
-	spin_lock_init(&ctlr->lock);
-	ctlr->rumble_queue = alloc_workqueue("hid-nintendo-rumble_wq",
-					     WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
-	INIT_WORK(&ctlr->rumble_worker, joycon_rumble_worker);
 
 	ret = hid_parse(hdev);
 	if (ret) {
 		hid_err(hdev, "HID parse failed\n");
-		goto err_wq;
+		goto err;
 	}
 
-	/*
-	 * Patch the hw version of pro controller/joycons, so applications can
-	 * distinguish between the default HID mappings and the mappings defined
-	 * by the Linux game controller spec. This is important for the SDL2
-	 * library, which has a game controller database, which uses device ids
-	 * in combination with version as a key.
-	 */
-	hdev->version |= 0x8000;
-
 	ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
 	if (ret) {
 		hid_err(hdev, "HW start failed\n");
-		goto err_wq;
+		goto err;
 	}
 
 	ret = hid_hw_open(hdev);
@@ -2162,17 +723,17 @@
 	/* Initialize the controller */
 	mutex_lock(&ctlr->output_mutex);
 	/* if handshake command fails, assume ble pro controller */
-	if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) &&
-	    !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) {
+	if (hdev->product == USB_DEVICE_ID_NINTENDO_PROCON &&
+	    !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE)) {
 		hid_dbg(hdev, "detected USB controller\n");
 		/* set baudrate for improved latency */
-		ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ);
+		ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M);
 		if (ret) {
 			hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret);
 			goto err_mutex;
 		}
 		/* handshake */
-		ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ);
+		ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE);
 		if (ret) {
 			hid_err(hdev, "Failed handshake; ret=%d\n", ret);
 			goto err_mutex;
@@ -2181,11 +742,7 @@
 		 * Set no timeout (to keep controller in USB mode).
 		 * This doesn't send a response, so ignore the timeout.
 		 */
-		joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10);
-	} else if (jc_type_is_chrggrip(ctlr)) {
-		hid_err(hdev, "Failed charging grip handshake\n");
-		ret = -ETIMEDOUT;
-		goto err_mutex;
+		joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT);
 	}
 
 	/* get controller calibration data, and parse it */
@@ -2198,16 +755,6 @@
 		hid_warn(hdev, "Analog stick positions may be inaccurate\n");
 	}
 
-	/* get IMU calibration data, and parse it */
-	ret = joycon_request_imu_calibration(ctlr);
-	if (ret) {
-		/*
-		 * We can function with default calibration, but it may be
-		 * inaccurate. Provide a warning, and continue on.
-		 */
-		hid_warn(hdev, "Unable to read IMU calibration data\n");
-	}
-
 	/* Set the reporting mode to 0x30, which is the full report mode */
 	ret = joycon_set_report_mode(ctlr);
 	if (ret) {
@@ -2215,43 +762,8 @@
 		goto err_mutex;
 	}
 
-	/* Enable rumble */
-	ret = joycon_enable_rumble(ctlr);
-	if (ret) {
-		hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret);
-		goto err_mutex;
-	}
-
-	/* Enable the IMU */
-	ret = joycon_enable_imu(ctlr);
-	if (ret) {
-		hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret);
-		goto err_mutex;
-	}
-
-	ret = joycon_read_info(ctlr);
-	if (ret) {
-		hid_err(hdev, "Failed to retrieve controller info; ret=%d\n",
-			ret);
-		goto err_mutex;
-	}
-
 	mutex_unlock(&ctlr->output_mutex);
 
-	/* Initialize the leds */
-	ret = joycon_leds_create(ctlr);
-	if (ret) {
-		hid_err(hdev, "Failed to create leds; ret=%d\n", ret);
-		goto err_close;
-	}
-
-	/* Initialize the battery power supply */
-	ret = joycon_power_supply_create(ctlr);
-	if (ret) {
-		hid_err(hdev, "Failed to create power_supply; ret=%d\n", ret);
-		goto err_close;
-	}
-
 	ret = joycon_input_create(ctlr);
 	if (ret) {
 		hid_err(hdev, "Failed to create input device; ret=%d\n", ret);
@@ -2269,8 +781,6 @@
 	hid_hw_close(hdev);
 err_stop:
 	hid_hw_stop(hdev);
-err_wq:
-	destroy_workqueue(ctlr->rumble_queue);
 err:
 	hid_err(hdev, "probe - fail = %d\n", ret);
 	return ret;
@@ -2278,18 +788,7 @@
 
 static void nintendo_hid_remove(struct hid_device *hdev)
 {
-	struct joycon_ctlr *ctlr = hid_get_drvdata(hdev);
-	unsigned long flags;
-
 	hid_dbg(hdev, "remove\n");
-
-	/* Prevent further attempts at sending subcommands. */
-	spin_lock_irqsave(&ctlr->lock, flags);
-	ctlr->ctlr_state = JOYCON_CTLR_STATE_REMOVED;
-	spin_unlock_irqrestore(&ctlr->lock, flags);
-
-	destroy_workqueue(ctlr->rumble_queue);
-
 	hid_hw_close(hdev);
 	hid_hw_stop(hdev);
 }
@@ -2299,8 +798,6 @@
 			 USB_DEVICE_ID_NINTENDO_PROCON) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
 			 USB_DEVICE_ID_NINTENDO_PROCON) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO,
-			 USB_DEVICE_ID_NINTENDO_CHRGGRIP) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
 			 USB_DEVICE_ID_NINTENDO_JOYCONL) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
@@ -2321,4 +818,3 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Daniel J. Ogorchock <djogorchock@gmail.com>");
 MODULE_DESCRIPTION("Driver for Nintendo Switch Controllers");
-
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index b45ddb4..947d440 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -758,12 +758,6 @@
 #define USB_VENDOR_ID_THQ			0x20d6
 #define USB_DEVICE_ID_THQ_PS3_UDRAW			0xcb17
 
-#define USB_VENDOR_ID_NINTENDO		0x057e
-#define USB_DEVICE_ID_NINTENDO_JOYCONL	0x2006
-#define USB_DEVICE_ID_NINTENDO_JOYCONR	0x2007
-#define USB_DEVICE_ID_NINTENDO_PROCON	0x2009
-#define USB_DEVICE_ID_NINTENDO_CHRGGRIP	0x200E
-
 #define ACCEL_DEV(vnd, prd)						\
 	{								\
 		.flags = INPUT_DEVICE_ID_MATCH_VENDOR |			\
@@ -795,10 +789,6 @@
 	ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
 	ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
 	ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW),
-	ACCEL_DEV(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_PROCON),
-	ACCEL_DEV(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_CHRGGRIP),
-	ACCEL_DEV(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_JOYCONL),
-	ACCEL_DEV(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_JOYCONR),
 	{ /* sentinel */ }
 };
 
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index b42e38a..01a9414 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -25,6 +25,7 @@
 #include <linux/vmalloc.h>
 #include <linux/crash_dump.h>
 #include <linux/dma-direct.h>
+#include <trace/hooks/iommu.h>
 
 struct iommu_dma_msi_page {
 	struct list_head	list;
@@ -465,6 +466,8 @@
 		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
 				       true);
 
+	trace_android_vh_iommu_iovad_alloc_iova(dev, iovad, (dma_addr_t)iova << shift, size);
+
 	return (dma_addr_t)iova << shift;
 }
 
@@ -483,6 +486,8 @@
 	else
 		free_iova_fast(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad));
+
+	trace_android_vh_iommu_iovad_free_iova(iovad, iova, size);
 }
 
 static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
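
The two trace_android_vh_iommu_* calls added above are Android vendor hooks: tracepoint-backed attachment points that a vendor module can bind a handler to at runtime. A minimal registration sketch, assuming the hooks are declared with DECLARE_HOOK in include/trace/hooks/iommu.h with the argument order used at the call sites (the register_trace_android_vh_* helper is the usual ACK convention, not shown in this diff):

```c
#include <linux/device.h>
#include <linux/iova.h>
#include <linux/module.h>
#include <trace/hooks/iommu.h>

/* Handler arguments mirror the call site (dev, iovad, iova, size),
 * preceded by the opaque data pointer passed at registration time. */
static void vendor_iova_alloc(void *data, struct device *dev,
			      struct iova_domain *iovad,
			      dma_addr_t iova, size_t size)
{
	/* e.g. per-device IOVA accounting for debug builds */
	dev_dbg(dev, "iova alloc %pad (%zu bytes)\n", &iova, size);
}

static int __init vendor_iommu_hooks_init(void)
{
	/* Handlers are typically registered once at module init. */
	return register_trace_android_vh_iommu_iovad_alloc_iova(
			vendor_iova_alloc, NULL);
}
module_init(vendor_iommu_hooks_init);
MODULE_LICENSE("GPL");
```
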
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index daec330..ba8f819 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -18,6 +18,10 @@
 #include <linux/percpu.h>
 #include <linux/refcount.h>
 #include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/wakeup_reason.h>
+#include <trace/hooks/gic_v3.h>
 
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-common.h>
@@ -29,6 +33,8 @@
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 
+#include <trace/hooks/gic.h>
+
 #include "irq-gic-common.h"
 
 #define GICD_INT_NMI_PRI	(GICD_INT_DEF_PRI & ~0x80)
@@ -730,6 +736,7 @@
 
 	if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
 		WARN_ONCE(true, "Unexpected interrupt received!\n");
+		log_abnormal_wakeup_reason("unexpected HW IRQ %u", irqnr);
 		gic_deactivate_unhandled(irqnr);
 	}
 }
@@ -824,11 +831,15 @@
 	 * enabled.
 	 */
 	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
-	for (i = 32; i < GIC_LINE_NR; i++)
+	for (i = 32; i < GIC_LINE_NR; i++) {
+		trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTER, &affinity);
 		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
+	}
 
-	for (i = 0; i < GIC_ESPI_NR; i++)
+	for (i = 0; i < GIC_ESPI_NR; i++) {
+		trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTERnE, &affinity);
 		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
+	}
 }
 
 static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
@@ -1260,6 +1271,7 @@
 	reg = gic_dist_base(d) + offset + (index * 8);
 	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
 
+	trace_android_vh_gic_v3_set_affinity(d, mask_val, &val);
 	gic_write_irouter(val, reg);
 
 	/*
@@ -1314,6 +1326,28 @@
 static inline void gic_cpu_pm_init(void) { }
 #endif /* CONFIG_CPU_PM */
 
+#ifdef CONFIG_PM
+void gic_resume(void)
+{
+	trace_android_vh_gic_resume(gic_data.domain, gic_data.dist_base);
+}
+EXPORT_SYMBOL_GPL(gic_resume);
+
+static struct syscore_ops gic_syscore_ops = {
+	.resume = gic_resume,
+};
+
+static void gic_syscore_init(void)
+{
+	register_syscore_ops(&gic_syscore_ops);
+}
+
+#else
+static inline void gic_syscore_init(void) { }
+void gic_resume(void) { }
+#endif /* CONFIG_PM */
+
 static struct irq_chip gic_chip = {
 	.name			= "GICv3",
 	.irq_mask		= gic_mask_irq,
@@ -1798,6 +1832,7 @@
 	gic_cpu_init();
 	gic_smp_init();
 	gic_cpu_pm_init();
+	gic_syscore_init();
 
 	if (gic_dist_supports_lpis()) {
 		its_init(handle, &gic_data.rdists, gic_data.domain);
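
The gic_resume() syscore hook added above gives vendor code a callback point after system resume. A hypothetical handler, under the same DECLARE_HOOK assumption as before and matching the (domain, dist_base) arguments at the call site:

```c
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <trace/hooks/gic_v3.h>

static void vendor_gic_resume(void *data, struct irq_domain *domain,
			      void __iomem *dist_base)
{
	/* e.g. re-apply vendor-specific GIC distributor state here */
}

static int __init vendor_gic_hooks_init(void)
{
	return register_trace_android_vh_gic_resume(vendor_gic_resume, NULL);
}
module_init(vendor_gic_hooks_init);
MODULE_LICENSE("GPL");
```
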
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b5ea378..2d7fc79 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -289,6 +289,27 @@
 
 	  If unsure, say N.
 
+config DM_DEFAULT_KEY
+	tristate "Default-key target support"
+	depends on BLK_DEV_DM
+	depends on BLK_INLINE_ENCRYPTION
+	# dm-default-key doesn't require -o inlinecrypt, but it does currently
+	# rely on the inline encryption hooks being built into the kernel.
+	depends on FS_ENCRYPTION_INLINE_CRYPT
+	help
+	  This device-mapper target allows you to create a device that
+	  assigns a default encryption key to bios that aren't for the
+	  contents of an encrypted file.
+
+	  This ensures that all blocks on-disk will be encrypted with
+	  some key, without the performance hit of file contents being
+	  encrypted twice when fscrypt (File-Based Encryption) is used.
+
+	  It is only appropriate to use dm-default-key when key
+	  configuration is tightly controlled, like it is in Android,
+	  such that all fscrypt keys are at least as hard to compromise
+	  as the default key.
+
 config DM_SNAPSHOT
        tristate "Snapshot target"
        depends on BLK_DEV_DM
@@ -652,4 +673,30 @@
 	  Enables audit logging of several security relevant events in the
 	  particular device-mapper targets, especially the integrity target.
 
+config DM_BOW
+	tristate "Backup block device"
+	depends on BLK_DEV_DM
+	select DM_BUFIO
+	help
+	  This device-mapper target takes a device and keeps a log of all
+	  changes, backing up overwritten data into free blocks that are
+	  identified by issuing trim commands. The original state can then
+	  be restored by running a command-line utility, or the changes
+	  committed by simply replacing the target.
+
+	  If unsure, say N.
+
+config DM_USER
+	tristate "Block device in userspace"
+	depends on BLK_DEV_DM
+	default y
+	help
+	  This device-mapper target allows a userspace daemon to provide the
+	  contents of a block device.  See
+	  <file:Documentation/block/dm-user.rst> for more information.
+
+	  To compile this code as a module, choose M here: the module will be
+	  called dm-user.
+
+	  If unsure, say N.
+
 endif # MD
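
For reference, an illustrative config fragment enabling the three targets added to this Kconfig (dm-default-key additionally pulls in the inline-encryption dependencies listed in its entry):

```
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
CONFIG_DM_DEFAULT_KEY=y
CONFIG_DM_BOW=y
CONFIG_DM_USER=m
```
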
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 0454b08..5b6c6ea 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -57,6 +57,7 @@
 obj-$(CONFIG_DM_BUFIO)		+= dm-bufio.o
 obj-$(CONFIG_DM_BIO_PRISON)	+= dm-bio-prison.o
 obj-$(CONFIG_DM_CRYPT)		+= dm-crypt.o
+obj-$(CONFIG_DM_DEFAULT_KEY)	+= dm-default-key.o
 obj-$(CONFIG_DM_DELAY)		+= dm-delay.o
 obj-$(CONFIG_DM_DUST)		+= dm-dust.o
 obj-$(CONFIG_DM_FLAKEY)		+= dm-flakey.o
@@ -83,6 +84,8 @@
 obj-$(CONFIG_DM_INTEGRITY)	+= dm-integrity.o
 obj-$(CONFIG_DM_ZONED)		+= dm-zoned.o
 obj-$(CONFIG_DM_WRITECACHE)	+= dm-writecache.o
+obj-$(CONFIG_DM_BOW)		+= dm-bow.o
+obj-$(CONFIG_DM_USER)		+= dm-user.o
 
 ifeq ($(CONFIG_DM_INIT),y)
 dm-mod-objs			+= dm-init.o
diff --git a/drivers/md/dm-bow.c b/drivers/md/dm-bow.c
new file mode 100644
index 0000000..43bca2e
--- /dev/null
+++ b/drivers/md/dm-bow.c
@@ -0,0 +1,1301 @@
+/*
+ * Copyright (C) 2018 Google Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+#include "dm-core.h"
+
+#include <linux/crc32.h>
+#include <linux/dm-bufio.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "bow"
+
+struct log_entry {
+	u64 source;
+	u64 dest;
+	u32 size;
+	u32 checksum;
+} __packed;
+
+struct log_sector {
+	u32 magic;
+	u16 header_version;
+	u16 header_size;
+	u32 block_size;
+	u32 count;
+	u32 sequence;
+	sector_t sector0;
+	struct log_entry entries[];
+} __packed;
+
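+/*
+ * Illustrative sizing note: both structs above are packed, so with a
+ * 64-bit sector_t a log_entry is 24 bytes and the log_sector header is
+ * 28 bytes. A default 4096-byte block therefore holds
+ * (4096 - 28) / 24 = 169 log entries before the log must be backed up
+ * to a fresh block (see backup_log_sector() below).
+ */
+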
+/*
+ * MAGIC is BOW in ascii
+ */
+#define MAGIC 0x00574f42
+#define HEADER_VERSION 0x0100
+
+/*
+ * A sorted set of ranges representing the state of the data on the device.
+ * Use an rb_tree for fast lookup of a given sector
+ * Consecutive ranges are always of different type - operations on this
+ * set must merge matching consecutive ranges.
+ *
+ * Top range is always of type TOP
+ */
+struct bow_range {
+	struct rb_node		node;
+	sector_t		sector;
+	enum {
+		INVALID,	/* Type not set */
+		SECTOR0,	/* First sector - holds log record */
+		SECTOR0_CURRENT,/* Live contents of sector0 */
+		UNCHANGED,	/* Original contents */
+		TRIMMED,	/* Range has been trimmed */
+		CHANGED,	/* Range has been changed */
+		BACKUP,		/* Range is being used as a backup */
+		TOP,		/* Final range - sector is size of device */
+	} type;
+	struct list_head	trimmed_list; /* list of TRIMMED ranges */
+};
+
+static const char * const readable_type[] = {
+	"Invalid",
+	"Sector0",
+	"Sector0_current",
+	"Unchanged",
+	"Free",
+	"Changed",
+	"Backup",
+	"Top",
+};
+
+enum state {
+	TRIM,
+	CHECKPOINT,
+	COMMITTED,
+};
+
+struct bow_context {
+	struct dm_dev *dev;
+	u32 block_size;
+	u32 block_shift;
+	struct workqueue_struct *workqueue;
+	struct dm_bufio_client *bufio;
+	struct mutex ranges_lock; /* Hold to access this struct and/or ranges */
+	struct rb_root ranges;
+	struct dm_kobject_holder kobj_holder;	/* for sysfs attributes */
+	atomic_t state; /* One of the enum state values above */
+	u64 trims_total;
+	struct log_sector *log_sector;
+	struct list_head trimmed_list;
+	bool forward_trims;
+};
+
+static sector_t range_top(struct bow_range *br)
+{
+	return container_of(rb_next(&br->node), struct bow_range, node)
+		->sector;
+}
+
+static u64 range_size(struct bow_range *br)
+{
+	return (range_top(br) - br->sector) * SECTOR_SIZE;
+}
+
+static sector_t bvec_top(struct bvec_iter *bi_iter)
+{
+	return bi_iter->bi_sector + bi_iter->bi_size / SECTOR_SIZE;
+}
+
+/*
+ * Find the first range that overlaps with bi_iter
+ * bi_iter is set to the size of the overlapping sub-range
+ */
+static struct bow_range *find_first_overlapping_range(struct rb_root *ranges,
+						      struct bvec_iter *bi_iter)
+{
+	struct rb_node *node = ranges->rb_node;
+	struct bow_range *br;
+
+	while (node) {
+		br = container_of(node, struct bow_range, node);
+
+		if (br->sector <= bi_iter->bi_sector
+		    && bi_iter->bi_sector < range_top(br))
+			break;
+
+		if (bi_iter->bi_sector < br->sector)
+			node = node->rb_left;
+		else
+			node = node->rb_right;
+	}
+
+	WARN_ON(!node);
+	if (!node)
+		return NULL;
+
+	if (range_top(br) - bi_iter->bi_sector
+	    < bi_iter->bi_size >> SECTOR_SHIFT)
+		bi_iter->bi_size = (range_top(br) - bi_iter->bi_sector)
+			<< SECTOR_SHIFT;
+
+	return br;
+}
+
+static void add_before(struct rb_root *ranges, struct bow_range *new_br,
+		struct bow_range *existing)
+{
+	struct rb_node *parent = &(existing->node);
+	struct rb_node **link = &(parent->rb_left);
+
+	while (*link) {
+		parent = *link;
+		link = &((*link)->rb_right);
+	}
+
+	rb_link_node(&new_br->node, parent, link);
+	rb_insert_color(&new_br->node, ranges);
+}
+
+/*
+ * Given a range br returned by find_first_overlapping_range, split br into a
+ * leading range, a range matching the bi_iter and a trailing range.
+ * Leading and trailing may end up size 0 and will then be deleted. The
+ * new range matching the bi_iter is then returned and should have its type
+ * and type specific fields populated.
+ * If bi_iter runs off the end of the range, bi_iter is truncated accordingly
+ */
+static int split_range(struct bow_context *bc, struct bow_range **br,
+		       struct bvec_iter *bi_iter)
+{
+	struct bow_range *new_br;
+
+	if (bi_iter->bi_sector < (*br)->sector) {
+		WARN_ON(true);
+		return BLK_STS_IOERR;
+	}
+
+	if (bi_iter->bi_sector > (*br)->sector) {
+		struct bow_range *leading_br =
+			kzalloc(sizeof(*leading_br), GFP_KERNEL);
+
+		if (!leading_br)
+			return BLK_STS_RESOURCE;
+
+		*leading_br = **br;
+		if (leading_br->type == TRIMMED)
+			list_add(&leading_br->trimmed_list, &bc->trimmed_list);
+
+		add_before(&bc->ranges, leading_br, *br);
+		(*br)->sector = bi_iter->bi_sector;
+	}
+
+	if (bvec_top(bi_iter) >= range_top(*br)) {
+		bi_iter->bi_size = (range_top(*br) - (*br)->sector)
+					* SECTOR_SIZE;
+		return BLK_STS_OK;
+	}
+
+	/* new_br will be the beginning, existing br will be the tail */
+	new_br = kzalloc(sizeof(*new_br), GFP_KERNEL);
+	if (!new_br)
+		return BLK_STS_RESOURCE;
+
+	new_br->sector = (*br)->sector;
+	(*br)->sector = bvec_top(bi_iter);
+	add_before(&bc->ranges, new_br, *br);
+	*br = new_br;
+
+	return BLK_STS_OK;
+}
+
+/*
+ * Sets type of a range. May merge range into surrounding ranges
+ * Since br may be invalidated, always sets br to NULL to prevent
+ * usage after this is called
+ */
+static void set_type(struct bow_context *bc, struct bow_range **br, int type)
+{
+	struct bow_range *prev = container_of(rb_prev(&(*br)->node),
+						      struct bow_range, node);
+	struct bow_range *next = container_of(rb_next(&(*br)->node),
+						      struct bow_range, node);
+
+	if ((*br)->type == TRIMMED) {
+		bc->trims_total -= range_size(*br);
+		list_del(&(*br)->trimmed_list);
+	}
+
+	if (type == TRIMMED) {
+		bc->trims_total += range_size(*br);
+		list_add(&(*br)->trimmed_list, &bc->trimmed_list);
+	}
+
+	(*br)->type = type;
+
+	if (next->type == type) {
+		if (type == TRIMMED)
+			list_del(&next->trimmed_list);
+		rb_erase(&next->node, &bc->ranges);
+		kfree(next);
+	}
+
+	if (prev->type == type) {
+		if (type == TRIMMED)
+			list_del(&(*br)->trimmed_list);
+		rb_erase(&(*br)->node, &bc->ranges);
+		kfree(*br);
+	}
+
+	*br = NULL;
+}
+
+static struct bow_range *find_free_range(struct bow_context *bc)
+{
+	if (list_empty(&bc->trimmed_list)) {
+		DMERR("Unable to find free space to back up to");
+		return NULL;
+	}
+
+	return list_first_entry(&bc->trimmed_list, struct bow_range,
+				trimmed_list);
+}
+
+static sector_t sector_to_page(struct bow_context const *bc, sector_t sector)
+{
+	WARN_ON((sector & (((sector_t)1 << (bc->block_shift - SECTOR_SHIFT)) - 1))
+		!= 0);
+	return sector >> (bc->block_shift - SECTOR_SHIFT);
+}
+
+static int copy_data(struct bow_context const *bc,
+		     struct bow_range *source, struct bow_range *dest,
+		     u32 *checksum)
+{
+	int i;
+
+	if (range_size(source) != range_size(dest)) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	if (checksum)
+		*checksum = sector_to_page(bc, source->sector);
+
+	for (i = 0; i < range_size(source) >> bc->block_shift; ++i) {
+		struct dm_buffer *read_buffer, *write_buffer;
+		u8 *read, *write;
+		sector_t page = sector_to_page(bc, source->sector) + i;
+
+		read = dm_bufio_read(bc->bufio, page, &read_buffer);
+		if (IS_ERR(read)) {
+			DMERR("Cannot read page %llu",
+			      (unsigned long long)page);
+			return PTR_ERR(read);
+		}
+
+		if (checksum)
+			*checksum = crc32(*checksum, read, bc->block_size);
+
+		write = dm_bufio_new(bc->bufio,
+				     sector_to_page(bc, dest->sector) + i,
+				     &write_buffer);
+		if (IS_ERR(write)) {
+			DMERR("Cannot write sector");
+			dm_bufio_release(read_buffer);
+			return PTR_ERR(write);
+		}
+
+		memcpy(write, read, bc->block_size);
+
+		dm_bufio_mark_buffer_dirty(write_buffer);
+		dm_bufio_release(write_buffer);
+		dm_bufio_release(read_buffer);
+	}
+
+	dm_bufio_write_dirty_buffers(bc->bufio);
+	return BLK_STS_OK;
+}
+
+/****** logging functions ******/
+
+static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
+			 unsigned int size, u32 checksum);
+
+static int backup_log_sector(struct bow_context *bc)
+{
+	struct bow_range *first_br, *free_br;
+	struct bvec_iter bi_iter;
+	u32 checksum = 0;
+	int ret;
+
+	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
+
+	if (first_br->type != SECTOR0) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	if (range_size(first_br) != bc->block_size) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	free_br = find_free_range(bc);
+	/* No space left - return this error to userspace */
+	if (!free_br)
+		return BLK_STS_NOSPC;
+	bi_iter.bi_sector = free_br->sector;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &free_br, &bi_iter);
+	if (ret)
+		return ret;
+	if (bi_iter.bi_size != bc->block_size) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	ret = copy_data(bc, first_br, free_br, &checksum);
+	if (ret)
+		return ret;
+
+	bc->log_sector->count = 0;
+	bc->log_sector->sequence++;
+	ret = add_log_entry(bc, first_br->sector, free_br->sector,
+			    range_size(first_br), checksum);
+	if (ret)
+		return ret;
+
+	set_type(bc, &free_br, BACKUP);
+	return BLK_STS_OK;
+}
+
+static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
+			 unsigned int size, u32 checksum)
+{
+	struct dm_buffer *sector_buffer;
+	u8 *sector;
+
+	if (sizeof(struct log_sector)
+	    + sizeof(struct log_entry) * (bc->log_sector->count + 1)
+		> bc->block_size) {
+		int ret = backup_log_sector(bc);
+
+		if (ret)
+			return ret;
+	}
+
+	sector = dm_bufio_new(bc->bufio, 0, &sector_buffer);
+	if (IS_ERR(sector)) {
+		DMERR("Cannot write boot sector");
+		dm_bufio_release(sector_buffer);
+		return BLK_STS_NOSPC;
+	}
+
+	bc->log_sector->entries[bc->log_sector->count].source = source;
+	bc->log_sector->entries[bc->log_sector->count].dest = dest;
+	bc->log_sector->entries[bc->log_sector->count].size = size;
+	bc->log_sector->entries[bc->log_sector->count].checksum = checksum;
+	bc->log_sector->count++;
+
+	memcpy(sector, bc->log_sector, bc->block_size);
+	dm_bufio_mark_buffer_dirty(sector_buffer);
+	dm_bufio_release(sector_buffer);
+	dm_bufio_write_dirty_buffers(bc->bufio);
+	return BLK_STS_OK;
+}
+
+static int prepare_log(struct bow_context *bc)
+{
+	struct bow_range *free_br, *first_br;
+	struct bvec_iter bi_iter;
+	u32 checksum = 0;
+	int ret;
+
+	/* Carve out first sector as log sector */
+	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
+	if (first_br->type != UNCHANGED) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	if (range_size(first_br) < bc->block_size) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+	bi_iter.bi_sector = 0;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &first_br, &bi_iter);
+	if (ret)
+		return ret;
+	first_br->type = SECTOR0;
+	if (range_size(first_br) != bc->block_size) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	/* Find free sector for active sector0 reads/writes */
+	free_br = find_free_range(bc);
+	if (!free_br)
+		return BLK_STS_NOSPC;
+	bi_iter.bi_sector = free_br->sector;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &free_br, &bi_iter);
+	if (ret)
+		return ret;
+
+	/* Copy data */
+	ret = copy_data(bc, first_br, free_br, NULL);
+	if (ret)
+		return ret;
+
+	bc->log_sector->sector0 = free_br->sector;
+
+	set_type(bc, &free_br, SECTOR0_CURRENT);
+
+	/* Find free sector to back up original sector zero */
+	free_br = find_free_range(bc);
+	if (!free_br)
+		return BLK_STS_NOSPC;
+	bi_iter.bi_sector = free_br->sector;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &free_br, &bi_iter);
+	if (ret)
+		return ret;
+
+	/* Back up */
+	ret = copy_data(bc, first_br, free_br, &checksum);
+	if (ret)
+		return ret;
+
+	/*
+	 * Set up our replacement boot sector - it will get written when we
+	 * add the first log entry, which we do immediately
+	 */
+	bc->log_sector->magic = MAGIC;
+	bc->log_sector->header_version = HEADER_VERSION;
+	bc->log_sector->header_size = sizeof(*bc->log_sector);
+	bc->log_sector->block_size = bc->block_size;
+	bc->log_sector->count = 0;
+	bc->log_sector->sequence = 0;
+
+	/* Add log entry */
+	ret = add_log_entry(bc, first_br->sector, free_br->sector,
+			    range_size(first_br), checksum);
+	if (ret)
+		return ret;
+
+	set_type(bc, &free_br, BACKUP);
+	return BLK_STS_OK;
+}
+
+static struct bow_range *find_sector0_current(struct bow_context *bc)
+{
+	struct bvec_iter bi_iter;
+
+	bi_iter.bi_sector = bc->log_sector->sector0;
+	bi_iter.bi_size = bc->block_size;
+	return find_first_overlapping_range(&bc->ranges, &bi_iter);
+}
+
+/****** sysfs interface functions ******/
+
+static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	struct bow_context *bc = container_of(kobj, struct bow_context,
+					      kobj_holder.kobj);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&bc->state));
+}
+
+static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct bow_context *bc = container_of(kobj, struct bow_context,
+					      kobj_holder.kobj);
+	enum state state, original_state;
+	int ret;
+
+	state = buf[0] - '0';
+	if (state < TRIM || state > COMMITTED) {
+		DMERR("State value %d out of range", state);
+		return -EINVAL;
+	}
+
+	mutex_lock(&bc->ranges_lock);
+	original_state = atomic_read(&bc->state);
+	if (state != original_state + 1) {
+		DMERR("Invalid state change from %d to %d",
+		      original_state, state);
+		ret = -EINVAL;
+		goto bad;
+	}
+
+	DMINFO("Switching to state %s", state == CHECKPOINT ? "Checkpoint"
+	       : state == COMMITTED ? "Committed" : "Unknown");
+
+	if (state == CHECKPOINT) {
+		ret = prepare_log(bc);
+		if (ret) {
+			DMERR("Failed to switch to checkpoint state");
+			goto bad;
+		}
+	} else if (state == COMMITTED) {
+		struct bow_range *br = find_sector0_current(bc);
+		struct bow_range *sector0_br =
+			container_of(rb_first(&bc->ranges), struct bow_range,
+				     node);
+
+		ret = copy_data(bc, br, sector0_br, NULL);
+		if (ret) {
+			DMERR("Failed to switch to committed state");
+			goto bad;
+		}
+	}
+	atomic_inc(&bc->state);
+	ret = count;
+
+bad:
+	mutex_unlock(&bc->ranges_lock);
+	return ret;
+}
+
+static ssize_t free_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	struct bow_context *bc = container_of(kobj, struct bow_context,
+					      kobj_holder.kobj);
+	u64 trims_total;
+
+	mutex_lock(&bc->ranges_lock);
+	trims_total = bc->trims_total;
+	mutex_unlock(&bc->ranges_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", trims_total);
+}
+
+static struct kobj_attribute attr_state = __ATTR_RW(state);
+static struct kobj_attribute attr_free = __ATTR_RO(free);
+
+static struct attribute *bow_attrs[] = {
+	&attr_state.attr,
+	&attr_free.attr,
+	NULL
+};
+
+static struct kobj_type bow_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.default_attrs = bow_attrs,
+	.release = dm_kobject_release
+};
+
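+/*
+ * Illustrative userspace flow (the sysfs path is assumed from the
+ * kobject_init_and_add() call in dm_bow_ctr(), which parents a "bow"
+ * node under the dm device's block directory):
+ *
+ *   echo 1 > /sys/block/dm-N/bow/state   # TRIM -> CHECKPOINT
+ *   cat /sys/block/dm-N/bow/free         # trimmed bytes still free
+ *   echo 2 > /sys/block/dm-N/bow/state   # CHECKPOINT -> COMMITTED
+ */
+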
+/****** constructor/destructor ******/
+
+static void dm_bow_dtr(struct dm_target *ti)
+{
+	struct bow_context *bc = (struct bow_context *) ti->private;
+	struct kobject *kobj;
+
+	if (bc->workqueue)
+		destroy_workqueue(bc->workqueue);
+	if (bc->bufio)
+		dm_bufio_client_destroy(bc->bufio);
+
+	kobj = &bc->kobj_holder.kobj;
+	if (kobj->state_initialized) {
+		kobject_put(kobj);
+		wait_for_completion(dm_get_completion_from_kobject(kobj));
+	}
+
+	while (rb_first(&bc->ranges)) {
+		struct bow_range *br = container_of(rb_first(&bc->ranges),
+					      struct bow_range, node);
+
+		rb_erase(&br->node, &bc->ranges);
+		kfree(br);
+	}
+
+	mutex_destroy(&bc->ranges_lock);
+	kfree(bc->log_sector);
+	kfree(bc);
+}
+
+static void dm_bow_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+	struct bow_context *bc = ti->private;
+	const unsigned int block_size = bc->block_size;
+
+	limits->logical_block_size =
+		max_t(unsigned int, limits->logical_block_size, block_size);
+	limits->physical_block_size =
+		max_t(unsigned int, limits->physical_block_size, block_size);
+	limits->io_min = max_t(unsigned int, limits->io_min, block_size);
+
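+	/*
+	 * If the underlying queue reports no discard support, advertise
+	 * synthetic discard limits anyway (bow consumes trims itself to
+	 * learn which blocks are free), but remember not to forward them
+	 * to the backing device.
+	 */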
+	if (limits->max_discard_sectors == 0) {
+		limits->discard_granularity = 1 << 12;
+		limits->max_hw_discard_sectors = 1 << 15;
+		limits->max_discard_sectors = 1 << 15;
+		bc->forward_trims = false;
+	} else {
+		limits->discard_granularity = 1 << 12;
+		bc->forward_trims = true;
+	}
+}
+
+static int dm_bow_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct bow_context *bc = ti->private;
+	struct dm_arg_set as;
+	static const struct dm_arg _args[] = {
+		{0, 1, "Invalid number of feature args"},
+	};
+	unsigned int opt_params;
+	const char *opt_string;
+	int err;
+	char dummy;
+
+	as.argc = argc;
+	as.argv = argv;
+
+	err = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+	if (err)
+		return err;
+
+	while (opt_params--) {
+		opt_string = dm_shift_arg(&as);
+		if (!opt_string) {
+			ti->error = "Not enough feature arguments";
+			return -EINVAL;
+		}
+
+		if (sscanf(opt_string, "block_size:%u%c",
+					&bc->block_size, &dummy) == 1) {
+			if (bc->block_size < SECTOR_SIZE ||
+			    bc->block_size > 4096 ||
+			    !is_power_of_2(bc->block_size)) {
+				ti->error = "Invalid block_size";
+				return -EINVAL;
+			}
+		} else {
+			ti->error = "Invalid feature arguments";
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct bow_context *bc;
+	struct bow_range *br;
+	int ret;
+	struct mapped_device *md = dm_table_get_md(ti->table);
+
+	if (argc < 1) {
+		ti->error = "Invalid argument count";
+		return -EINVAL;
+	}
+
+	bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+	if (!bc) {
+		ti->error = "Cannot allocate bow context";
+		return -ENOMEM;
+	}
+
+	ti->num_flush_bios = 1;
+	ti->num_discard_bios = 1;
+	ti->num_write_same_bios = 1;
+	ti->private = bc;
+
+	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+			    &bc->dev);
+	if (ret) {
+		ti->error = "Device lookup failed";
+		goto bad;
+	}
+
+	bc->block_size =
+		bdev_get_queue(bc->dev->bdev)->limits.logical_block_size;
+	if (argc > 1) {
+		ret = dm_bow_ctr_optional(ti, argc - 1, &argv[1]);
+		if (ret)
+			goto bad;
+	}
+
+	bc->block_shift = ilog2(bc->block_size);
+	bc->log_sector = kzalloc(bc->block_size, GFP_KERNEL);
+	if (!bc->log_sector) {
+		ti->error = "Cannot allocate log sector";
+		goto bad;
+	}
+
+	init_completion(&bc->kobj_holder.completion);
+	ret = kobject_init_and_add(&bc->kobj_holder.kobj, &bow_ktype,
+				   &disk_to_dev(dm_disk(md))->kobj, "%s",
+				   "bow");
+	if (ret) {
+		ti->error = "Cannot create sysfs node";
+		goto bad;
+	}
+
+	mutex_init(&bc->ranges_lock);
+	bc->ranges = RB_ROOT;
+	bc->bufio = dm_bufio_client_create(bc->dev->bdev, bc->block_size, 1, 0,
+					   NULL, NULL);
+	if (IS_ERR(bc->bufio)) {
+		ti->error = "Cannot initialize dm-bufio";
+		ret = PTR_ERR(bc->bufio);
+		bc->bufio = NULL;
+		goto bad;
+	}
+
+	bc->workqueue = alloc_workqueue("dm-bow",
+					WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM
+					| WQ_UNBOUND, num_online_cpus());
+	if (!bc->workqueue) {
+		ti->error = "Cannot allocate workqueue";
+		ret = -ENOMEM;
+		goto bad;
+	}
+
+	INIT_LIST_HEAD(&bc->trimmed_list);
+
+	br = kzalloc(sizeof(*br), GFP_KERNEL);
+	if (!br) {
+		ti->error = "Cannot allocate ranges";
+		ret = -ENOMEM;
+		goto bad;
+	}
+
+	br->sector = ti->len;
+	br->type = TOP;
+	rb_link_node(&br->node, NULL, &bc->ranges.rb_node);
+	rb_insert_color(&br->node, &bc->ranges);
+
+	br = kzalloc(sizeof(*br), GFP_KERNEL);
+	if (!br) {
+		ti->error = "Cannot allocate ranges";
+		ret = -ENOMEM;
+		goto bad;
+	}
+
+	br->sector = 0;
+	br->type = UNCHANGED;
+	rb_link_node(&br->node, bc->ranges.rb_node,
+		     &bc->ranges.rb_node->rb_left);
+	rb_insert_color(&br->node, &bc->ranges);
+
+	ti->discards_supported = true;
+
+	return 0;
+
+bad:
+	dm_bow_dtr(ti);
+	return ret;
+}
+
+/****** Handle writes ******/
+
+static int prepare_unchanged_range(struct bow_context *bc, struct bow_range *br,
+				   struct bvec_iter *bi_iter,
+				   bool record_checksum)
+{
+	struct bow_range *backup_br;
+	struct bvec_iter backup_bi;
+	sector_t log_source, log_dest;
+	unsigned int log_size;
+	u32 checksum = 0;
+	int ret;
+	int original_type;
+	sector_t sector0;
+
+	/* Find a free range */
+	backup_br = find_free_range(bc);
+	if (!backup_br)
+		return BLK_STS_NOSPC;
+
+	/* Carve out a backup range. This may be smaller than the br given */
+	backup_bi.bi_sector = backup_br->sector;
+	backup_bi.bi_size = min(range_size(backup_br), (u64) bi_iter->bi_size);
+	ret = split_range(bc, &backup_br, &backup_bi);
+	if (ret)
+		return ret;
+
+	/*
+	 * Carve out a changed range. This will not be smaller than the backup
+	 * br since the backup br is smaller than the source range and iterator
+	 */
+	bi_iter->bi_size = backup_bi.bi_size;
+	ret = split_range(bc, &br, bi_iter);
+	if (ret)
+		return ret;
+	if (range_size(br) != range_size(backup_br)) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+
+	/* Copy data over */
+	ret = copy_data(bc, br, backup_br, record_checksum ? &checksum : NULL);
+	if (ret)
+		return ret;
+
+	/* Add an entry to the log */
+	log_source = br->sector;
+	log_dest = backup_br->sector;
+	log_size = range_size(br);
+
+	/*
+	 * Set the types. Note that since set_type also amalgamates ranges
+	 * we have to set both sectors to their final type before calling
+	 * set_type on either
+	 */
+	original_type = br->type;
+	sector0 = backup_br->sector;
+	bc->trims_total -= range_size(backup_br);
+	if (backup_br->type == TRIMMED)
+		list_del(&backup_br->trimmed_list);
+	backup_br->type = br->type == SECTOR0_CURRENT ? SECTOR0_CURRENT
+						      : BACKUP;
+	br->type = CHANGED;
+	set_type(bc, &backup_br, backup_br->type);
+
+	/*
+	 * Add the log entry after marking the backup sector, since adding a log
+	 * can cause another backup
+	 */
+	ret = add_log_entry(bc, log_source, log_dest, log_size, checksum);
+	if (ret) {
+		br->type = original_type;
+		return ret;
+	}
+
+	/* Now it is safe to mark this backup successful */
+	if (original_type == SECTOR0_CURRENT)
+		bc->log_sector->sector0 = sector0;
+
+	set_type(bc, &br, br->type);
+	return ret;
+}
+
+static int prepare_free_range(struct bow_context *bc, struct bow_range *br,
+			      struct bvec_iter *bi_iter)
+{
+	int ret;
+
+	ret = split_range(bc, &br, bi_iter);
+	if (ret)
+		return ret;
+	set_type(bc, &br, CHANGED);
+	return BLK_STS_OK;
+}
+
+static int prepare_changed_range(struct bow_context *bc, struct bow_range *br,
+				 struct bvec_iter *bi_iter)
+{
+	/* Nothing to do ... */
+	return BLK_STS_OK;
+}
+
+static int prepare_one_range(struct bow_context *bc,
+			     struct bvec_iter *bi_iter)
+{
+	struct bow_range *br = find_first_overlapping_range(&bc->ranges,
+							    bi_iter);
+	switch (br->type) {
+	case CHANGED:
+		return prepare_changed_range(bc, br, bi_iter);
+
+	case TRIMMED:
+		return prepare_free_range(bc, br, bi_iter);
+
+	case UNCHANGED:
+	case BACKUP:
+		return prepare_unchanged_range(bc, br, bi_iter, true);
+
+	/*
+	 * We cannot track the checksum for the active sector0, since it
+	 * may change at any point.
+	 */
+	case SECTOR0_CURRENT:
+		return prepare_unchanged_range(bc, br, bi_iter, false);
+
+	case SECTOR0:	/* Handled in the dm_bow_map */
+	case TOP:	/* Illegal - top is off the end of the device */
+	default:
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+}
+
+struct write_work {
+	struct work_struct work;
+	struct bow_context *bc;
+	struct bio *bio;
+};
+
+static void bow_write(struct work_struct *work)
+{
+	struct write_work *ww = container_of(work, struct write_work, work);
+	struct bow_context *bc = ww->bc;
+	struct bio *bio = ww->bio;
+	struct bvec_iter bi_iter = bio->bi_iter;
+	int ret = BLK_STS_OK;
+
+	kfree(ww);
+
+	mutex_lock(&bc->ranges_lock);
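+	/*
+	 * Walk the bio one overlapping range at a time:
+	 * prepare_one_range() truncates bi_iter to the current range, so
+	 * advance past what was prepared and recompute the remainder of
+	 * the original bio on each pass.
+	 */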
+	do {
+		ret = prepare_one_range(bc, &bi_iter);
+		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+		bi_iter.bi_size = bio->bi_iter.bi_size
+			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+			  * SECTOR_SIZE;
+	} while (!ret && bi_iter.bi_size);
+
+	mutex_unlock(&bc->ranges_lock);
+
+	if (!ret) {
+		bio_set_dev(bio, bc->dev->bdev);
+		submit_bio(bio);
+	} else {
+		DMERR("Write failure with error %d", -ret);
+		bio->bi_status = ret;
+		bio_endio(bio);
+	}
+}
+
+static int queue_write(struct bow_context *bc, struct bio *bio)
+{
+	struct write_work *ww = kmalloc(sizeof(*ww), GFP_NOIO | __GFP_NORETRY
+					| __GFP_NOMEMALLOC | __GFP_NOWARN);
+	if (!ww) {
+		DMERR("Failed to allocate write_work");
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&ww->work, bow_write);
+	ww->bc = bc;
+	ww->bio = bio;
+	queue_work(bc->workqueue, &ww->work);
+	return DM_MAPIO_SUBMITTED;
+}
+
+static int handle_sector0(struct bow_context *bc, struct bio *bio)
+{
+	int ret = DM_MAPIO_REMAPPED;
+
+	if (bio->bi_iter.bi_size > bc->block_size) {
+		struct bio *split = bio_split(bio,
+					       bc->block_size >> SECTOR_SHIFT,
+					       GFP_NOIO,
+					       &fs_bio_set);
+		if (!split) {
+			DMERR("Failed to split bio");
+			bio->bi_status = BLK_STS_RESOURCE;
+			bio_endio(bio);
+			return DM_MAPIO_SUBMITTED;
+		}
+
+		bio_chain(split, bio);
+		split->bi_iter.bi_sector = bc->log_sector->sector0;
+		bio_set_dev(split, bc->dev->bdev);
+		submit_bio(split);
+
+		if (bio_data_dir(bio) == WRITE)
+			ret = queue_write(bc, bio);
+	} else {
+		bio->bi_iter.bi_sector = bc->log_sector->sector0;
+	}
+
+	return ret;
+}
+
+static int add_trim(struct bow_context *bc, struct bio *bio)
+{
+	struct bow_range *br;
+	struct bvec_iter bi_iter = bio->bi_iter;
+
+	DMDEBUG("add_trim: %llu, %u",
+		(unsigned long long)bio->bi_iter.bi_sector,
+		bio->bi_iter.bi_size);
+
+	do {
+		br = find_first_overlapping_range(&bc->ranges, &bi_iter);
+
+		switch (br->type) {
+		case UNCHANGED:
+			if (!split_range(bc, &br, &bi_iter))
+				set_type(bc, &br, TRIMMED);
+			break;
+
+		case TRIMMED:
+			/* Nothing to do */
+			break;
+
+		default:
+			/* No other case is legal in TRIM state */
+			WARN_ON(true);
+			break;
+		}
+
+		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+		bi_iter.bi_size = bio->bi_iter.bi_size
+			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+			  * SECTOR_SIZE;
+
+	} while (bi_iter.bi_size);
+
+	bio_endio(bio);
+	return DM_MAPIO_SUBMITTED;
+}
+
+static int remove_trim(struct bow_context *bc, struct bio *bio)
+{
+	struct bow_range *br;
+	struct bvec_iter bi_iter = bio->bi_iter;
+
+	DMDEBUG("remove_trim: %llu, %u",
+		(unsigned long long)bio->bi_iter.bi_sector,
+		bio->bi_iter.bi_size);
+
+	do {
+		br = find_first_overlapping_range(&bc->ranges, &bi_iter);
+
+		switch (br->type) {
+		case UNCHANGED:
+			/* Nothing to do */
+			break;
+
+		case TRIMMED:
+			if (!split_range(bc, &br, &bi_iter))
+				set_type(bc, &br, UNCHANGED);
+			break;
+
+		default:
+			/* No other case is legal in TRIM state */
+			WARN_ON(true);
+			break;
+		}
+
+		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+		bi_iter.bi_size = bio->bi_iter.bi_size
+			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+			  * SECTOR_SIZE;
+
+	} while (bi_iter.bi_size);
+
+	return DM_MAPIO_REMAPPED;
+}
+
+static int remap_unless_illegal_trim(struct bow_context *bc, struct bio *bio)
+{
+	if (!bc->forward_trims && bio_op(bio) == REQ_OP_DISCARD) {
+		bio->bi_status = BLK_STS_NOTSUPP;
+		bio_endio(bio);
+		return DM_MAPIO_SUBMITTED;
+	} else {
+		bio_set_dev(bio, bc->dev->bdev);
+		return DM_MAPIO_REMAPPED;
+	}
+}
+
+/****** dm interface ******/
+
+static int dm_bow_map(struct dm_target *ti, struct bio *bio)
+{
+	int ret = DM_MAPIO_REMAPPED;
+	struct bow_context *bc = ti->private;
+
+	if (likely(atomic_read(&bc->state) == COMMITTED))
+		return remap_unless_illegal_trim(bc, bio);
+
+	if (bio_data_dir(bio) == READ && bio->bi_iter.bi_sector != 0)
+		return remap_unless_illegal_trim(bc, bio);
+
+	if (atomic_read(&bc->state) != COMMITTED) {
+		enum state state;
+
+		mutex_lock(&bc->ranges_lock);
+		state = atomic_read(&bc->state);
+		if (state == TRIM) {
+			if (bio_op(bio) == REQ_OP_DISCARD)
+				ret = add_trim(bc, bio);
+			else if (bio_data_dir(bio) == WRITE)
+				ret = remove_trim(bc, bio);
+			else
+				/* pass-through */;
+		} else if (state == CHECKPOINT) {
+			if (bio->bi_iter.bi_sector == 0)
+				ret = handle_sector0(bc, bio);
+			else if (bio_data_dir(bio) == WRITE)
+				ret = queue_write(bc, bio);
+			else
+				/* pass-through */;
+		} else {
+			/* pass-through */
+		}
+		mutex_unlock(&bc->ranges_lock);
+	}
+
+	if (ret == DM_MAPIO_REMAPPED)
+		return remap_unless_illegal_trim(bc, bio);
+
+	return ret;
+}
+
+static void dm_bow_tablestatus(struct dm_target *ti, char *result,
+			       unsigned int maxlen)
+{
+	char *end = result + maxlen;
+	struct bow_context *bc = ti->private;
+	struct rb_node *i;
+	int trimmed_list_length = 0;
+	int trimmed_range_count = 0;
+	struct bow_range *br;
+
+	if (maxlen == 0)
+		return;
+	result[0] = 0;
+
+	list_for_each_entry(br, &bc->trimmed_list, trimmed_list)
+		if (br->type == TRIMMED) {
+			++trimmed_list_length;
+		} else {
+			scnprintf(result, end - result,
+				  "ERROR: non-trimmed entry in trimmed_list");
+			return;
+		}
+
+	if (!rb_first(&bc->ranges)) {
+		scnprintf(result, end - result, "ERROR: Empty ranges");
+		return;
+	}
+
+	if (container_of(rb_first(&bc->ranges), struct bow_range, node)
+	    ->sector) {
+		scnprintf(result, end - result,
+			 "ERROR: First range does not start at sector 0");
+		return;
+	}
+
+	for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
+		struct bow_range *br = container_of(i, struct bow_range, node);
+
+		result += scnprintf(result, end - result, "%s: %llu",
+				    readable_type[br->type],
+				    (unsigned long long)br->sector);
+		if (result >= end)
+			return;
+
+		result += scnprintf(result, end - result, "\n");
+		if (result >= end)
+			return;
+
+		if (br->type == TRIMMED)
+			++trimmed_range_count;
+
+		if (br->type == TOP) {
+			if (br->sector != ti->len) {
+				scnprintf(result, end - result,
+					 "\nERROR: Top sector is incorrect");
+			}
+
+			if (&br->node != rb_last(&bc->ranges)) {
+				scnprintf(result, end - result,
+					  "\nERROR: Top sector is not last");
+			}
+
+			break;
+		}
+
+		if (!rb_next(i)) {
+			scnprintf(result, end - result,
+				  "\nERROR: Last range not of type TOP");
+			return;
+		}
+
+		if (br->sector > range_top(br)) {
+			scnprintf(result, end - result,
+				  "\nERROR: sectors out of order");
+			return;
+		}
+	}
+
+	if (trimmed_range_count != trimmed_list_length)
+		scnprintf(result, end - result,
+			  "\nERROR: not all trimmed ranges in trimmed list");
+}
+
+static void dm_bow_status(struct dm_target *ti, status_type_t type,
+			  unsigned int status_flags, char *result,
+			  unsigned int maxlen)
+{
+	switch (type) {
+	case STATUSTYPE_INFO:
+	case STATUSTYPE_IMA:
+		if (maxlen)
+			result[0] = 0;
+		break;
+
+	case STATUSTYPE_TABLE:
+		dm_bow_tablestatus(ti, result, maxlen);
+		break;
+	}
+}
+
+static int dm_bow_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+{
+	struct bow_context *bc = ti->private;
+	struct dm_dev *dev = bc->dev;
+
+	*bdev = dev->bdev;
+	/* Only pass ioctls through if the device sizes match exactly. */
+	return ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+static int dm_bow_iterate_devices(struct dm_target *ti,
+				  iterate_devices_callout_fn fn, void *data)
+{
+	struct bow_context *bc = ti->private;
+
+	return fn(ti, bc->dev, 0, ti->len, data);
+}
+
+static struct target_type bow_target = {
+	.name   = "bow",
+	.version = {1, 2, 0},
+	.features = DM_TARGET_PASSES_CRYPTO,
+	.module = THIS_MODULE,
+	.ctr    = dm_bow_ctr,
+	.dtr    = dm_bow_dtr,
+	.map    = dm_bow_map,
+	.status = dm_bow_status,
+	.prepare_ioctl  = dm_bow_prepare_ioctl,
+	.iterate_devices = dm_bow_iterate_devices,
+	.io_hints = dm_bow_io_hints,
+};
+
+static int __init dm_bow_init(void)
+{
+	int r = dm_register_target(&bow_target);
+
+	if (r < 0)
+		DMERR("registering bow failed %d", r);
+	return r;
+}
+
+static void __exit dm_bow_exit(void)
+{
+	dm_unregister_target(&bow_target);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(dm_bow_init);
+module_exit(dm_bow_exit);
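
A hypothetical dmsetup invocation matching the ctr arguments parsed by dm_bow_ctr() above (device path and sector count are placeholders):

```sh
# 2097152 sectors of 512 bytes = 1 GiB target over the backing device
dmsetup create bow --table "0 2097152 bow /dev/block/by-name/userdata"

# with the optional feature-arg group (count followed by the args):
dmsetup create bow --table \
    "0 2097152 bow /dev/block/by-name/userdata 1 block_size:4096"
```
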
diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c
new file mode 100644
index 0000000..9b3af1e
--- /dev/null
+++ b/drivers/md/dm-default-key.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Google, Inc.
+ */
+
+#include <linux/blk-crypto.h>
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX		"default-key"
+
+static const struct dm_default_key_cipher {
+	const char *name;
+	enum blk_crypto_mode_num mode_num;
+	int key_size;
+} dm_default_key_ciphers[] = {
+	{
+		.name = "aes-xts-plain64",
+		.mode_num = BLK_ENCRYPTION_MODE_AES_256_XTS,
+		.key_size = 64,
+	}, {
+		.name = "xchacha12,aes-adiantum-plain64",
+		.mode_num = BLK_ENCRYPTION_MODE_ADIANTUM,
+		.key_size = 32,
+	},
+};
+
+/**
+ * struct default_key_c - private data of a default-key target
+ * @dev: the underlying device
+ * @start: starting sector of the range of @dev which this target actually maps.
+ *	   For this purpose a "sector" is 512 bytes.
+ * @cipher_string: the name of the encryption algorithm being used
+ * @iv_offset: starting offset for IVs.  IVs are generated as if the target were
+ *	       preceded by @iv_offset 512-byte sectors.
+ * @sector_size: crypto sector size in bytes (usually 4096)
+ * @sector_bits: log2(sector_size)
+ * @key: the encryption key to use
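+ * @key_type: whether @key is a standard key or a hardware-wrapped key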
+ * @max_dun: the maximum DUN that may be used (computed from other params)
+ */
+struct default_key_c {
+	struct dm_dev *dev;
+	sector_t start;
+	const char *cipher_string;
+	u64 iv_offset;
+	unsigned int sector_size;
+	unsigned int sector_bits;
+	struct blk_crypto_key key;
+	enum blk_crypto_key_type key_type;
+	u64 max_dun;
+};
+
+static const struct dm_default_key_cipher *
+lookup_cipher(const char *cipher_string)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dm_default_key_ciphers); i++) {
+		if (strcmp(cipher_string, dm_default_key_ciphers[i].name) == 0)
+			return &dm_default_key_ciphers[i];
+	}
+	return NULL;
+}
+
+static void default_key_dtr(struct dm_target *ti)
+{
+	struct default_key_c *dkc = ti->private;
+	int err;
+
+	if (dkc->dev) {
+		err = blk_crypto_evict_key(bdev_get_queue(dkc->dev->bdev),
+					   &dkc->key);
+		if (err && err != -ENOKEY)
+			DMWARN("Failed to evict crypto key: %d", err);
+		dm_put_device(ti, dkc->dev);
+	}
+	kfree_sensitive(dkc->cipher_string);
+	kfree_sensitive(dkc);
+}
+
+static int default_key_ctr_optional(struct dm_target *ti,
+				    unsigned int argc, char **argv)
+{
+	struct default_key_c *dkc = ti->private;
+	struct dm_arg_set as;
+	static const struct dm_arg _args[] = {
+		{0, 4, "Invalid number of feature args"},
+	};
+	unsigned int opt_params;
+	const char *opt_string;
+	bool iv_large_sectors = false;
+	char dummy;
+	int err;
+
+	as.argc = argc;
+	as.argv = argv;
+
+	err = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+	if (err)
+		return err;
+
+	while (opt_params--) {
+		opt_string = dm_shift_arg(&as);
+		if (!opt_string) {
+			ti->error = "Not enough feature arguments";
+			return -EINVAL;
+		}
+		if (!strcmp(opt_string, "allow_discards")) {
+			ti->num_discard_bios = 1;
+		} else if (sscanf(opt_string, "sector_size:%u%c",
+				  &dkc->sector_size, &dummy) == 1) {
+			if (dkc->sector_size < SECTOR_SIZE ||
+			    dkc->sector_size > 4096 ||
+			    !is_power_of_2(dkc->sector_size)) {
+				ti->error = "Invalid sector_size";
+				return -EINVAL;
+			}
+		} else if (!strcmp(opt_string, "iv_large_sectors")) {
+			iv_large_sectors = true;
+		} else if (!strcmp(opt_string, "wrappedkey_v0")) {
+			dkc->key_type = BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;
+		} else {
+			ti->error = "Invalid feature arguments";
+			return -EINVAL;
+		}
+	}
+
+	/* dm-default-key doesn't implement iv_large_sectors=false. */
+	if (dkc->sector_size != SECTOR_SIZE && !iv_large_sectors) {
+		ti->error = "iv_large_sectors must be specified";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Construct a default-key mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ *
+ * This syntax matches dm-crypt's, but lots of unneeded functionality has been
+ * removed.  Also, dm-default-key requires that the "iv_large_sectors" option be
+ * given whenever a non-default sector size is used.
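+ *
+ * An illustrative table line (all values are placeholders):
+ *
+ *   0 <dev_size> default-key aes-xts-plain64 <128-hex-char-key> 0
+ *       /dev/<dev> 0 3 allow_discards sector_size:4096 iv_large_sectors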
+ */
+static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct default_key_c *dkc;
+	const struct dm_default_key_cipher *cipher;
+	u8 raw_key[BLK_CRYPTO_MAX_ANY_KEY_SIZE];
+	unsigned int raw_key_size;
+	unsigned int dun_bytes;
+	unsigned long long tmpll;
+	char dummy;
+	int err;
+
+	if (argc < 5) {
+		ti->error = "Not enough arguments";
+		return -EINVAL;
+	}
+
+	dkc = kzalloc(sizeof(*dkc), GFP_KERNEL);
+	if (!dkc) {
+		ti->error = "Out of memory";
+		return -ENOMEM;
+	}
+	ti->private = dkc;
+	dkc->key_type = BLK_CRYPTO_KEY_TYPE_STANDARD;
+
+	/* <cipher> */
+	dkc->cipher_string = kstrdup(argv[0], GFP_KERNEL);
+	if (!dkc->cipher_string) {
+		ti->error = "Out of memory";
+		err = -ENOMEM;
+		goto bad;
+	}
+	cipher = lookup_cipher(dkc->cipher_string);
+	if (!cipher) {
+		ti->error = "Unsupported cipher";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	/* <key> */
+	raw_key_size = strlen(argv[1]);
+	if (raw_key_size > 2 * BLK_CRYPTO_MAX_ANY_KEY_SIZE ||
+	    raw_key_size % 2) {
+		ti->error = "Invalid keysize";
+		err = -EINVAL;
+		goto bad;
+	}
+	raw_key_size /= 2;
+	if (hex2bin(raw_key, argv[1], raw_key_size) != 0) {
+		ti->error = "Malformed key string";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	/* <iv_offset> */
+	if (sscanf(argv[2], "%llu%c", &dkc->iv_offset, &dummy) != 1) {
+		ti->error = "Invalid iv_offset sector";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	/* <dev_path> */
+	err = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+			    &dkc->dev);
+	if (err) {
+		ti->error = "Device lookup failed";
+		goto bad;
+	}
+
+	/* <start> */
+	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 ||
+	    tmpll != (sector_t)tmpll) {
+		ti->error = "Invalid start sector";
+		err = -EINVAL;
+		goto bad;
+	}
+	dkc->start = tmpll;
+
+	/* optional arguments */
+	dkc->sector_size = SECTOR_SIZE;
+	if (argc > 5) {
+		err = default_key_ctr_optional(ti, argc - 5, &argv[5]);
+		if (err)
+			goto bad;
+	}
+	dkc->sector_bits = ilog2(dkc->sector_size);
+	if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) {
+		ti->error = "Device size is not a multiple of sector_size";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	dkc->max_dun = (dkc->iv_offset + ti->len - 1) >>
+		       (dkc->sector_bits - SECTOR_SHIFT);
+	dun_bytes = DIV_ROUND_UP(fls64(dkc->max_dun), 8);
+
+	err = blk_crypto_init_key(&dkc->key, raw_key, raw_key_size,
+				  dkc->key_type, cipher->mode_num,
+				  dun_bytes, dkc->sector_size);
+	if (err) {
+		ti->error = "Error initializing blk-crypto key";
+		goto bad;
+	}
+
+	err = blk_crypto_start_using_key(&dkc->key,
+					 bdev_get_queue(dkc->dev->bdev));
+	if (err) {
+		ti->error = "Error starting to use blk-crypto";
+		goto bad;
+	}
+
+	ti->num_flush_bios = 1;
+
+	err = 0;
+	goto out;
+
+bad:
+	default_key_dtr(ti);
+out:
+	memzero_explicit(raw_key, sizeof(raw_key));
+	return err;
+}
+
+static int default_key_map(struct dm_target *ti, struct bio *bio)
+{
+	const struct default_key_c *dkc = ti->private;
+	sector_t sector_in_target;
+	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { 0 };
+
+	bio_set_dev(bio, dkc->dev->bdev);
+
+	/*
+	 * If the bio is a device-level request which doesn't target a specific
+	 * sector, there's nothing more to do.
+	 */
+	if (bio_sectors(bio) == 0)
+		return DM_MAPIO_REMAPPED;
+
+	/* Map the bio's sector to the underlying device. (512-byte sectors) */
+	sector_in_target = dm_target_offset(ti, bio->bi_iter.bi_sector);
+	bio->bi_iter.bi_sector = dkc->start + sector_in_target;
+
+	/*
+	 * If the bio should skip dm-default-key (i.e. if it's for an encrypted
+	 * file's contents), or if it doesn't have any data (e.g. if it's a
+	 * DISCARD request), there's nothing more to do.
+	 */
+	if (bio_should_skip_dm_default_key(bio) || !bio_has_data(bio))
+		return DM_MAPIO_REMAPPED;
+
+	/*
+	 * Else, dm-default-key needs to set this bio's encryption context.
+	 * It must not already have one.
+	 */
+	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
+		return DM_MAPIO_KILL;
+
+	/* Calculate the DUN and enforce data-unit (crypto sector) alignment. */
+	dun[0] = dkc->iv_offset + sector_in_target; /* 512-byte sectors */
+	if (dun[0] & ((dkc->sector_size >> SECTOR_SHIFT) - 1))
+		return DM_MAPIO_KILL;
+	dun[0] >>= dkc->sector_bits - SECTOR_SHIFT; /* crypto sectors */
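+	/* e.g. with sector_size:4096 and iv_offset 0, 512-byte sector 24
+	 * passes the alignment check (24 & 7 == 0) and becomes DUN 3
+	 */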
+
+	/*
+	 * This check isn't necessary as we should have calculated max_dun
+	 * correctly, but be safe.
+	 */
+	if (WARN_ON_ONCE(dun[0] > dkc->max_dun))
+		return DM_MAPIO_KILL;
+
+	bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO);
+
+	return DM_MAPIO_REMAPPED;
+}
+
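+/*
+ * An illustrative STATUSTYPE_TABLE result for the hypothetical mapping above
+ * ("-" stands in for the omitted key; the device is reported by name, e.g.
+ * major:minor):
+ *
+ *   aes-xts-plain64 - 0 8:17 0 3 allow_discards sector_size:4096 iv_large_sectors
+ */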
+static void default_key_status(struct dm_target *ti, status_type_t type,
+			       unsigned int status_flags, char *result,
+			       unsigned int maxlen)
+{
+	const struct default_key_c *dkc = ti->private;
+	unsigned int sz = 0;
+	int num_feature_args = 0;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+	case STATUSTYPE_IMA:
+		result[0] = '\0';
+		break;
+
+	case STATUSTYPE_TABLE:
+		/* Omit the key for now. */
+		DMEMIT("%s - %llu %s %llu", dkc->cipher_string, dkc->iv_offset,
+		       dkc->dev->name, (unsigned long long)dkc->start);
+
+		num_feature_args += !!ti->num_discard_bios;
+		if (dkc->sector_size != SECTOR_SIZE)
+			num_feature_args += 2;
+		if (dkc->key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
+			num_feature_args += 1;
+		if (num_feature_args != 0) {
+			DMEMIT(" %d", num_feature_args);
+			if (ti->num_discard_bios)
+				DMEMIT(" allow_discards");
+			if (dkc->sector_size != SECTOR_SIZE) {
+				DMEMIT(" sector_size:%u", dkc->sector_size);
+				DMEMIT(" iv_large_sectors");
+			}
+			if (dkc->key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
+				DMEMIT(" wrappedkey_v0");
+		}
+		break;
+	}
+}
+
+static int default_key_prepare_ioctl(struct dm_target *ti,
+				     struct block_device **bdev)
+{
+	const struct default_key_c *dkc = ti->private;
+	const struct dm_dev *dev = dkc->dev;
+
+	*bdev = dev->bdev;
+
+	/* Only pass ioctls through if the device sizes match exactly. */
+	if (dkc->start != 0 ||
+	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+		return 1;
+	return 0;
+}
+
+static int default_key_iterate_devices(struct dm_target *ti,
+				       iterate_devices_callout_fn fn,
+				       void *data)
+{
+	const struct default_key_c *dkc = ti->private;
+
+	return fn(ti, dkc->dev, dkc->start, ti->len, data);
+}
+
+static void default_key_io_hints(struct dm_target *ti,
+				 struct queue_limits *limits)
+{
+	const struct default_key_c *dkc = ti->private;
+	const unsigned int sector_size = dkc->sector_size;
+
+	limits->logical_block_size =
+		max_t(unsigned int, limits->logical_block_size, sector_size);
+	limits->physical_block_size =
+		max_t(unsigned int, limits->physical_block_size, sector_size);
+	limits->io_min = max_t(unsigned int, limits->io_min, sector_size);
+}
+
+static struct target_type default_key_target = {
+	.name			= "default-key",
+	.version		= {2, 1, 0},
+	.features		= DM_TARGET_PASSES_CRYPTO,
+	.module			= THIS_MODULE,
+	.ctr			= default_key_ctr,
+	.dtr			= default_key_dtr,
+	.map			= default_key_map,
+	.status			= default_key_status,
+	.prepare_ioctl		= default_key_prepare_ioctl,
+	.iterate_devices	= default_key_iterate_devices,
+	.io_hints		= default_key_io_hints,
+};
+
+static int __init dm_default_key_init(void)
+{
+	return dm_register_target(&default_key_target);
+}
+
+static void __exit dm_default_key_exit(void)
+{
+	dm_unregister_target(&default_key_target);
+}
+
+module_init(dm_default_key_init);
+module_exit(dm_default_key_exit);
+
+MODULE_AUTHOR("Paul Lawrence <paullawrence@google.com>");
+MODULE_AUTHOR("Paul Crowley <paulcrowley@google.com>");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index aa173f5..4ed13ed 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1237,6 +1237,70 @@
 	return args.err;
 }
 
+struct dm_derive_sw_secret_args {
+	const u8 *wrapped_key;
+	unsigned int wrapped_key_size;
+	u8 *secret;
+	int err;
+};
+
+static int dm_derive_sw_secret_callback(struct dm_target *ti,
+					struct dm_dev *dev, sector_t start,
+					sector_t len, void *data)
+{
+	struct dm_derive_sw_secret_args *args = data;
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	if (!args->err)
+		return 0;
+
+	args->err = blk_crypto_derive_sw_secret(q->crypto_profile,
+						args->wrapped_key,
+						args->wrapped_key_size,
+						args->secret);
+	/* Try another device in case this fails. */
+	return 0;
+}
+
+/*
+ * Retrieve the sw_secret from the underlying device.  Given that only one
+ * sw_secret can exist for a particular wrapped key, retrieve it only from the
+ * first device that supports derive_sw_secret().
+ */
+static int dm_derive_sw_secret(struct blk_crypto_profile *profile,
+			       const u8 *wrapped_key,
+			       unsigned int wrapped_key_size,
+			       u8 secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+	struct mapped_device *md =
+		container_of(profile, struct dm_crypto_profile, profile)->md;
+	struct dm_derive_sw_secret_args args = {
+		.wrapped_key = wrapped_key,
+		.wrapped_key_size = wrapped_key_size,
+		.secret = secret,
+		.err = -EOPNOTSUPP,
+	};
+	struct dm_table *t;
+	int srcu_idx;
+	int i;
+	struct dm_target *ti;
+
+	t = dm_get_live_table(md, &srcu_idx);
+	if (!t)
+		return -EOPNOTSUPP;
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+		if (!ti->type->iterate_devices)
+			continue;
+		ti->type->iterate_devices(ti, dm_derive_sw_secret_callback,
+					  &args);
+		if (!args.err)
+			break;
+	}
+	dm_put_live_table(md, srcu_idx);
+	return args.err;
+}
+
 static int
 device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
 				     sector_t start, sector_t len, void *data)
@@ -1293,9 +1357,11 @@
 	profile = &dmcp->profile;
 	blk_crypto_profile_init(profile, 0);
 	profile->ll_ops.keyslot_evict = dm_keyslot_evict;
+	profile->ll_ops.derive_sw_secret = dm_derive_sw_secret;
 	profile->max_dun_bytes_supported = UINT_MAX;
 	memset(profile->modes_supported, 0xFF,
 	       sizeof(profile->modes_supported));
+	profile->key_types_supported = ~0;
 
 	for (i = 0; i < dm_table_get_num_targets(t); i++) {
 		ti = dm_table_get_target(t, i);
diff --git a/drivers/md/dm-user.c b/drivers/md/dm-user.c
new file mode 100644
index 0000000..2ff224b
--- /dev/null
+++ b/drivers/md/dm-user.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Google, Inc
+ * Copyright (C) 2020 Palmer Dabbelt <palmerdabbelt@google.com>
+ */
+
+#include <linux/device-mapper.h>
+#include <uapi/linux/dm-user.h>
+
+#include <linux/bio.h>
+#include <linux/init.h>
+#include <linux/mempool.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/uio.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#define DM_MSG_PREFIX "user"
+
+#define MAX_OUTSTANDING_MESSAGES 128
+
+static unsigned int daemon_timeout_msec = 4000;
+module_param_named(dm_user_daemon_timeout_msec, daemon_timeout_msec, uint,
+		   0644);
+MODULE_PARM_DESC(dm_user_daemon_timeout_msec,
+		 "IO timeout in msec if the daemon does not process the IO");
+
+/*
+ * dm-user uses four structures:
+ *
+ *  - "struct target", the outermost structure, corresponds to a single device
+ *    mapper target.  This contains the set of outstanding BIOs that have been
+ *    provided by DM and are not actively being processed by the user, along
+ *    with a misc device that userspace can open to communicate with the
+ *    kernel.  Each time userspace opens the misc device a new channel is
+ *    created.
+ *  - "struct channel", which represents a single active communication channel
+ *    with userspace.  Userspace may choose arbitrary read/write sizes to use
+ *    when processing messages; the channel assembles these into logical accesses.
+ *    When userspace responds to a full message the channel completes the BIO
+ *    and obtains a new message to process from the target.
+ *  - "struct message", which wraps a BIO with the additional information
+ *    required by the kernel to sort out what to do with BIOs when they return
+ *    from userspace.
+ *  - "struct dm_user_message", which is the exact message format that
+ *    userspace sees.
+ *
+ * The hot path contains three distinct operations:
+ *
+ *  - user_map(), which is provided a BIO from device mapper that is queued
+ *    into the target.  This allocates and enqueues a new message.
+ *  - dev_read(), which dequeues a message and copies it to userspace.
+ *  - dev_write(), which looks up a message (keyed by sequence number) and
+ *    completes the corresponding BIO.
+ *
+ * Lock ordering (outer to inner)
+ *
+ * 1) miscdevice's global lock.  This is held around dev_open, so it has to be
+ *    the outermost lock.
+ * 2) target->lock
+ * 3) channel->lock
+ */
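+
+/*
+ * A minimal sketch of the userspace side of the protocol (illustrative only:
+ * the device name is hypothetical, error handling is omitted, and a real
+ * daemon must handle short reads/writes and must echo back msg.seq unchanged
+ * so the kernel can match the response to the request):
+ *
+ *	int fd = open("/dev/dm-user/example", O_RDWR);
+ *	struct dm_user_message msg;
+ *
+ *	for (;;) {
+ *		read(fd, &msg, sizeof(msg));    (plus payload for writes)
+ *		... service the request described by msg ...
+ *		msg.type = DM_USER_RESP_SUCCESS;
+ *		write(fd, &msg, sizeof(msg));   (plus payload for reads)
+ *	}
+ */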
+
+struct message {
+	/*
+	 * Messages themselves do not need a lock, they're protected by either
+	 * the target or channel's lock, depending on which can reference them
+	 * directly.
+	 */
+	struct dm_user_message msg;
+	struct bio *bio;
+	size_t posn_to_user;
+	size_t total_to_user;
+	size_t posn_from_user;
+	size_t total_from_user;
+
+	struct list_head from_user;
+	struct list_head to_user;
+
+	/*
+	 * These are written back from the user.  They live in the same spot in
+	 * the message, but we need to either keep the old values around or
+	 * call a bunch more BIO helpers.  These are only valid after write has
+	 * adopted the message.
+	 */
+	u64 return_type;
+	u64 return_flags;
+
+	struct delayed_work work;
+	bool delayed;
+	struct target *t;
+};
+
+struct target {
+	/*
+	 * A target has a single lock, which protects everything in the target
+	 * (but does not protect the channels associated with a target).
+	 */
+	struct mutex lock;
+
+	/*
+	 * There is only one point at which anything blocks: userspace blocks
+	 * reading a new message, which is woken up by device mapper providing
+	 * a new BIO to process (or tearing down the target).  The
+	 * corresponding write side doesn't block, instead we treat userspace's
+	 * response containing a message that has yet to be mapped as an
+	 * invalid operation.
+	 */
+	struct wait_queue_head wq;
+
+	/*
+	 * Messages are delivered to userspace in order, but may be returned
+	 * out of order.  This allows userspace to schedule IO if it wants to.
+	 */
+	mempool_t message_pool;
+	u64 next_seq_to_map;
+	u64 next_seq_to_user;
+	struct list_head to_user;
+
+	/*
+	 * There is a misc device per target.  The name is selected by
+	 * userspace (via a DM create ioctl argument), and each ends up in
+	 * /dev/dm-user/.  It looks like a better way to do this may be to have
+	 * a filesystem to manage these, but this was more expedient.  The
+	 * current mechanism is functional, but does result in an arbitrary
+	 * number of dynamically created misc devices.
+	 */
+	struct miscdevice miscdev;
+
+	/*
+	 * Device mapper's target destructor triggers tearing this all down,
+	 * but we can't actually free until every channel associated with this
+	 * target has been destroyed.  Channels each have a reference to their
+	 * target, and there is an additional single reference that corresponds
+	 * to both DM and the misc device (both of which are destroyed by DM).
+	 *
+	 * In the common case userspace will be asleep waiting for a new
+	 * message when device mapper decides to destroy the target, which
+	 * means no new messages will appear.  The destroyed flag triggers a
+	 * wakeup, which will end up removing the reference.
+	 */
+	struct kref references;
+	int dm_destroyed;
+	bool daemon_terminated;
+};
+
+struct channel {
+	struct target *target;
+
+	/*
+	 * A channel has a single lock, which prevents multiple reads (or
+	 * multiple writes) from conflicting with each other.
+	 */
+	struct mutex lock;
+
+	struct message *cur_to_user;
+	struct message *cur_from_user;
+	ssize_t to_user_error;
+	ssize_t from_user_error;
+
+	/*
+	 * Once a message has been forwarded to userspace on a channel it must
+	 * be responded to on the same channel.  This allows us to error out
+	 * the messages that have not yet been responded to by a channel when
+	 * that channel closes, which makes handling errors more reasonable for
+	 * fault-tolerant userspace daemons.  It also happens to make avoiding
+	 * shared locks between user_map() and dev_read() a lot easier.
+	 *
+	 * This does preclude a multi-threaded work stealing userspace
+	 * implementation (or at least, force a degree of head-of-line blocking
+	 * on the response path).
+	 */
+	struct list_head from_user;
+
+	/*
+	 * Responses from userspace can arrive in arbitrarily small chunks.
+	 * We need some place to buffer one up until we can find the
+	 * corresponding kernel-side message to continue processing, so instead
+	 * of allocating them we just keep one off to the side here.  This can
+	 * only ever be pointed to by cur_from_user, and will never have a BIO.
+	 */
+	struct message scratch_message_from_user;
+};
+
+static void message_kill(struct message *m, mempool_t *pool)
+{
+	m->bio->bi_status = BLK_STS_IOERR;
+	bio_endio(m->bio);
+	bio_put(m->bio);
+	mempool_free(m, pool);
+}
+
+static inline bool is_user_space_thread_present(struct target *t)
+{
+	lockdep_assert_held(&t->lock);
+	return (kref_read(&t->references) > 1);
+}
+
+static void process_delayed_work(struct work_struct *work)
+{
+	struct delayed_work *del_work = to_delayed_work(work);
+	struct message *msg = container_of(del_work, struct message, work);
+
+	struct target *t = msg->t;
+
+	mutex_lock(&t->lock);
+
+	/*
+	 * There is at least one user-space thread to process the IO,
+	 * so there is nothing for the delayed work to do.
+	 */
+	if (is_user_space_thread_present(t)) {
+		mutex_unlock(&t->lock);
+		return;
+	}
+
+	/*
+	 * Terminate the IO with an error
+	 */
+	list_del(&msg->to_user);
+	pr_err("I/O error: sector %llu: no user-space daemon for %s target\n",
+	       msg->bio->bi_iter.bi_sector,
+	       t->miscdev.name);
+	message_kill(msg, &t->message_pool);
+	mutex_unlock(&t->lock);
+}
+
+static void enqueue_delayed_work(struct message *m, bool is_delay)
+{
+	unsigned long delay = 0;
+
+	m->delayed = true;
+	INIT_DELAYED_WORK(&m->work, process_delayed_work);
+
+	/*
+	 * The snapuserd daemon is the user-space process
+	 * which services IO requests from dm-user
+	 * when an OTA is applied.  Per the current design,
+	 * when a dm-user target is created, the daemon
+	 * attaches to the target and starts processing
+	 * IOs.  The daemon is terminated only when the
+	 * dm-user target is destroyed.
+	 *
+	 * If, for some reason, the daemon crashes or terminates
+	 * early without destroying the dm-user target, there is
+	 * no mechanism to restart the daemon and resume processing
+	 * IOs from the same target.  Theoretically it is possible,
+	 * but that infrastructure doesn't exist in the Android
+	 * ecosystem.
+	 *
+	 * Thus, once the daemon terminates, IOs issued on that
+	 * target will never be processed.  Hence, we set the delay
+	 * to 0 and fail the IOs immediately.
+	 *
+	 * On the other hand, when a new dm-user target is created,
+	 * we wait for the daemon to attach for the first time.
+	 * This primarily happens when first-stage init spins up
+	 * the daemon.  At this point, since the root filesystem is
+	 * mounted off the snapshot device, the dm-user target may
+	 * receive IO requests even though the daemon is not fully
+	 * launched.  We don't want to fail those IO requests
+	 * immediately, so we queue them with a timeout that gives
+	 * the daemon time to become ready.  Again, if the daemon
+	 * fails to launch within the timeout period, the IOs will
+	 * be failed.
+	 */
+	if (is_delay)
+		delay = msecs_to_jiffies(daemon_timeout_msec);
+
+	queue_delayed_work(system_wq, &m->work, delay);
+}
+
+static inline struct target *target_from_target(struct dm_target *target)
+{
+	WARN_ON(target->private == NULL);
+	return target->private;
+}
+
+static inline struct target *target_from_miscdev(struct miscdevice *miscdev)
+{
+	return container_of(miscdev, struct target, miscdev);
+}
+
+static inline struct channel *channel_from_file(struct file *file)
+{
+	WARN_ON(file->private_data == NULL);
+	return file->private_data;
+}
+
+static inline struct target *target_from_channel(struct channel *c)
+{
+	WARN_ON(c->target == NULL);
+	return c->target;
+}
+
+static inline size_t bio_size(struct bio *bio)
+{
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+	size_t out = 0;
+
+	bio_for_each_segment (bvec, bio, iter)
+		out += bio_iter_len(bio, iter);
+	return out;
+}
+
+static inline size_t bio_bytes_needed_to_user(struct bio *bio)
+{
+	switch (bio_op(bio)) {
+	case REQ_OP_WRITE:
+		return sizeof(struct dm_user_message) + bio_size(bio);
+	case REQ_OP_READ:
+	case REQ_OP_FLUSH:
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
+	case REQ_OP_WRITE_SAME:
+	case REQ_OP_WRITE_ZEROES:
+		return sizeof(struct dm_user_message);
+
+	/*
+	 * These ops are not passed to userspace under the assumption that
+	 * they're not going to be particularly useful in that context.
+	 */
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static inline size_t bio_bytes_needed_from_user(struct bio *bio)
+{
+	switch (bio_op(bio)) {
+	case REQ_OP_READ:
+		return sizeof(struct dm_user_message) + bio_size(bio);
+	case REQ_OP_WRITE:
+	case REQ_OP_FLUSH:
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
+	case REQ_OP_WRITE_SAME:
+	case REQ_OP_WRITE_ZEROES:
+		return sizeof(struct dm_user_message);
+
+	/*
+	 * These ops are not passed to userspace under the assumption that
+	 * they're not going to be particularly useful in that context.
+	 */
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static inline long bio_type_to_user_type(struct bio *bio)
+{
+	switch (bio_op(bio)) {
+	case REQ_OP_READ:
+		return DM_USER_REQ_MAP_READ;
+	case REQ_OP_WRITE:
+		return DM_USER_REQ_MAP_WRITE;
+	case REQ_OP_FLUSH:
+		return DM_USER_REQ_MAP_FLUSH;
+	case REQ_OP_DISCARD:
+		return DM_USER_REQ_MAP_DISCARD;
+	case REQ_OP_SECURE_ERASE:
+		return DM_USER_REQ_MAP_SECURE_ERASE;
+	case REQ_OP_WRITE_SAME:
+		return DM_USER_REQ_MAP_WRITE_SAME;
+	case REQ_OP_WRITE_ZEROES:
+		return DM_USER_REQ_MAP_WRITE_ZEROES;
+
+	/*
+	 * These ops are not passed to userspace under the assumption that
+	 * they're not going to be particularly useful in that context.
+	 */
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static inline long bio_flags_to_user_flags(struct bio *bio)
+{
+	u64 out = 0;
+	typeof(bio->bi_opf) opf = bio->bi_opf & ~REQ_OP_MASK;
+
+	if (opf & REQ_FAILFAST_DEV) {
+		opf &= ~REQ_FAILFAST_DEV;
+		out |= DM_USER_REQ_MAP_FLAG_FAILFAST_DEV;
+	}
+
+	if (opf & REQ_FAILFAST_TRANSPORT) {
+		opf &= ~REQ_FAILFAST_TRANSPORT;
+		out |= DM_USER_REQ_MAP_FLAG_FAILFAST_TRANSPORT;
+	}
+
+	if (opf & REQ_FAILFAST_DRIVER) {
+		opf &= ~REQ_FAILFAST_DRIVER;
+		out |= DM_USER_REQ_MAP_FLAG_FAILFAST_DRIVER;
+	}
+
+	if (opf & REQ_SYNC) {
+		opf &= ~REQ_SYNC;
+		out |= DM_USER_REQ_MAP_FLAG_SYNC;
+	}
+
+	if (opf & REQ_META) {
+		opf &= ~REQ_META;
+		out |= DM_USER_REQ_MAP_FLAG_META;
+	}
+
+	if (opf & REQ_PRIO) {
+		opf &= ~REQ_PRIO;
+		out |= DM_USER_REQ_MAP_FLAG_PRIO;
+	}
+
+	if (opf & REQ_NOMERGE) {
+		opf &= ~REQ_NOMERGE;
+		out |= DM_USER_REQ_MAP_FLAG_NOMERGE;
+	}
+
+	if (opf & REQ_IDLE) {
+		opf &= ~REQ_IDLE;
+		out |= DM_USER_REQ_MAP_FLAG_IDLE;
+	}
+
+	if (opf & REQ_INTEGRITY) {
+		opf &= ~REQ_INTEGRITY;
+		out |= DM_USER_REQ_MAP_FLAG_INTEGRITY;
+	}
+
+	if (opf & REQ_FUA) {
+		opf &= ~REQ_FUA;
+		out |= DM_USER_REQ_MAP_FLAG_FUA;
+	}
+
+	if (opf & REQ_PREFLUSH) {
+		opf &= ~REQ_PREFLUSH;
+		out |= DM_USER_REQ_MAP_FLAG_PREFLUSH;
+	}
+
+	if (opf & REQ_RAHEAD) {
+		opf &= ~REQ_RAHEAD;
+		out |= DM_USER_REQ_MAP_FLAG_RAHEAD;
+	}
+
+	if (opf & REQ_BACKGROUND) {
+		opf &= ~REQ_BACKGROUND;
+		out |= DM_USER_REQ_MAP_FLAG_BACKGROUND;
+	}
+
+	if (opf & REQ_NOWAIT) {
+		opf &= ~REQ_NOWAIT;
+		out |= DM_USER_REQ_MAP_FLAG_NOWAIT;
+	}
+
+	if (opf & REQ_NOUNMAP) {
+		opf &= ~REQ_NOUNMAP;
+		out |= DM_USER_REQ_MAP_FLAG_NOUNMAP;
+	}
+
+	if (unlikely(opf)) {
+		pr_warn("unsupported BIO type %x\n", opf);
+		return -EOPNOTSUPP;
+	}
+	WARN_ON(out < 0);
+	return out;
+}
+
+/*
+ * Not quite what's in blk-map.c, but instead what I thought the functions in
+ * blk-map did.  This one seems more generally useful and I think we could
+ * write the blk-map version in terms of this one.  The differences are that
+ * this has a return value that counts, and blk-map uses the BIO _all iters.
+ * Neither advances the BIO iter, but both advance the IOV iter, which is a
+ * bit odd here.
+ */
+static ssize_t bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
+{
+	struct bio_vec bvec;
+	struct bvec_iter biter;
+	ssize_t out = 0;
+
+	bio_for_each_segment (bvec, bio, biter) {
+		ssize_t ret;
+
+		ret = copy_page_from_iter(bvec.bv_page, bvec.bv_offset,
+					  bvec.bv_len, iter);
+
+		/*
+		 * FIXME: I thought that IOV copies had a mechanism for
+		 * terminating early, if for example a signal came in while
+		 * sleeping waiting for a page to be mapped, but I don't see
+		 * where that would happen.
+		 */
+		WARN_ON(ret < 0);
+		out += ret;
+
+		if (!iov_iter_count(iter))
+			break;
+
+		if (ret < bvec.bv_len)
+			return ret;
+	}
+
+	return out;
+}
+
+static ssize_t bio_copy_to_iter(struct bio *bio, struct iov_iter *iter)
+{
+	struct bio_vec bvec;
+	struct bvec_iter biter;
+	ssize_t out = 0;
+
+	bio_for_each_segment (bvec, bio, biter) {
+		ssize_t ret;
+
+		ret = copy_page_to_iter(bvec.bv_page, bvec.bv_offset,
+					bvec.bv_len, iter);
+
+		/* as above */
+		WARN_ON(ret < 0);
+		out += ret;
+
+		if (!iov_iter_count(iter))
+			break;
+
+		if (ret < bvec.bv_len)
+			return ret;
+	}
+
+	return out;
+}
+
+static ssize_t msg_copy_to_iov(struct message *msg, struct iov_iter *to)
+{
+	ssize_t copied = 0;
+
+	if (!iov_iter_count(to))
+		return 0;
+
+	if (msg->posn_to_user < sizeof(msg->msg)) {
+		copied = copy_to_iter((char *)(&msg->msg) + msg->posn_to_user,
+				      sizeof(msg->msg) - msg->posn_to_user, to);
+	} else {
+		copied = bio_copy_to_iter(msg->bio, to);
+		if (copied > 0)
+			bio_advance(msg->bio, copied);
+	}
+
+	if (copied < 0)
+		return copied;
+
+	msg->posn_to_user += copied;
+	return copied;
+}
+
+static ssize_t msg_copy_from_iov(struct message *msg, struct iov_iter *from)
+{
+	ssize_t copied = 0;
+
+	if (!iov_iter_count(from))
+		return 0;
+
+	if (msg->posn_from_user < sizeof(msg->msg)) {
+		copied = copy_from_iter(
+			(char *)(&msg->msg) + msg->posn_from_user,
+			sizeof(msg->msg) - msg->posn_from_user, from);
+	} else {
+		copied = bio_copy_from_iter(msg->bio, from);
+		if (copied > 0)
+			bio_advance(msg->bio, copied);
+	}
+
+	if (copied < 0)
+		return copied;
+
+	msg->posn_from_user += copied;
+	return copied;
+}
+
+static struct message *msg_get_map(struct target *t)
+{
+	struct message *m;
+
+	lockdep_assert_held(&t->lock);
+
+	m = mempool_alloc(&t->message_pool, GFP_NOIO);
+	m->msg.seq = t->next_seq_to_map++;
+	INIT_LIST_HEAD(&m->to_user);
+	INIT_LIST_HEAD(&m->from_user);
+	return m;
+}
+
+static struct message *msg_get_to_user(struct target *t)
+{
+	struct message *m;
+
+	lockdep_assert_held(&t->lock);
+
+	if (list_empty(&t->to_user))
+		return NULL;
+
+	m = list_first_entry(&t->to_user, struct message, to_user);
+
+	list_del(&m->to_user);
+
+	/*
+	 * If the IO was queued to the workqueue because there
+	 * was no daemon to service it, we will have to cancel
+	 * the delayed work, as the IO will now be processed by
+	 * this user-space thread.
+	 *
+	 * If the delayed work was already picked up for
+	 * processing, then wait for it to complete. Note
+	 * that the IO will not be terminated by the work
+	 * queue thread.
+	 */
+	if (unlikely(m->delayed)) {
+		mutex_unlock(&t->lock);
+		cancel_delayed_work_sync(&m->work);
+		mutex_lock(&t->lock);
+	}
+	return m;
+}
+
+static struct message *msg_get_from_user(struct channel *c, u64 seq)
+{
+	struct message *m;
+	struct list_head *cur, *tmp;
+
+	lockdep_assert_held(&c->lock);
+
+	list_for_each_safe (cur, tmp, &c->from_user) {
+		m = list_entry(cur, struct message, from_user);
+		if (m->msg.seq == seq) {
+			list_del(&m->from_user);
+			return m;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Returns 0 when there is no work left to do.  This must be callable without
+ * holding the target lock, as it is part of the waitqueue's check expression.
+ * When called without the lock it may spuriously indicate there is remaining
+ * work, but when called with the lock it must be accurate.
+ */
+static int target_poll(struct target *t)
+{
+	return !list_empty(&t->to_user) || t->dm_destroyed;
+}
+
+static void target_release(struct kref *ref)
+{
+	struct target *t = container_of(ref, struct target, references);
+	struct list_head *cur, *tmp;
+
+	/*
+	 * There may be outstanding BIOs that have not yet been given to
+	 * userspace.  At this point there's nothing we can do about them, as
+	 * there are and will never be any channels.
+	 */
+	list_for_each_safe (cur, tmp, &t->to_user) {
+		struct message *m = list_entry(cur, struct message, to_user);
+
+		if (unlikely(m->delayed)) {
+			bool ret;
+
+			mutex_unlock(&t->lock);
+			ret = cancel_delayed_work_sync(&m->work);
+			mutex_lock(&t->lock);
+			if (!ret)
+				continue;
+		}
+		message_kill(m, &t->message_pool);
+	}
+
+	mempool_exit(&t->message_pool);
+	mutex_unlock(&t->lock);
+	mutex_destroy(&t->lock);
+	kfree(t);
+}
+
+static void target_put(struct target *t)
+{
+	/*
+	 * This both releases a reference to the target and the lock.  We leave
+	 * it up to the caller to hold the lock, as they probably needed it for
+	 * something else.
+	 */
+	lockdep_assert_held(&t->lock);
+
+	if (!kref_put(&t->references, target_release)) {
+		/*
+		 * The user-space thread is being terminated.
+		 * Scan the list for all pending IOs which were
+		 * not yet processed and put them back on the
+		 * workqueue for delayed processing.
+		 */
+		if (!is_user_space_thread_present(t)) {
+			struct list_head *cur, *tmp;
+
+			list_for_each_safe(cur, tmp, &t->to_user) {
+				struct message *m = list_entry(cur,
+							       struct message,
+							       to_user);
+				if (!m->delayed)
+					enqueue_delayed_work(m, false);
+			}
+			/*
+			 * The daemon attached to this target has terminated.
+			 */
+			t->daemon_terminated = true;
+		}
+		mutex_unlock(&t->lock);
+	}
+}
+
+static struct channel *channel_alloc(struct target *t)
+{
+	struct channel *c;
+
+	lockdep_assert_held(&t->lock);
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (c == NULL)
+		return NULL;
+
+	kref_get(&t->references);
+	c->target = t;
+	c->cur_from_user = &c->scratch_message_from_user;
+	mutex_init(&c->lock);
+	INIT_LIST_HEAD(&c->from_user);
+	return c;
+}
+
+static void channel_free(struct channel *c)
+{
+	struct list_head *cur, *tmp;
+
+	lockdep_assert_held(&c->lock);
+
+	/*
+	 * There may be outstanding BIOs that have been given to userspace but
+	 * have not yet been completed.  The channel has been shut down so
+	 * there's no way to process the rest of those messages, so we just go
+	 * ahead and error out the BIOs.  Hopefully whatever's on the other end
+	 * can handle the errors.  One could imagine splitting the BIOs and
+	 * completing as much as we got, but that seems like overkill here.
+	 *
+	 * Our only other options would be to let the BIO hang around (which
+	 * seems way worse) or to resubmit it to userspace in the hope there's
+	 * another channel.  I don't really like the idea of submitting a
+	 * message twice.
+	 */
+	if (c->cur_to_user != NULL)
+		message_kill(c->cur_to_user, &c->target->message_pool);
+	if (c->cur_from_user != &c->scratch_message_from_user)
+		message_kill(c->cur_from_user, &c->target->message_pool);
+	list_for_each_safe (cur, tmp, &c->from_user)
+		message_kill(list_entry(cur, struct message, from_user),
+			     &c->target->message_pool);
+
+	mutex_lock(&c->target->lock);
+	target_put(c->target);
+	mutex_unlock(&c->lock);
+	mutex_destroy(&c->lock);
+	kfree(c);
+}
+
+static int dev_open(struct inode *inode, struct file *file)
+{
+	struct channel *c;
+	struct target *t;
+
+	/*
+	 * This is called by miscdev, which sets private_data to point to the
+	 * struct miscdevice that was opened.  The rest of our file operations
+	 * want to refer to the channel that's been opened, so we swap that
+	 * pointer out with a fresh channel.
+	 *
+	 * This is called with the miscdev lock held, which is also held while
+	 * registering/unregistering the miscdev.  The miscdev must be
+	 * registered for this to get called, which means there must be an
+	 * outstanding reference to the target, which means it cannot be freed
+	 * out from under us despite us not holding a reference yet.
+	 */
+	t = container_of(file->private_data, struct target, miscdev);
+	mutex_lock(&t->lock);
+	file->private_data = c = channel_alloc(t);
+
+	if (c == NULL) {
+		mutex_unlock(&t->lock);
+		return -ENOMEM;
+	}
+
+	mutex_unlock(&t->lock);
+	return 0;
+}
+
+static ssize_t dev_read(struct kiocb *iocb, struct iov_iter *to)
+{
+	struct channel *c = channel_from_file(iocb->ki_filp);
+	ssize_t total_processed = 0;
+	ssize_t processed;
+
+	mutex_lock(&c->lock);
+
+	if (unlikely(c->to_user_error)) {
+		total_processed = c->to_user_error;
+		goto cleanup_unlock;
+	}
+
+	if (c->cur_to_user == NULL) {
+		struct target *t = target_from_channel(c);
+
+		mutex_lock(&t->lock);
+
+		while (!target_poll(t)) {
+			int e;
+
+			mutex_unlock(&t->lock);
+			mutex_unlock(&c->lock);
+			e = wait_event_interruptible(t->wq, target_poll(t));
+			mutex_lock(&c->lock);
+			mutex_lock(&t->lock);
+
+			if (unlikely(e != 0)) {
+				/*
+				 * We haven't processed any bytes in either the
+				 * BIO or the IOV, so we can just terminate
+				 * right now.  The rest of the kernel handles
+				 * restarting the syscall when appropriate.
+				 */
+				total_processed = e;
+				mutex_unlock(&t->lock);
+				goto cleanup_unlock;
+			}
+		}
+
+		if (unlikely(t->dm_destroyed)) {
+			/*
+			 * DM has destroyed this target, so just lock
+			 * the user out.  There's really nothing else
+			 * we can do here.  Note that we don't actually
+			 * tear anything down until userspace has
+			 * closed the FD, as there may still be
+			 * outstanding BIOs.
+			 *
+			 * This is kind of a wacky error code to
+			 * return.  My goal was really just to try and
+			 * find something that wasn't likely to be
+			 * returned by anything else in the miscdev
+			 * path.  The message "block device required"
+			 * seems like a somewhat reasonable thing to
+			 * say when the target has disappeared out from
+			 * under us, but "not block" isn't sensible.
+			 */
+			c->to_user_error = total_processed = -ENOTBLK;
+			mutex_unlock(&t->lock);
+			goto cleanup_unlock;
+		}
+
+		/*
+		 * Ensures that accesses to the message data are not ordered
+		 * before the remote accesses that produce that message data.
+		 *
+		 * This pairs with the barrier in user_map(), via the
+		 * conditional within the while loop above. Also see the lack
+		 * of barrier in user_dtr(), which is why this can be after the
+		 * destroyed check.
+		 */
+		smp_rmb();
+
+		c->cur_to_user = msg_get_to_user(t);
+		WARN_ON(c->cur_to_user == NULL);
+		mutex_unlock(&t->lock);
+	}
+
+	processed = msg_copy_to_iov(c->cur_to_user, to);
+	total_processed += processed;
+
+	WARN_ON(c->cur_to_user->posn_to_user > c->cur_to_user->total_to_user);
+	if (c->cur_to_user->posn_to_user == c->cur_to_user->total_to_user) {
+		struct message *m = c->cur_to_user;
+
+		c->cur_to_user = NULL;
+		list_add_tail(&m->from_user, &c->from_user);
+	}
+
+cleanup_unlock:
+	mutex_unlock(&c->lock);
+	return total_processed;
+}
+
+static ssize_t dev_write(struct kiocb *iocb, struct iov_iter *from)
+{
+	struct channel *c = channel_from_file(iocb->ki_filp);
+	ssize_t total_processed = 0;
+	ssize_t processed;
+
+	mutex_lock(&c->lock);
+
+	if (unlikely(c->from_user_error)) {
+		total_processed = c->from_user_error;
+		goto cleanup_unlock;
+	}
+
+	/*
+	 * cur_from_user can never be NULL.  If there's no real message it must
+	 * point to the scratch space.
+	 */
+	WARN_ON(c->cur_from_user == NULL);
+	if (c->cur_from_user->posn_from_user < sizeof(struct dm_user_message)) {
+		struct message *msg, *old;
+
+		processed = msg_copy_from_iov(c->cur_from_user, from);
+		if (processed <= 0) {
+			pr_warn("msg_copy_from_iov() returned %zd\n",
+				processed);
+			c->from_user_error = -EINVAL;
+			goto cleanup_unlock;
+		}
+		total_processed += processed;
+
+		/*
+		 * In the unlikely event the user has provided us a very short
+		 * write, not even big enough to fill a message, just succeed.
+		 * We'll eventually build up enough bytes to do something.
+		 */
+		if (unlikely(c->cur_from_user->posn_from_user <
+			     sizeof(struct dm_user_message)))
+			goto cleanup_unlock;
+
+		old = c->cur_from_user;
+		mutex_lock(&c->target->lock);
+		msg = msg_get_from_user(c, c->cur_from_user->msg.seq);
+		if (msg == NULL) {
+			pr_info("user provided an invalid message seq of %llx\n",
+				old->msg.seq);
+			mutex_unlock(&c->target->lock);
+			c->from_user_error = -EINVAL;
+			goto cleanup_unlock;
+		}
+		mutex_unlock(&c->target->lock);
+
+		WARN_ON(old->posn_from_user != sizeof(struct dm_user_message));
+		msg->posn_from_user = sizeof(struct dm_user_message);
+		msg->return_type = old->msg.type;
+		msg->return_flags = old->msg.flags;
+		WARN_ON(msg->posn_from_user > msg->total_from_user);
+		c->cur_from_user = msg;
+		WARN_ON(old != &c->scratch_message_from_user);
+	}
+
+	/*
+	 * Userspace can signal an error for individual requests via the type
+	 * field of its response header.
+	 */
+	switch (c->cur_from_user->return_type) {
+	case DM_USER_RESP_SUCCESS:
+		c->cur_from_user->bio->bi_status = BLK_STS_OK;
+		break;
+	case DM_USER_RESP_ERROR:
+	case DM_USER_RESP_UNSUPPORTED:
+	default:
+		c->cur_from_user->bio->bi_status = BLK_STS_IOERR;
+		goto finish_bio;
+	}
+
+	/*
+	 * The op was a success as far as userspace is concerned, so process
+	 * whatever data may come along with it.  The user may provide the BIO
+	 * data in multiple chunks, in which case we don't need to finish the
+	 * BIO.
+	 */
+	processed = msg_copy_from_iov(c->cur_from_user, from);
+	total_processed += processed;
+
+	if (c->cur_from_user->posn_from_user <
+	    c->cur_from_user->total_from_user)
+		goto cleanup_unlock;
+
+finish_bio:
+	/*
+	 * When we set up this message the BIO's size matched the
+	 * message size, if that's not still the case then something
+	 * has gone off the rails.
+	 */
+	WARN_ON(bio_size(c->cur_from_user->bio) != 0);
+	bio_endio(c->cur_from_user->bio);
+	bio_put(c->cur_from_user->bio);
+
+	/*
+	 * We don't actually need to take the target lock here, as all
+	 * we're doing is freeing the message and mempools have their
+	 * own lock.  Each channel has its own scratch message.
+	 */
+	WARN_ON(c->cur_from_user == &c->scratch_message_from_user);
+	mempool_free(c->cur_from_user, &c->target->message_pool);
+	c->scratch_message_from_user.posn_from_user = 0;
+	c->cur_from_user = &c->scratch_message_from_user;
+
+cleanup_unlock:
+	mutex_unlock(&c->lock);
+	return total_processed;
+}
+
+static int dev_release(struct inode *inode, struct file *file)
+{
+	struct channel *c;
+
+	c = channel_from_file(file);
+	mutex_lock(&c->lock);
+	channel_free(c);
+
+	return 0;
+}
+
+static const struct file_operations file_operations = {
+	.owner = THIS_MODULE,
+	.open = dev_open,
+	.llseek = no_llseek,
+	.read_iter = dev_read,
+	.write_iter = dev_write,
+	.release = dev_release,
+};
+
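+/*
+ * The constructor takes exactly three arguments, of which only the third is
+ * used here, as the name of the misc device.  A hypothetical table line of
+ * "0 8388608 user 0 0 example" (the first two target arguments are accepted
+ * but not used by this function) would expose /dev/dm-user/example.
+ */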
+static int user_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct target *t;
+	int r;
+
+	if (argc != 3) {
+		ti->error = "Invalid argument count";
+		r = -EINVAL;
+		goto cleanup_none;
+	}
+
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	if (t == NULL) {
+		r = -ENOMEM;
+		goto cleanup_none;
+	}
+	ti->private = t;
+
+	/* Enable more BIO types. */
+	ti->num_discard_bios = 1;
+	ti->discards_supported = true;
+	ti->num_flush_bios = 1;
+	ti->flush_supported = true;
+
+	/*
+	 * We begin with a single reference to the target, which is miscdev's
+	 * reference.  This ensures that the target won't be freed
+	 * until after the miscdev has been unregistered and all extant
+	 * channels have been closed.
+	 */
+	kref_init(&t->references);
+
+	t->daemon_terminated = false;
+	mutex_init(&t->lock);
+	init_waitqueue_head(&t->wq);
+	INIT_LIST_HEAD(&t->to_user);
+	mempool_init_kmalloc_pool(&t->message_pool, MAX_OUTSTANDING_MESSAGES,
+				  sizeof(struct message));
+
+	t->miscdev.minor = MISC_DYNAMIC_MINOR;
+	t->miscdev.fops = &file_operations;
+	t->miscdev.name = kasprintf(GFP_KERNEL, "dm-user/%s", argv[2]);
+	if (t->miscdev.name == NULL) {
+		r = -ENOMEM;
+		goto cleanup_message_pool;
+	}
+
+	/*
+	 * Once the miscdev is registered it can be opened, and therefore
+	 * concurrent references to the channel can happen.  Holding the target
+	 * lock during misc_register() could deadlock.  If registration
+	 * succeeds then we will not access the target again so we just stick a
+	 * barrier here, which pairs with taking the target lock everywhere
+	 * else the target is accessed.
+	 *
+	 * I forgot where we ended up on the RCpc/RCsc locks.  IIUC, RCsc locks
+	 * would mean that we could take the target lock earlier and release it
+	 * here instead of the memory barrier.  I'm not sure that's any better,
+	 * though, and this isn't on a hot path so it probably doesn't matter
+	 * either way.
+	 */
+	smp_mb();
+
+	r = misc_register(&t->miscdev);
+	if (r) {
+		DMERR("Unable to register miscdev %s for dm-user",
+		      t->miscdev.name);
+		r = -ENOMEM;
+		goto cleanup_misc_name;
+	}
+
+	return 0;
+
+cleanup_misc_name:
+	kfree(t->miscdev.name);
+cleanup_message_pool:
+	mempool_exit(&t->message_pool);
+	kfree(t);
+cleanup_none:
+	return r;
+}
+
+static void user_dtr(struct dm_target *ti)
+{
+	struct target *t = target_from_target(ti);
+
+	/*
+	 * Removes the miscdev.  This must be called without the target lock
+	 * held to avoid a possible deadlock because our open implementation is
+	 * called holding the miscdev lock and must later take the target lock.
+	 *
+	 * There is no race here because only DM can register/unregister the
+	 * miscdev, and DM ensures that doesn't happen twice.  The internal
+	 * miscdev lock is sufficient to ensure there are no races between
+	 * deregistering the miscdev and open.
+	 */
+	misc_deregister(&t->miscdev);
+
+	/*
+	 * We are now free to take the target's lock and drop our reference to
+	 * the target.  There are almost certainly tasks sleeping in read on at
+	 * least one of the channels associated with this target; this
+	 * explicitly wakes them up and terminates the read.
+	 */
+	mutex_lock(&t->lock);
+	/*
+	 * No barrier here, as wait/wake ensures that the flag visibility is
+	 * correct WRT the wake/sleep state of the target tasks.
+	 */
+	t->dm_destroyed = true;
+	wake_up_all(&t->wq);
+	target_put(t);
+}
+
+/*
+ * Consumes a BIO from device mapper, queueing it up for userspace.
+ */
+static int user_map(struct dm_target *ti, struct bio *bio)
+{
+	struct target *t;
+	struct message *entry;
+
+	t = target_from_target(ti);
+	/*
+	 * FIXME
+	 *
+	 * This seems like a bad idea.  Specifically, here we're
+	 * directly on the IO path when we take the target lock, which may also
+	 * be taken from a user context.  The user context doesn't actively
+	 * trigger anything that may sleep while holding the lock, but this
+	 * still seems like a bad idea.
+	 *
+	 * The obvious way to fix this would be to use a proper queue, which
+	 * would result in no shared locks between the direct IO path and user
+	 * tasks.  I had a version that did this, but the head-of-line blocking
+	 * from the circular buffer resulted in us needing a fairly large
+	 * allocation in order to avoid situations in which the queue fills up
+	 * and everything goes off the rails.
+	 *
+	 * I could jump through some hoops to avoid a shared lock while still
+	 * allowing for a large queue, but I'm not actually sure that allowing
+	 * for very large queues is the right thing to do here.  Intuitively it
+	 * seems better to keep the queues small in here (essentially sized to
+	 * the user latency for performance reasons only) and rely on returning
+	 * DM_MAPIO_REQUEUE regularly, as that would give the rest of the
+	 * kernel more information.
+	 *
+	 * I'll spend some time trying to figure out what's going on with
+	 * DM_MAPIO_REQUEUE, but if someone has a better idea of how to fix
+	 * this I'm all ears.
+	 */
+	mutex_lock(&t->lock);
+
+	/*
+	 * FIXME
+	 *
+	 * The assumption here is that there's no benefit to returning
+	 * DM_MAPIO_KILL as opposed to just erroring out the BIO, but I'm not
+	 * sure that's actually true -- for example, I could imagine users
+	 * expecting that submitted BIOs are unlikely to fail and therefore
+	 * relying on submission failure to indicate an unsupported type.
+	 *
+	 * There are two ways I can think of to fix this:
+	 *   - Add DM arguments that are parsed during the constructor that
+	 *     allow various dm_target flags to be set that indicate the op
+	 *     types supported by this target.  This may make sense for things
+	 *     like discard, where DM can already transform the BIOs to a form
+	 *     that's likely to be supported.
+	 *   - Some sort of pre-filter that allows userspace to hook in here
+	 *     and kill BIOs before marking them as submitted.  My guess would
+	 *     be that a userspace round trip is a bad idea here, but a BPF
+	 *     call seems reasonable.
+	 *
+	 * My guess is that we'd likely want to do both.  The first one is easy
+	 * and gives DM the proper info, so it seems better.  The BPF call
+	 * seems overly complex for just this, but one could imagine wanting to
+	 * sometimes return _MAPPED and a BPF filter would be the way to do
+	 * that.
+	 *
+	 * For example, in Android we have an in-kernel DM device called
+	 * "dm-bow" that takes advantage of some portion of the space that has
+	 * been discarded on a device to provide opportunistic block-level
+	 * backups.  While one could imagine just implementing this entirely in
+	 * userspace, that would come with an appreciable performance penalty.
+	 * Instead one could keep a BPF program that forwards most accesses
+	 * directly to the backing block device while informing a userspace
+	 * daemon of any discarded space and on writes to blocks that are to be
+	 * backed up.
+	 */
+	if (unlikely((bio_type_to_user_type(bio) < 0) ||
+		     (bio_flags_to_user_flags(bio) < 0))) {
+		mutex_unlock(&t->lock);
+		return DM_MAPIO_KILL;
+	}
+
+	entry = msg_get_map(t);
+	if (unlikely(entry == NULL)) {
+		mutex_unlock(&t->lock);
+		return DM_MAPIO_REQUEUE;
+	}
+
+	bio_get(bio);
+	entry->msg.type = bio_type_to_user_type(bio);
+	entry->msg.flags = bio_flags_to_user_flags(bio);
+	entry->msg.sector = bio->bi_iter.bi_sector;
+	entry->msg.len = bio_size(bio);
+	entry->bio = bio;
+	entry->posn_to_user = 0;
+	entry->total_to_user = bio_bytes_needed_to_user(bio);
+	entry->posn_from_user = 0;
+	entry->total_from_user = bio_bytes_needed_from_user(bio);
+	entry->delayed = false;
+	entry->t = t;
+	/* Pairs with the barrier in dev_read() */
+	smp_wmb();
+	list_add_tail(&entry->to_user, &t->to_user);
+
+	/*
+	 * If there is no daemon to process the IOs,
+	 * queue these messages on a workqueue with
+	 * a timeout.
+	 */
+	if (!is_user_space_thread_present(t))
+		enqueue_delayed_work(entry, !t->daemon_terminated);
+
+	wake_up_interruptible(&t->wq);
+	mutex_unlock(&t->lock);
+	return DM_MAPIO_SUBMITTED;
+}
+
+static struct target_type user_target = {
+	.name = "user",
+	.version = { 1, 0, 0 },
+	.module = THIS_MODULE,
+	.ctr = user_ctr,
+	.dtr = user_dtr,
+	.map = user_map,
+};
+
+static int __init dm_user_init(void)
+{
+	int r;
+
+	r = dm_register_target(&user_target);
+	if (r) {
+		DMERR("register failed %d", r);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	return r;
+}
+
+static void __exit dm_user_exit(void)
+{
+	dm_unregister_target(&user_target);
+}
+
+module_init(dm_user_init);
+module_exit(dm_user_exit);
+MODULE_AUTHOR("Palmer Dabbelt <palmerdabbelt@google.com>");
+MODULE_DESCRIPTION(DM_NAME " target returning blocks from userspace");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index b0781265..f12b43f 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -65,8 +65,7 @@
 # Multimedia support - automatically enable V4L2 and DVB core
 #
 config MEDIA_CAMERA_SUPPORT
-	bool
-	prompt "Cameras and video grabbers" if MEDIA_SUPPORT_FILTER
+	bool "Cameras and video grabbers"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable support for webcams and video grabbers.
@@ -74,8 +73,7 @@
 	  Say Y when you have a webcam or a video capture grabber board.
 
 config MEDIA_ANALOG_TV_SUPPORT
-	bool
-	prompt "Analog TV" if MEDIA_SUPPORT_FILTER
+	bool "Analog TV"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable analog TV support.
@@ -88,8 +86,7 @@
 		will disable support for them.
 
 config MEDIA_DIGITAL_TV_SUPPORT
-	bool
-	prompt "Digital TV" if MEDIA_SUPPORT_FILTER
+	bool "Digital TV"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable digital TV support.
@@ -98,8 +95,7 @@
 	  hybrid digital TV and analog TV.
 
 config MEDIA_RADIO_SUPPORT
-	bool
-	prompt "AM/FM radio receivers/transmitters" if MEDIA_SUPPORT_FILTER
+	bool "AM/FM radio receivers/transmitters"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable AM/FM radio support.
@@ -114,8 +110,7 @@
 		disable support for them.
 
 config MEDIA_SDR_SUPPORT
-	bool
-	prompt "Software defined radio" if MEDIA_SUPPORT_FILTER
+	bool "Software defined radio"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable software defined radio support.
@@ -123,8 +118,7 @@
 	  Say Y when you have a software defined radio device.
 
 config MEDIA_PLATFORM_SUPPORT
-	bool
-	prompt "Platform-specific devices" if MEDIA_SUPPORT_FILTER
+	bool "Platform-specific devices"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable support for complex cameras, codecs, and other hardware
@@ -137,8 +131,7 @@
 	  Say Y when you want to be able to see such devices.
 
 config MEDIA_TEST_SUPPORT
-	bool
-	prompt "Test drivers" if MEDIA_SUPPORT_FILTER
+	bool "Test drivers"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Those drivers should not be used on production Kernels, but
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0f5a49f..a453f75 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -460,6 +460,21 @@
 	tristate
 	default MISC_RTSX_PCI || MISC_RTSX_USB
 
+config UID_SYS_STATS
+	bool "Per-UID statistics"
+	depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING
+	help
+	  Per UID based cpu time statistics exported to /proc/uid_cputime
+	  Per UID based io statistics exported to /proc/uid_io
+	  Per UID based procstat control in /proc/uid_procstat
+
+config UID_SYS_STATS_DEBUG
+	bool "Per-TASK statistics"
+	depends on UID_SYS_STATS
+	default n
+	help
+	  Per TASK based io statistics exported to /proc/uid_io
+
 config HISI_HIKEY_USB
 	tristate "USB GPIO Hub on HiSilicon Hikey 960/970 Platform"
 	depends on (OF && GPIOLIB) || COMPILE_TEST
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index a086197..dfc2c42 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -59,3 +59,4 @@
 obj-$(CONFIG_XILINX_SDFEC)	+= xilinx_sdfec.o
 obj-$(CONFIG_HISI_HIKEY_USB)	+= hisi_hikey_usb.o
 obj-$(CONFIG_HI6421V600_IRQ)	+= hi6421v600-irq.o
+obj-$(CONFIG_UID_SYS_STATS)	+= uid_sys_stats.o
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
new file mode 100644
index 0000000..4733670
--- /dev/null
+++ b/drivers/misc/uid_sys_stats.c
@@ -0,0 +1,706 @@
+/* drivers/misc/uid_sys_stats.c
+ *
+ * Copyright (C) 2014 - 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+#include <linux/rtmutex.h>
+#include <linux/sched/cputime.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+
+#define UID_HASH_BITS	10
+DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
+
+static DEFINE_RT_MUTEX(uid_lock);
+static struct proc_dir_entry *cpu_parent;
+static struct proc_dir_entry *io_parent;
+static struct proc_dir_entry *proc_parent;
+
+struct io_stats {
+	u64 read_bytes;
+	u64 write_bytes;
+	u64 rchar;
+	u64 wchar;
+	u64 fsync;
+};
+
+#define UID_STATE_FOREGROUND	0
+#define UID_STATE_BACKGROUND	1
+#define UID_STATE_BUCKET_SIZE	2
+
+#define UID_STATE_TOTAL_CURR	2
+#define UID_STATE_TOTAL_LAST	3
+#define UID_STATE_DEAD_TASKS	4
+#define UID_STATE_SIZE		5
+
+#define MAX_TASK_COMM_LEN 256
+
+struct task_entry {
+	char comm[MAX_TASK_COMM_LEN];
+	pid_t pid;
+	struct io_stats io[UID_STATE_SIZE];
+	struct hlist_node hash;
+};
+
+struct uid_entry {
+	uid_t uid;
+	u64 utime;
+	u64 stime;
+	u64 active_utime;
+	u64 active_stime;
+	int state;
+	struct io_stats io[UID_STATE_SIZE];
+	struct hlist_node hash;
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	DECLARE_HASHTABLE(task_entries, UID_HASH_BITS);
+#endif
+};
+
+static u64 compute_write_bytes(struct task_struct *task)
+{
+	if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
+		return 0;
+
+	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
+}
+
+static void compute_io_bucket_stats(struct io_stats *io_bucket,
+					struct io_stats *io_curr,
+					struct io_stats *io_last,
+					struct io_stats *io_dead)
+{
+	/* Tasks could switch to another uid group, but their io_last in the
+	 * previous uid group could still be positive.
+	 * Therefore, check that each delta is non-negative before applying it.
+	 */
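+	/* For example, with read_bytes: io_curr = 100, io_dead = 20 and
+	 * io_last = 90 give delta = 30, which is added to the bucket;
+	 * io_last is then reset to 100.
+	 */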
+	int64_t delta;
+
+	delta = io_curr->read_bytes + io_dead->read_bytes -
+		io_last->read_bytes;
+	io_bucket->read_bytes += delta > 0 ? delta : 0;
+	delta = io_curr->write_bytes + io_dead->write_bytes -
+		io_last->write_bytes;
+	io_bucket->write_bytes += delta > 0 ? delta : 0;
+	delta = io_curr->rchar + io_dead->rchar - io_last->rchar;
+	io_bucket->rchar += delta > 0 ? delta : 0;
+	delta = io_curr->wchar + io_dead->wchar - io_last->wchar;
+	io_bucket->wchar += delta > 0 ? delta : 0;
+	delta = io_curr->fsync + io_dead->fsync - io_last->fsync;
+	io_bucket->fsync += delta > 0 ? delta : 0;
+
+	io_last->read_bytes = io_curr->read_bytes;
+	io_last->write_bytes = io_curr->write_bytes;
+	io_last->rchar = io_curr->rchar;
+	io_last->wchar = io_curr->wchar;
+	io_last->fsync = io_curr->fsync;
+
+	memset(io_dead, 0, sizeof(struct io_stats));
+}
+
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
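+/*
+ * Builds the full command string for a task in the form (illustrative,
+ * hypothetical values):
+ *
+ *   "<thread name, space-padded to TASK_COMM_LEN><exe path> <cmdline>"
+ *   e.g. "mythread         /system/bin/foo --arg1 --arg2"
+ */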
+static void get_full_task_comm(struct task_entry *task_entry,
+		struct task_struct *task)
+{
+	int i = 0, offset = 0, len = 0;
+	/* save one byte for terminating null character */
+	int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1;
+	char buf[MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1];
+	struct mm_struct *mm = task->mm;
+
+	/* fill the first TASK_COMM_LEN bytes with thread name */
+	__get_task_comm(task_entry->comm, TASK_COMM_LEN, task);
+	i = strlen(task_entry->comm);
+	while (i < TASK_COMM_LEN)
+		task_entry->comm[i++] = ' ';
+
+	/* next the executable file name */
+	if (mm) {
+		mmap_write_lock(mm);
+		if (mm->exe_file) {
+			char *pathname = d_path(&mm->exe_file->f_path, buf,
+					unused_len);
+
+			if (!IS_ERR(pathname)) {
+				len = strlcpy(task_entry->comm + i, pathname,
+						unused_len);
+				i += len;
+				task_entry->comm[i++] = ' ';
+				unused_len--;
+			}
+		}
+		mmap_write_unlock(mm);
+	}
+	unused_len -= len;
+
+	/* fill the rest with the command line arguments,
+	 * replacing each null or newline character
+	 * between args in argv with whitespace */
+	len = get_cmdline(task, buf, unused_len);
+	while (offset < len) {
+		if (buf[offset] != '\0' && buf[offset] != '\n')
+			task_entry->comm[i++] = buf[offset];
+		else
+			task_entry->comm[i++] = ' ';
+		offset++;
+	}
+
+	/* get rid of trailing whitespace in case the args were memset to
+	 * zero before being rewritten in userspace
+	 */
+	while (task_entry->comm[i-1] == ' ')
+		i--;
+	task_entry->comm[i] = '\0';
+}
+
+static struct task_entry *find_task_entry(struct uid_entry *uid_entry,
+		struct task_struct *task)
+{
+	struct task_entry *task_entry;
+
+	hash_for_each_possible(uid_entry->task_entries, task_entry, hash,
+			task->pid) {
+		if (task->pid == task_entry->pid) {
+			/* if thread name changed, update the entire command */
+			int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN)
+				- task_entry->comm;
+
+			if (strncmp(task_entry->comm, task->comm, len))
+				get_full_task_comm(task_entry, task);
+			return task_entry;
+		}
+	}
+	return NULL;
+}
+
+static struct task_entry *find_or_register_task(struct uid_entry *uid_entry,
+		struct task_struct *task)
+{
+	struct task_entry *task_entry;
+	pid_t pid = task->pid;
+
+	task_entry = find_task_entry(uid_entry, task);
+	if (task_entry)
+		return task_entry;
+
+	task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC);
+	if (!task_entry)
+		return NULL;
+
+	get_full_task_comm(task_entry, task);
+
+	task_entry->pid = pid;
+	hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid);
+
+	return task_entry;
+}
+
+static void remove_uid_tasks(struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+	struct hlist_node *tmp_task;
+
+	hash_for_each_safe(uid_entry->task_entries, bkt_task,
+			tmp_task, task_entry, hash) {
+		hash_del(&task_entry->hash);
+		kfree(task_entry);
+	}
+}
+
+static void set_io_uid_tasks_zero(struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+
+	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
+		memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0,
+			sizeof(struct io_stats));
+	}
+}
+
+static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
+		struct task_struct *task, int slot)
+{
+	struct task_entry *task_entry = find_or_register_task(uid_entry, task);
+	struct io_stats *task_io_slot = &task_entry->io[slot];
+
+	task_io_slot->read_bytes += task->ioac.read_bytes;
+	task_io_slot->write_bytes += compute_write_bytes(task);
+	task_io_slot->rchar += task->ioac.rchar;
+	task_io_slot->wchar += task->ioac.wchar;
+	task_io_slot->fsync += task->ioac.syscfs;
+}
+
+static void compute_io_uid_tasks(struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+
+	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
+		compute_io_bucket_stats(&task_entry->io[uid_entry->state],
+					&task_entry->io[UID_STATE_TOTAL_CURR],
+					&task_entry->io[UID_STATE_TOTAL_LAST],
+					&task_entry->io[UID_STATE_DEAD_TASKS]);
+	}
+}
+
+static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+
+	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
+		/* Use commas as separators, since the task comm may contain spaces */
+		seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n",
+				task_entry->comm,
+				(unsigned long)task_entry->pid,
+				task_entry->io[UID_STATE_FOREGROUND].rchar,
+				task_entry->io[UID_STATE_FOREGROUND].wchar,
+				task_entry->io[UID_STATE_FOREGROUND].read_bytes,
+				task_entry->io[UID_STATE_FOREGROUND].write_bytes,
+				task_entry->io[UID_STATE_BACKGROUND].rchar,
+				task_entry->io[UID_STATE_BACKGROUND].wchar,
+				task_entry->io[UID_STATE_BACKGROUND].read_bytes,
+				task_entry->io[UID_STATE_BACKGROUND].write_bytes,
+				task_entry->io[UID_STATE_FOREGROUND].fsync,
+				task_entry->io[UID_STATE_BACKGROUND].fsync);
+	}
+}
+#else
+static void remove_uid_tasks(struct uid_entry *uid_entry) {}
+static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {}
+static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
+		struct task_struct *task, int slot) {}
+static void compute_io_uid_tasks(struct uid_entry *uid_entry) {}
+static void show_io_uid_tasks(struct seq_file *m,
+		struct uid_entry *uid_entry) {}
+#endif
+
+static struct uid_entry *find_uid_entry(uid_t uid)
+{
+	struct uid_entry *uid_entry;
+
+	hash_for_each_possible(hash_table, uid_entry, hash, uid) {
+		if (uid_entry->uid == uid)
+			return uid_entry;
+	}
+	return NULL;
+}
+
+static struct uid_entry *find_or_register_uid(uid_t uid)
+{
+	struct uid_entry *uid_entry;
+
+	uid_entry = find_uid_entry(uid);
+	if (uid_entry)
+		return uid_entry;
+
+	uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
+	if (!uid_entry)
+		return NULL;
+
+	uid_entry->uid = uid;
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	hash_init(uid_entry->task_entries);
+#endif
+	hash_add(hash_table, &uid_entry->hash, uid);
+
+	return uid_entry;
+}
+
+static int uid_cputime_show(struct seq_file *m, void *v)
+{
+	struct uid_entry *uid_entry = NULL;
+	struct task_struct *task, *temp;
+	struct user_namespace *user_ns = current_user_ns();
+	u64 utime;
+	u64 stime;
+	unsigned long bkt;
+	uid_t uid;
+
+	rt_mutex_lock(&uid_lock);
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		uid_entry->active_stime = 0;
+		uid_entry->active_utime = 0;
+	}
+
+	rcu_read_lock();
+	do_each_thread(temp, task) {
+		uid = from_kuid_munged(user_ns, task_uid(task));
+		if (!uid_entry || uid_entry->uid != uid)
+			uid_entry = find_or_register_uid(uid);
+		if (!uid_entry) {
+			rcu_read_unlock();
+			rt_mutex_unlock(&uid_lock);
+			pr_err("%s: failed to find the uid_entry for uid %d\n",
+				__func__, uid);
+			return -ENOMEM;
+		}
+		/* avoid double accounting of dying threads */
+		if (!(task->flags & PF_EXITING)) {
+			task_cputime_adjusted(task, &utime, &stime);
+			uid_entry->active_utime += utime;
+			uid_entry->active_stime += stime;
+		}
+	} while_each_thread(temp, task);
+	rcu_read_unlock();
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		u64 total_utime = uid_entry->utime +
+							uid_entry->active_utime;
+		u64 total_stime = uid_entry->stime +
+							uid_entry->active_stime;
+		seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
+			ktime_to_us(total_utime), ktime_to_us(total_stime));
+	}
+
+	rt_mutex_unlock(&uid_lock);
+	return 0;
+}
+
+static int uid_cputime_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, uid_cputime_show, PDE_DATA(inode));
+}
+
+static const struct proc_ops uid_cputime_fops = {
+	.proc_open	= uid_cputime_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+};
+
+static int uid_remove_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, NULL, NULL);
+}
+
+static ssize_t uid_remove_write(struct file *file,
+			const char __user *buffer, size_t count, loff_t *ppos)
+{
+	struct uid_entry *uid_entry;
+	struct hlist_node *tmp;
+	char uids[128];
+	char *start_uid, *end_uid = NULL;
+	long uid_start = 0, uid_end = 0;
+
+	if (count >= sizeof(uids))
+		count = sizeof(uids) - 1;
+
+	if (copy_from_user(uids, buffer, count))
+		return -EFAULT;
+
+	uids[count] = '\0';
+	end_uid = uids;
+	start_uid = strsep(&end_uid, "-");
+
+	if (!start_uid || !end_uid)
+		return -EINVAL;
+
+	if (kstrtol(start_uid, 10, &uid_start) != 0 ||
+		kstrtol(end_uid, 10, &uid_end) != 0) {
+		return -EINVAL;
+	}
+
+	rt_mutex_lock(&uid_lock);
+
+	for (; uid_start <= uid_end; uid_start++) {
+		hash_for_each_possible_safe(hash_table, uid_entry, tmp,
+							hash, (uid_t)uid_start) {
+			if (uid_start == uid_entry->uid) {
+				remove_uid_tasks(uid_entry);
+				hash_del(&uid_entry->hash);
+				kfree(uid_entry);
+			}
+		}
+	}
+
+	rt_mutex_unlock(&uid_lock);
+	return count;
+}
+
+static const struct proc_ops uid_remove_fops = {
+	.proc_open	= uid_remove_open,
+	.proc_release	= single_release,
+	.proc_write	= uid_remove_write,
+};
+
+static void add_uid_io_stats(struct uid_entry *uid_entry,
+			struct task_struct *task, int slot)
+{
+	struct io_stats *io_slot = &uid_entry->io[slot];
+
+	/* avoid double accounting of dying threads */
+	if (slot != UID_STATE_DEAD_TASKS && (task->flags & PF_EXITING))
+		return;
+
+	io_slot->read_bytes += task->ioac.read_bytes;
+	io_slot->write_bytes += compute_write_bytes(task);
+	io_slot->rchar += task->ioac.rchar;
+	io_slot->wchar += task->ioac.wchar;
+	io_slot->fsync += task->ioac.syscfs;
+
+	add_uid_tasks_io_stats(uid_entry, task, slot);
+}
+
+static void update_io_stats_all_locked(void)
+{
+	struct uid_entry *uid_entry = NULL;
+	struct task_struct *task, *temp;
+	struct user_namespace *user_ns = current_user_ns();
+	unsigned long bkt;
+	uid_t uid;
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
+			sizeof(struct io_stats));
+		set_io_uid_tasks_zero(uid_entry);
+	}
+
+	rcu_read_lock();
+	do_each_thread(temp, task) {
+		uid = from_kuid_munged(user_ns, task_uid(task));
+		if (!uid_entry || uid_entry->uid != uid)
+			uid_entry = find_or_register_uid(uid);
+		if (!uid_entry)
+			continue;
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
+	} while_each_thread(temp, task);
+	rcu_read_unlock();
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
+					&uid_entry->io[UID_STATE_TOTAL_CURR],
+					&uid_entry->io[UID_STATE_TOTAL_LAST],
+					&uid_entry->io[UID_STATE_DEAD_TASKS]);
+		compute_io_uid_tasks(uid_entry);
+	}
+}
+
+static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
+{
+	struct task_struct *task, *temp;
+	struct user_namespace *user_ns = current_user_ns();
+
+	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
+		sizeof(struct io_stats));
+	set_io_uid_tasks_zero(uid_entry);
+
+	rcu_read_lock();
+	do_each_thread(temp, task) {
+		if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
+			continue;
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
+	} while_each_thread(temp, task);
+	rcu_read_unlock();
+
+	compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
+				&uid_entry->io[UID_STATE_TOTAL_CURR],
+				&uid_entry->io[UID_STATE_TOTAL_LAST],
+				&uid_entry->io[UID_STATE_DEAD_TASKS]);
+	compute_io_uid_tasks(uid_entry);
+}
+
+static int uid_io_show(struct seq_file *m, void *v)
+{
+	struct uid_entry *uid_entry;
+	unsigned long bkt;
+
+	rt_mutex_lock(&uid_lock);
+
+	update_io_stats_all_locked();
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+				uid_entry->uid,
+				uid_entry->io[UID_STATE_FOREGROUND].rchar,
+				uid_entry->io[UID_STATE_FOREGROUND].wchar,
+				uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
+				uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
+				uid_entry->io[UID_STATE_BACKGROUND].rchar,
+				uid_entry->io[UID_STATE_BACKGROUND].wchar,
+				uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
+				uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
+				uid_entry->io[UID_STATE_FOREGROUND].fsync,
+				uid_entry->io[UID_STATE_BACKGROUND].fsync);
+
+		show_io_uid_tasks(m, uid_entry);
+	}
+
+	rt_mutex_unlock(&uid_lock);
+	return 0;
+}
+
+static int uid_io_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, uid_io_show, PDE_DATA(inode));
+}
+
+static const struct proc_ops uid_io_fops = {
+	.proc_open	= uid_io_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+};
+
+static int uid_procstat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, NULL, NULL);
+}
+
+static ssize_t uid_procstat_write(struct file *file,
+			const char __user *buffer, size_t count, loff_t *ppos)
+{
+	struct uid_entry *uid_entry;
+	uid_t uid;
+	int argc, state;
+	char input[128];
+
+	if (count >= sizeof(input))
+		return -EINVAL;
+
+	if (copy_from_user(input, buffer, count))
+		return -EFAULT;
+
+	input[count] = '\0';
+
+	argc = sscanf(input, "%u %d", &uid, &state);
+	if (argc != 2)
+		return -EINVAL;
+
+	if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
+		return -EINVAL;
+
+	rt_mutex_lock(&uid_lock);
+
+	uid_entry = find_or_register_uid(uid);
+	if (!uid_entry) {
+		rt_mutex_unlock(&uid_lock);
+		return -EINVAL;
+	}
+
+	if (uid_entry->state == state) {
+		rt_mutex_unlock(&uid_lock);
+		return count;
+	}
+
+	update_io_stats_uid_locked(uid_entry);
+
+	uid_entry->state = state;
+
+	rt_mutex_unlock(&uid_lock);
+
+	return count;
+}
+
+static const struct proc_ops uid_procstat_fops = {
+	.proc_open	= uid_procstat_open,
+	.proc_release	= single_release,
+	.proc_write	= uid_procstat_write,
+};
+
+static int process_notifier(struct notifier_block *self,
+			unsigned long cmd, void *v)
+{
+	struct task_struct *task = v;
+	struct uid_entry *uid_entry;
+	u64 utime, stime;
+	uid_t uid;
+
+	if (!task)
+		return NOTIFY_OK;
+
+	rt_mutex_lock(&uid_lock);
+	uid = from_kuid_munged(current_user_ns(), task_uid(task));
+	uid_entry = find_or_register_uid(uid);
+	if (!uid_entry) {
+		pr_err("%s: failed to find uid %d\n", __func__, uid);
+		goto exit;
+	}
+
+	task_cputime_adjusted(task, &utime, &stime);
+	uid_entry->utime += utime;
+	uid_entry->stime += stime;
+
+	add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);
+
+exit:
+	rt_mutex_unlock(&uid_lock);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block process_notifier_block = {
+	.notifier_call	= process_notifier,
+};
+
+static int __init proc_uid_sys_stats_init(void)
+{
+	hash_init(hash_table);
+
+	cpu_parent = proc_mkdir("uid_cputime", NULL);
+	if (!cpu_parent) {
+		pr_err("%s: failed to create uid_cputime proc entry\n",
+			__func__);
+		goto err;
+	}
+
+	proc_create_data("remove_uid_range", 0222, cpu_parent,
+		&uid_remove_fops, NULL);
+	proc_create_data("show_uid_stat", 0444, cpu_parent,
+		&uid_cputime_fops, NULL);
+
+	io_parent = proc_mkdir("uid_io", NULL);
+	if (!io_parent) {
+		pr_err("%s: failed to create uid_io proc entry\n",
+			__func__);
+		goto err;
+	}
+
+	proc_create_data("stats", 0444, io_parent,
+		&uid_io_fops, NULL);
+
+	proc_parent = proc_mkdir("uid_procstat", NULL);
+	if (!proc_parent) {
+		pr_err("%s: failed to create uid_procstat proc entry\n",
+			__func__);
+		goto err;
+	}
+
+	proc_create_data("set", 0222, proc_parent,
+		&uid_procstat_fops, NULL);
+
+	profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
+
+	return 0;
+
+err:
+	remove_proc_subtree("uid_cputime", NULL);
+	remove_proc_subtree("uid_io", NULL);
+	remove_proc_subtree("uid_procstat", NULL);
+	return -ENOMEM;
+}
+
+early_initcall(proc_uid_sys_stats_init);
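
A minimal userspace sketch of how these proc interfaces are driven, assuming
UID_STATE_FOREGROUND is 0 as defined earlier in uid_sys_stats.c; the UID value
is an example::

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/proc/uid_procstat/set", "w");

		/* mark UID 10001 as foreground before sampling its I/O */
		if (f) {
			fprintf(f, "10001 0");
			fclose(f);
		}

		f = fopen("/proc/uid_io/stats", "r");
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f)) {
			/* per-task lines are comma-separated and start with "task," */
			if (strncmp(line, "task,", 5) == 0)
				continue;
			/* "<uid> <fg rchar> <fg wchar> <fg read_bytes> ..." */
			fputs(line, stdout);
		}
		fclose(f);
		return 0;
	}
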
diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c
index d5f4b69..6652982 100644
--- a/drivers/mmc/host/cqhci-crypto.c
+++ b/drivers/mmc/host/cqhci-crypto.c
@@ -210,6 +210,8 @@
 	/* Unfortunately, CQHCI crypto only supports 32 DUN bits. */
 	profile->max_dun_bytes_supported = 4;
 
+	profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_STANDARD;
+
 	/*
 	 * Cache all the crypto capabilities and advertise the supported crypto
 	 * modes and data unit sizes to the block layer.
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 514f2c1..3cfd990 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -14,6 +14,7 @@
 #include <linux/etherdevice.h>
 #include <linux/math64.h>
 #include <linux/module.h>
+#include <net/virt_wifi.h>
 
 static struct wiphy *common_wiphy;
 
@@ -21,6 +22,7 @@
 	struct delayed_work scan_result;
 	struct cfg80211_scan_request *scan_request;
 	bool being_deleted;
+	struct virt_wifi_network_simulation *network_simulation;
 };
 
 static struct ieee80211_channel channel_2ghz = {
@@ -172,6 +174,9 @@
 
 	priv->scan_request = request;
 	schedule_delayed_work(&priv->scan_result, HZ * 2);
+	if (priv->network_simulation &&
+	    priv->network_simulation->notify_scan_trigger)
+		priv->network_simulation->notify_scan_trigger(wiphy, request);
 
 	return 0;
 }
@@ -187,6 +192,12 @@
 
 	virt_wifi_inform_bss(wiphy);
 
+	if (priv->network_simulation &&
+	    priv->network_simulation->generate_virt_scan_result) {
+		if (priv->network_simulation->generate_virt_scan_result(wiphy))
+			wiphy_err(wiphy, "failed to generate the simulated scan result\n");
+	}
+
 	/* Schedules work which acquires and releases the rtnl lock. */
 	cfg80211_scan_done(priv->scan_request, &scan_info);
 	priv->scan_request = NULL;
@@ -378,6 +389,8 @@
 	priv = wiphy_priv(wiphy);
 	priv->being_deleted = false;
 	priv->scan_request = NULL;
+	priv->network_simulation = NULL;
+
 	INIT_DELAYED_WORK(&priv->scan_result, virt_wifi_scan_result);
 
 	err = wiphy_register(wiphy);
@@ -393,7 +406,6 @@
 static void virt_wifi_destroy_wiphy(struct wiphy *wiphy)
 {
 	struct virt_wifi_wiphy_priv *priv;
-
 	WARN(!wiphy, "%s called with null wiphy", __func__);
 	if (!wiphy)
 		return;
@@ -427,8 +439,13 @@
 static int virt_wifi_net_device_open(struct net_device *dev)
 {
 	struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
-
+	struct virt_wifi_wiphy_priv *w_priv;
+
 	priv->is_up = true;
+	w_priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
+	if (w_priv->network_simulation &&
+	    w_priv->network_simulation->notify_device_open)
+		w_priv->network_simulation->notify_device_open(dev);
+
 	return 0;
 }
 
@@ -436,16 +453,22 @@
 static int virt_wifi_net_device_stop(struct net_device *dev)
 {
 	struct virt_wifi_netdev_priv *n_priv = netdev_priv(dev);
+	struct virt_wifi_wiphy_priv *w_priv;
 
 	n_priv->is_up = false;
 
 	if (!dev->ieee80211_ptr)
 		return 0;
+	w_priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
 
 	virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy);
 	virt_wifi_cancel_connect(dev);
 	netif_carrier_off(dev);
 
+	if (w_priv->network_simulation &&
+	    w_priv->network_simulation->notify_device_stop)
+		w_priv->network_simulation->notify_device_stop(dev);
+
 	return 0;
 }
 
@@ -688,6 +711,27 @@
 	unregister_netdevice_notifier(&virt_wifi_notifier);
 }
 
+int virt_wifi_register_network_simulation(
+		struct virt_wifi_network_simulation *ops)
+{
+	struct virt_wifi_wiphy_priv *priv = wiphy_priv(common_wiphy);
+
+	if (priv->network_simulation)
+		return -EEXIST;
+	priv->network_simulation = ops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(virt_wifi_register_network_simulation);
+
+int virt_wifi_unregister_network_simulation(void)
+{
+	struct virt_wifi_wiphy_priv *priv = wiphy_priv(common_wiphy);
+
+	if (!priv->network_simulation)
+		return -ENODATA;
+	priv->network_simulation = NULL;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(virt_wifi_unregister_network_simulation);
+
 module_init(virt_wifi_init_module);
 module_exit(virt_wifi_cleanup_module);
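
A minimal sketch of a module consuming the new registration API, assuming the
struct virt_wifi_network_simulation in <net/virt_wifi.h> carries exactly the
callbacks invoked from the call sites above::

	#include <linux/module.h>
	#include <net/cfg80211.h>
	#include <net/virt_wifi.h>

	static void sim_device_open(struct net_device *dev)
	{
		netdev_info(dev, "simulation: device opened\n");
	}

	static void sim_device_stop(struct net_device *dev)
	{
		netdev_info(dev, "simulation: device stopped\n");
	}

	static void sim_scan_trigger(struct wiphy *wiphy,
				     struct cfg80211_scan_request *request)
	{
		wiphy_info(wiphy, "simulation: scan triggered\n");
	}

	static int sim_scan_result(struct wiphy *wiphy)
	{
		return 0;	/* no extra BSS entries to inject */
	}

	static struct virt_wifi_network_simulation sim_ops = {
		.notify_device_open	   = sim_device_open,
		.notify_device_stop	   = sim_device_stop,
		.notify_scan_trigger	   = sim_scan_trigger,
		.generate_virt_scan_result = sim_scan_result,
	};

	static int __init sim_init(void)
	{
		return virt_wifi_register_network_simulation(&sim_ops);
	}

	static void __exit sim_exit(void)
	{
		virt_wifi_unregister_network_simulation();
	}

	module_init(sim_init);
	module_exit(sim_exit);
	MODULE_LICENSE("GPL");
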
 
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index bdca352..41c144d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1124,16 +1124,40 @@
 	return 0;
 }
 
+/*
+ * Convert configs to something easy to use in C code
+ */
+#if defined(CONFIG_CMDLINE_FORCE)
+static const int overwrite_incoming_cmdline = 1;
+static const int read_dt_cmdline;
+static const int concat_cmdline;
+#elif defined(CONFIG_CMDLINE_EXTEND)
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline = 1;
+#else /* CMDLINE_FROM_BOOTLOADER */
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline;
+#endif
+
+#ifdef CONFIG_CMDLINE
+static const char *config_cmdline = CONFIG_CMDLINE;
+#else
+static const char *config_cmdline = "";
+#endif
+
 int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 				     int depth, void *data)
 {
-	int l;
-	const char *p;
+	int l = 0;
+	const char *p = NULL;
 	const void *rng_seed;
+	char *cmdline = data;
 
 	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
-	if (depth != 1 || !data ||
+	if (depth != 1 || !cmdline ||
 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
 		return 0;
 
@@ -1141,28 +1165,28 @@
 	early_init_dt_check_for_elfcorehdr(node);
 	early_init_dt_check_for_usable_mem_range(node);
 
-	/* Retrieve command line */
-	p = of_get_flat_dt_prop(node, "bootargs", &l);
-	if (p != NULL && l > 0)
-		strlcpy(data, p, min(l, COMMAND_LINE_SIZE));
+	/* Use CONFIG_CMDLINE if forced, or if the incoming cmdline is empty */
+	if (overwrite_incoming_cmdline || !cmdline[0])
+		strlcpy(cmdline, config_cmdline, COMMAND_LINE_SIZE);
 
-	/*
-	 * CONFIG_CMDLINE is meant to be a default in case nothing else
-	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
-	 * is set in which case we override whatever was found earlier.
-	 */
-#ifdef CONFIG_CMDLINE
-#if defined(CONFIG_CMDLINE_EXTEND)
-	strlcat(data, " ", COMMAND_LINE_SIZE);
-	strlcat(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#elif defined(CONFIG_CMDLINE_FORCE)
-	strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#else
-	/* No arguments from boot loader, use kernel's  cmdl*/
-	if (!((char *)data)[0])
-		strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif
-#endif /* CONFIG_CMDLINE */
+	/* Retrieve command line unless forcing */
+	if (read_dt_cmdline)
+		p = of_get_flat_dt_prop(node, "bootargs", &l);
+
+	if (p != NULL && l > 0) {
+		if (concat_cmdline) {
+			int cmdline_len;
+			int copy_len;
+			strlcat(cmdline, " ", COMMAND_LINE_SIZE);
+			cmdline_len = strlen(cmdline);
+			copy_len = COMMAND_LINE_SIZE - cmdline_len - 1;
+			copy_len = min((int)l, copy_len);
+			strncpy(cmdline + cmdline_len, p, copy_len);
+			cmdline[cmdline_len + copy_len] = '\0';
+		} else {
+			strlcpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
+		}
+	}
 
 	pr_debug("Command line is: %s\n", (char *)data);
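
For a concrete picture of the three modes this rewrite encodes, take
CONFIG_CMDLINE="console=ttyS0" and a devicetree /chosen/bootargs of
"root=/dev/vda"::

	CONFIG_CMDLINE_FORCE:     "console=ttyS0"               (bootargs ignored)
	CONFIG_CMDLINE_EXTEND:    "console=ttyS0 root=/dev/vda" (config, then DT appended)
	CMDLINE_FROM_BOOTLOADER:  "root=/dev/vda"               (CONFIG_CMDLINE used only
	                          when bootargs is absent or empty)
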
 
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index fbded24..a5d2d92 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -37,9 +37,7 @@
 DTC_FLAGS_testcases += -@
 
 # suppress warnings about intentional errors
-DTC_FLAGS_testcases += -Wno-interrupts_property \
-	-Wno-node_name_vs_property_name \
-	-Wno-interrupt_map
+DTC_FLAGS_testcases += -Wno-interrupts_property
 
 # Apply overlays statically with fdtoverlay.  This is a build time test that
 # the overlays can be applied successfully by fdtoverlay.  This does not
@@ -84,10 +82,6 @@
 
 apply_static_overlay_2 := overlay.dtbo
 
-DTC_FLAGS_static_base_1 += -Wno-interrupts_property \
-	-Wno-node_name_vs_property_name \
-	-Wno-interrupt_map
-
 static_test_1-dtbs := static_base_1.dtb $(apply_static_overlay_1)
 static_test_2-dtbs := static_base_2.dtb $(apply_static_overlay_2)
 
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index f4755f3..db2495b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -83,6 +83,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dw_handle_msi_irq);
 
 /* Chained MSI interrupt service routine */
 static void dw_chained_msi_isr(struct irq_desc *desc)
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index fc12a4f..efb43fc 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -32,6 +32,13 @@
 
 static struct device_type power_supply_dev_type;
 
+struct match_device_node_array_param {
+	struct device_node *parent_of_node;
+	struct power_supply **psy;
+	ssize_t psy_size;
+	ssize_t psy_count;
+};
+
 #define POWER_SUPPLY_DEFERRED_REGISTER_TIME	msecs_to_jiffies(10)
 
 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
@@ -522,6 +529,77 @@
 }
 EXPORT_SYMBOL_GPL(power_supply_get_by_phandle);
 
+static int power_supply_match_device_node_array(struct device *dev,
+						void *data)
+{
+	struct match_device_node_array_param *param =
+		(struct match_device_node_array_param *)data;
+	struct power_supply **psy = param->psy;
+	ssize_t size = param->psy_size;
+	ssize_t *count = &param->psy_count;
+
+	if (!dev->parent || dev->parent->of_node != param->parent_of_node)
+		return 0;
+
+	if (*count >= size)
+		return -EOVERFLOW;
+
+	psy[*count] = dev_get_drvdata(dev);
+	atomic_inc(&psy[*count]->use_cnt);
+	(*count)++;
+
+	return 0;
+}
+
+/**
+ * power_supply_get_by_phandle_array() - Similar to
+ * power_supply_get_by_phandle but returns an array of power supply
+ * objects which are associated with the phandle.
+ * @np: Pointer to device node holding phandle property.
+ * @property: Name of property holding a power supply name.
+ * @psy: Array of power_supply pointers provided by the client which is
+ * filled by power_supply_get_by_phandle_array.
+ * @size: size of power_supply pointer array.
+ *
+ * If power supplies were found, the reference count of each matched
+ * supply's device is incremented. The caller should call
+ * power_supply_put() on each returned supply after usage.
+ *
+ * Return: On success returns the number of power supply objects filled
+ * in the @psy array.
+ * -EOVERFLOW when the @psy array is too small.
+ * -EINVAL when @psy is NULL or @size is 0.
+ * -ENODEV when no matching device_node is found.
+ */
+int power_supply_get_by_phandle_array(struct device_node *np,
+				      const char *property,
+				      struct power_supply **psy,
+				      ssize_t size)
+{
+	struct device_node *power_supply_np;
+	int ret;
+	struct match_device_node_array_param param;
+
+	if (!psy || !size)
+		return -EINVAL;
+
+	power_supply_np = of_parse_phandle(np, property, 0);
+	if (!power_supply_np)
+		return -ENODEV;
+
+	param.parent_of_node = power_supply_np;
+	param.psy = psy;
+	param.psy_size = size;
+	param.psy_count = 0;
+	ret = class_for_each_device(power_supply_class, NULL, &param,
+				    power_supply_match_device_node_array);
+
+	of_node_put(power_supply_np);
+
+	/* propagate -EOVERFLOW as documented; all @size slots are filled */
+	if (ret < 0)
+		return ret;
+
+	return param.psy_count;
+}
+EXPORT_SYMBOL_GPL(power_supply_get_by_phandle_array);
+
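
A minimal caller sketch; the "supplied-from" property name and the array size
are illustrative only::

	static int probe_supplies(struct device *dev)
	{
		struct power_supply *supplies[4];
		int i, n;

		n = power_supply_get_by_phandle_array(dev->of_node,
						      "supplied-from", supplies,
						      ARRAY_SIZE(supplies));
		if (n < 0)
			return n;

		for (i = 0; i < n; i++) {
			/* ... query supplies[i] ... */
			power_supply_put(supplies[i]);
		}
		return 0;
	}
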
 static void devm_power_supply_put(struct device *dev, void *res)
 {
 	struct power_supply **psy = res;
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index c3d7cbc..64fd5d3 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -89,6 +89,7 @@
 	[POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE]	= "Adaptive",
 	[POWER_SUPPLY_CHARGE_TYPE_CUSTOM]	= "Custom",
 	[POWER_SUPPLY_CHARGE_TYPE_LONGLIFE]	= "Long Life",
+	[POWER_SUPPLY_CHARGE_TYPE_TAPER_EXT]	= "Taper",
 };
 
 static const char * const POWER_SUPPLY_HEALTH_TEXT[] = {
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 5c405ff..f59e569 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -9,6 +9,8 @@
 #include "ufs.h"
 #include "ufs-sysfs.h"
 
+#include <trace/hooks/ufshcd.h>
+
 static const char *ufshcd_uic_link_state_to_string(
 			enum uic_link_state state)
 {
@@ -1250,15 +1252,19 @@
 	.attrs = ufs_sysfs_lun_attributes,
 };
 
-void ufs_sysfs_add_nodes(struct device *dev)
+void ufs_sysfs_add_nodes(struct ufs_hba *hba)
 {
 	int ret;
 
-	ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
-	if (ret)
-		dev_err(dev,
+	ret = sysfs_create_groups(&hba->dev->kobj, ufs_sysfs_groups);
+	if (ret) {
+		dev_err(hba->dev,
 			"%s: sysfs groups creation failed (err = %d)\n",
 			__func__, ret);
+		return;
+	}
+
+	trace_android_vh_ufs_update_sysfs(hba);
 }
 
 void ufs_sysfs_remove_nodes(struct device *dev)
diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/scsi/ufs/ufs-sysfs.h
index 0f4e750..ec7b0c8 100644
--- a/drivers/scsi/ufs/ufs-sysfs.h
+++ b/drivers/scsi/ufs/ufs-sysfs.h
@@ -9,7 +9,7 @@
 
 #include "ufshcd.h"
 
-void ufs_sysfs_add_nodes(struct device *dev);
+void ufs_sysfs_add_nodes(struct ufs_hba *hba);
 void ufs_sysfs_remove_nodes(struct device *dev);
 
 extern const struct attribute_group ufs_sysfs_unit_descriptor_group;
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c
index 67402ba..493dd1c 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ b/drivers/scsi/ufs/ufshcd-crypto.c
@@ -123,6 +123,10 @@
 
 	/* Reset might clear all keys, so reprogram all the keys. */
 	blk_crypto_reprogram_all_keys(&hba->crypto_profile);
+
+	if (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE)
+		return false;
+
 	return true;
 }
 
@@ -159,6 +163,9 @@
 	int err = 0;
 	enum blk_crypto_mode_num blk_mode_num;
 
+	if (hba->quirks & UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE)
+		return 0;
+
 	/*
 	 * Don't use crypto if either the hardware doesn't advertise the
 	 * standard crypto capability bit *or* if the vendor specific driver
@@ -190,6 +197,7 @@
 	hba->crypto_profile.ll_ops = ufshcd_crypto_ops;
 	/* UFS only supports 8 bytes for any DUN */
 	hba->crypto_profile.max_dun_bytes_supported = 8;
+	hba->crypto_profile.key_types_supported = BLK_CRYPTO_KEY_TYPE_STANDARD;
 	hba->crypto_profile.dev = hba->dev;
 
 	/*
@@ -228,9 +236,10 @@
 	if (!(hba->caps & UFSHCD_CAP_CRYPTO))
 		return;
 
-	/* Clear all keyslots - the number of keyslots is (CFGC + 1) */
-	for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++)
-		ufshcd_clear_keyslot(hba, slot);
+	/* Clear all keyslots */
+	for (slot = 0; slot < hba->crypto_profile.num_slots; slot++)
+		hba->crypto_profile.ll_ops.keyslot_evict(&hba->crypto_profile,
+							 NULL, slot);
 }
 
 void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h
index e18c012..ae74555 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.h
+++ b/drivers/scsi/ufs/ufshcd-crypto.h
@@ -34,6 +34,19 @@
 	}
 }
 
+static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
+					    struct ufshcd_lrb *lrbp)
+{
+	if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT))
+		return;
+
+	if (!(scsi_cmd_to_rq(lrbp->cmd)->crypt_ctx))
+		return;
+
+	memzero_explicit(lrbp->ucd_prdt_ptr,
+			 hba->sg_entry_size * scsi_sg_count(lrbp->cmd));
+}
+
 bool ufshcd_crypto_enable(struct ufs_hba *hba);
 
 int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba);
@@ -51,6 +64,9 @@
 ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0,
 				   u32 *dword_1, u32 *dword_3) { }
 
+static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
+					    struct ufshcd_lrb *lrbp) { }
+
 static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
 {
 	return false;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 13c09db..855f9b9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -31,6 +31,9 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/ufs.h>
 
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/ufshcd.h>
+
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
 				 UTP_TASK_REQ_COMPL |\
 				 UFSHCD_ERROR_MASK)
@@ -328,6 +331,8 @@
 {
 	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
 
+	trace_android_vh_ufs_send_tm_command(hba, tag, (int)str_t);
+
 	if (!trace_ufshcd_upiu_enabled())
 		return;
 
@@ -349,6 +354,8 @@
 {
 	u32 cmd;
 
+	trace_android_vh_ufs_send_uic_command(hba, ucmd, (int)str_t);
+
 	if (!trace_ufshcd_uic_command_enabled())
 		return;
 
@@ -506,7 +513,7 @@
 		prdt_length = le16_to_cpu(
 			lrbp->utr_descriptor_ptr->prd_table_length);
 		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
-			prdt_length /= sizeof(struct ufshcd_sg_entry);
+			prdt_length /= hba->sg_entry_size;
 
 		dev_err(hba->dev,
 			"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
@@ -515,7 +522,7 @@
 
 		if (pr_prdt)
 			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
-				sizeof(struct ufshcd_sg_entry) * prdt_length);
+				hba->sg_entry_size * prdt_length);
 	}
 }
 
@@ -2099,6 +2106,7 @@
 
 	lrbp->issue_time_stamp = ktime_get();
 	lrbp->compl_time_stamp = ktime_set(0, 0);
+	trace_android_vh_ufs_send_command(hba, lrbp);
 	ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
 	ufshcd_clk_scaling_start_busy(hba);
 	if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
@@ -2358,11 +2366,12 @@
  */
 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 {
-	struct ufshcd_sg_entry *prd_table;
+	struct ufshcd_sg_entry *prd;
 	struct scatterlist *sg;
 	struct scsi_cmnd *cmd;
 	int sg_segments;
 	int i;
+	int err;
 
 	cmd = lrbp->cmd;
 	sg_segments = scsi_dma_map(cmd);
@@ -2373,13 +2382,12 @@
 
 		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
 			lrbp->utr_descriptor_ptr->prd_table_length =
-				cpu_to_le16((sg_segments *
-					sizeof(struct ufshcd_sg_entry)));
+				cpu_to_le16(sg_segments * hba->sg_entry_size);
 		else
 			lrbp->utr_descriptor_ptr->prd_table_length =
 				cpu_to_le16(sg_segments);
 
-		prd_table = lrbp->ucd_prdt_ptr;
+		prd = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
 
 		scsi_for_each_sg(cmd, sg, sg_segments, i) {
 			const unsigned int len = sg_dma_len(sg);
@@ -2393,15 +2401,18 @@
 			 * indicates 4 bytes, '7' indicates 8 bytes, etc."
 			 */
 			WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
-			prd_table[i].size = cpu_to_le32(len - 1);
-			prd_table[i].addr = cpu_to_le64(sg->dma_address);
-			prd_table[i].reserved = 0;
+			prd->size = cpu_to_le32(len - 1);
+			prd->addr = cpu_to_le64(sg->dma_address);
+			prd->reserved = 0;
+			prd = (void *)prd + hba->sg_entry_size;
 		}
 	} else {
 		lrbp->utr_descriptor_ptr->prd_table_length = 0;
 	}
 
-	return 0;
+	err = 0;
+	trace_android_vh_ufs_fill_prdt(hba, lrbp, sg_segments, &err);
+	return err;
 }
 
 /**
@@ -2663,10 +2674,11 @@
 
 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
 {
-	struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
+	struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
+		i * sizeof_utp_transfer_cmd_desc(hba);
 	struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
 	dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
-		i * sizeof(struct utp_transfer_cmd_desc);
+		i * sizeof_utp_transfer_cmd_desc(hba);
 	u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
 				       response_upiu);
 	u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
@@ -2674,11 +2686,11 @@
 	lrb->utr_descriptor_ptr = utrdlp + i;
 	lrb->utrd_dma_addr = hba->utrdl_dma_addr +
 		i * sizeof(struct utp_transfer_req_desc);
-	lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
+	lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp;
 	lrb->ucd_req_dma_addr = cmd_desc_element_addr;
-	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
 	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
-	lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
+	lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
 	lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
 }
 
@@ -2765,6 +2777,14 @@
 
 	ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
 
+	trace_android_vh_ufs_prepare_command(hba, scsi_cmd_to_rq(cmd), lrbp,
+					     &err);
+	if (err) {
+		lrbp->cmd = NULL;
+		ufshcd_release(hba);
+		goto out;
+	}
+
 	lrbp->req_abort_skip = false;
 
 	ufshpb_prep(hba, lrbp);
@@ -3007,7 +3027,7 @@
 	(*request)->upiu_req.selector = selector;
 }
 
-static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+int ufshcd_query_flag_retry(struct ufs_hba *hba,
 	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
 {
 	int ret;
@@ -3029,6 +3049,7 @@
 			__func__, opcode, idn, ret, retries);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_query_flag_retry);
 
 /**
  * ufshcd_query_flag() - API function for sending flag query requests
@@ -3097,6 +3118,7 @@
 	ufshcd_release(hba);
 	return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_query_flag);
 
 /**
  * ufshcd_query_attr - API function for sending attribute requests
@@ -3160,6 +3182,7 @@
 	ufshcd_release(hba);
 	return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_query_attr);
 
 /**
  * ufshcd_query_attr_retry() - API function for sending query
@@ -3197,6 +3220,7 @@
 			__func__, idn, ret, QUERY_REQ_RETRIES);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_query_attr_retry);
 
 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
 			enum query_opcode opcode, enum desc_idn idn, u8 index,
@@ -3292,6 +3316,7 @@
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_query_descriptor_retry);
 
 /**
  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
@@ -3410,6 +3435,7 @@
 		kfree(desc_buf);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_read_desc_param);
 
 /**
  * struct uc_string_id - unicode string
@@ -3583,7 +3609,7 @@
 	size_t utmrdl_size, utrdl_size, ucdl_size;
 
 	/* Allocate memory for UTP command descriptors */
-	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+	ucdl_size = (sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs);
 	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
 						  ucdl_size,
 						  &hba->ucdl_dma_addr,
@@ -3677,7 +3703,7 @@
 	prdt_offset =
 		offsetof(struct utp_transfer_cmd_desc, prd_table);
 
-	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+	cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
 	cmd_desc_dma_addr = hba->ucdl_dma_addr;
 
 	for (i = 0; i < hba->nutrs; i++) {
@@ -5275,12 +5301,14 @@
 		lrbp->compl_time_stamp = ktime_get();
 		cmd = lrbp->cmd;
 		if (cmd) {
+			trace_android_vh_ufs_compl_command(hba, lrbp);
 			if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
 				ufshcd_update_monitor(hba, lrbp);
 			ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
 			result = ufshcd_transfer_rsp_status(hba, lrbp);
 			scsi_dma_unmap(cmd);
 			cmd->result = result;
+			ufshcd_crypto_clear_prdt(hba, lrbp);
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
 			/* Do not touch lrbp after scsi done */
@@ -5290,6 +5318,7 @@
 		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
 			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
 			if (hba->dev_cmd.complete) {
+				trace_android_vh_ufs_compl_command(hba, lrbp);
 				ufshcd_add_command_trace(hba, index,
 							 UFS_DEV_COMP);
 				complete(hba->dev_cmd.complete);
@@ -5545,7 +5574,7 @@
  * to know whether auto bkops is enabled or disabled after this function
  * returns control to it.
  */
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
+int ufshcd_bkops_ctrl(struct ufs_hba *hba,
 			     enum bkops_status status)
 {
 	int err;
@@ -5570,6 +5599,7 @@
 out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_bkops_ctrl);
 
 /**
  * ufshcd_urgent_bkops - handle urgent bkops exception event
@@ -6404,6 +6434,8 @@
 		queue_eh_work = true;
 	}
 
+	trace_android_vh_ufs_check_int_errors(hba, queue_eh_work);
+
 	if (queue_eh_work) {
 		/*
 		 * update the transfer error masks to sticky bits, let's do this
@@ -9450,6 +9482,7 @@
 	hba->dev = dev;
 	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
 	hba->nop_out_timeout = NOP_OUT_TIMEOUT;
+	hba->sg_entry_size = sizeof(struct ufshcd_sg_entry);
 	INIT_LIST_HEAD(&hba->clk_list_head);
 	spin_lock_init(&hba->outstanding_lock);
 
@@ -9671,7 +9704,7 @@
 	ufshcd_set_ufs_dev_active(hba);
 
 	async_schedule(ufshcd_async_scan, hba);
-	ufs_sysfs_add_nodes(hba->dev);
+	ufs_sysfs_add_nodes(hba);
 
 	device_enable_async_suspend(dev);
 	return 0;
@@ -9794,11 +9827,6 @@
 {
 	int ret;
 
-	/* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
-	static_assert(sizeof(struct utp_transfer_cmd_desc) ==
-		      2 * ALIGNED_UPIU_SIZE +
-			      SG_ALL * sizeof(struct ufshcd_sg_entry));
-
 	ufs_debugfs_init();
 
 	ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 54750d7..ea5d19d 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -601,6 +601,28 @@
 	 * support physical host configuration.
 	 */
 	UFSHCD_QUIRK_SKIP_PH_CONFIGURATION		= 1 << 16,
+	/*
+	 * This quirk needs to be enabled if the host controller supports inline
+	 * encryption, but it needs to initialize the crypto capabilities in a
+	 * nonstandard way and/or it needs to override blk_crypto_ll_ops.  If
+	 * enabled, the standard code won't initialize the blk_crypto_profile;
+	 * ufs_hba_variant_ops::init() must do it instead.
+	 */
+	UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE		= 1 << 20,
+
+	/*
+	 * This quirk needs to be enabled if the host controller supports inline
+	 * encryption, but the CRYPTO_GENERAL_ENABLE bit is not implemented and
+	 * breaks the HCE sequence if used.
+	 */
+	UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE		= 1 << 21,
+
+	/*
+	 * This quirk needs to be enabled if the host controller requires that
+	 * the PRDT be cleared after each encrypted request because encryption
+	 * keys were stored in it.
+	 */
+	UFSHCD_QUIRK_KEYS_IN_PRDT			= 1 << 22,
 };
 
 enum ufshcd_caps {
@@ -747,6 +769,7 @@
  * @ufs_version: UFS Version to which controller complies
  * @vops: pointer to variant specific operations
  * @priv: pointer to variant specific private data
+ * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
  * @uic_cmd_mutex: mutex for UIC command
@@ -840,6 +863,7 @@
 	const struct ufs_hba_variant_ops *vops;
 	struct ufs_hba_variant_params *vps;
 	void *priv;
+	size_t sg_entry_size;
 	unsigned int irq;
 	bool is_irq_enabled;
 	enum ufs_ref_clk_freq dev_ref_clk_freq;
@@ -1181,8 +1205,14 @@
 			    u32 *attr_val);
 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
+int ufshcd_query_attr_retry(struct ufs_hba *hba,
+	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+	u32 *attr_val);
 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 	enum flag_idn idn, u8 index, bool *flag_res);
+int ufshcd_query_flag_retry(struct ufs_hba *hba,
+	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res);
+int ufshcd_bkops_ctrl(struct ufs_hba *hba, enum bkops_status status);
 
 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 6a295c8..073d5cf 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -423,20 +423,28 @@
 	__le64    addr;
 	__le32    reserved;
 	__le32    size;
+	/*
+	 * followed by variant-specific fields if
+	 * hba->sg_entry_size != sizeof(struct ufshcd_sg_entry)
+	 */
 };
 
 /**
  * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD)
  * @command_upiu: Command UPIU Frame address
  * @response_upiu: Response UPIU Frame address
- * @prd_table: Physical Region Descriptor
+ * @prd_table: Physical Region Descriptor: an array of SG_ALL struct
+ *	ufshcd_sg_entry's.  Variant-specific fields may be present after each.
  */
 struct utp_transfer_cmd_desc {
 	u8 command_upiu[ALIGNED_UPIU_SIZE];
 	u8 response_upiu[ALIGNED_UPIU_SIZE];
-	struct ufshcd_sg_entry    prd_table[SG_ALL];
+	u8 prd_table[];
 };
 
+#define sizeof_utp_transfer_cmd_desc(hba)	\
+	(sizeof(struct utp_transfer_cmd_desc) + SG_ALL * (hba)->sg_entry_size)
+
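
A sketch of how a vendor driver would combine the new hooks; the variant PRDT
entry layout and the quirk selection are illustrative only::

	/* standard sg entry followed by vendor-private words */
	struct vendor_sg_entry {
		struct ufshcd_sg_entry std;
		__le32 private[2];
	};

	static int vendor_ufs_init(struct ufs_hba *hba)
	{
		hba->sg_entry_size = sizeof(struct vendor_sg_entry);
		hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE |
			       UFSHCD_QUIRK_KEYS_IN_PRDT;
		return 0;
	}

With sg_entry_size widened this way, sizeof_utp_transfer_cmd_desc() and the
PRDT walk in ufshcd_map_sg() stride over the vendor words automatically, and
ufshcd_crypto_clear_prdt() wipes them after each encrypted request.
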
 /**
  * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
  * @dword0: Descriptor Header DW0
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 70498ad..9aca9ce 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -14,6 +14,17 @@
 	  It is, in theory, a good memory allocator for low-memory devices,
 	  because it can discard shared memory units when under memory pressure.
 
+config DEBUG_KINFO
+	bool "Debug Kernel Information Support"
+	depends on KALLSYMS
+	help
+	  This supports kernel information backup for bootloader usage.
+	  Specifics:
+	   - The kallsyms symbols for unwind_backtrace
+	   - Page directory pointer
+	   - UTS_RELEASE
+	   - BUILD_INFO(ro.build.fingerprint)
+
 endif # if ANDROID
 
 endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index e9a55a5..b92c5fe 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -2,3 +2,4 @@
 ccflags-y += -I$(src)			# needed for trace events
 
 obj-$(CONFIG_ASHMEM)			+= ashmem.o
+obj-$(CONFIG_DEBUG_KINFO)	+= debug_kinfo.o
diff --git a/drivers/staging/android/debug_kinfo.c b/drivers/staging/android/debug_kinfo.c
new file mode 100644
index 0000000..8e67fb3
--- /dev/null
+++ b/drivers/staging/android/debug_kinfo.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * debug_kinfo.c - backup kernel information for bootloader usage
+ *
+ * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ * Copyright 2021 Google LLC
+ */
+
+#include <linux/platform_device.h>
+#include <linux/kallsyms.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pgtable.h>
+#include <asm/module.h>
+#include "debug_kinfo.h"
+
+/*
+ * These will be re-linked against their real values
+ * during the second link stage.
+ */
+extern const unsigned long kallsyms_addresses[] __weak;
+extern const int kallsyms_offsets[] __weak;
+extern const u8 kallsyms_names[] __weak;
+
+/*
+ * Tell the compiler that the count isn't in the small data section if the arch
+ * has one (e.g. FRV).
+ */
+extern const unsigned int kallsyms_num_syms __weak
+__section(".rodata");
+
+extern const unsigned long kallsyms_relative_base __weak
+__section(".rodata");
+
+extern const u8 kallsyms_token_table[] __weak;
+extern const u16 kallsyms_token_index[] __weak;
+
+extern const unsigned int kallsyms_markers[] __weak;
+
+static void *all_info_addr;
+static u32 all_info_size;
+
+static void update_kernel_all_info(struct kernel_all_info *all_info)
+{
+	int index;
+	struct kernel_info *info;
+	u32 *checksum_info;
+
+	all_info->magic_number = DEBUG_KINFO_MAGIC;
+	all_info->combined_checksum = 0;
+
+	info = &(all_info->info);
+	checksum_info = (u32 *)info;
+	for (index = 0; index < sizeof(*info) / sizeof(u32); index++)
+		all_info->combined_checksum ^= checksum_info[index];
+}
+
+static int build_info_set(const char *str, const struct kernel_param *kp)
+{
+	struct kernel_all_info *all_info;
+	size_t build_info_size;
+	int ret = 0;
+
+	if (!all_info_addr || !all_info_size) {
+		ret = -EPERM;
+		goto Exit;
+	}
+
+	all_info = (struct kernel_all_info *)all_info_addr;
+	build_info_size = sizeof(all_info->info.build_info);
+
+	memcpy(&all_info->info.build_info, str, min(build_info_size - 1, strlen(str)));
+	update_kernel_all_info(all_info);
+
+	if (strlen(str) > build_info_size - 1) {
+		pr_warn("%s: Build info buffer (len: %zd) can't hold entire string '%s'\n",
+				__func__, build_info_size, str);
+		ret = -ENOMEM;
+	}
+
+Exit:
+	vunmap(all_info_addr);
+	return ret;
+}
+
+static const struct kernel_param_ops build_info_op = {
+	.set = build_info_set,
+};
+
+module_param_cb(build_info, &build_info_op, NULL, 0200);
+MODULE_PARM_DESC(build_info, "Write build info to field 'build_info' of debug kinfo.");
+
+static int debug_kinfo_probe(struct platform_device *pdev)
+{
+	struct device_node *mem_region;
+	struct reserved_mem *rmem;
+	struct kernel_all_info *all_info;
+	struct kernel_info *info;
+
+	mem_region = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+	if (!mem_region) {
+		dev_warn(&pdev->dev, "no such memory-region\n");
+		return -ENODEV;
+	}
+
+	rmem = of_reserved_mem_lookup(mem_region);
+	if (!rmem) {
+		dev_warn(&pdev->dev, "no such reserved mem of node name %s\n",
+				pdev->dev.of_node->name);
+		return -ENODEV;
+	}
+
+	/* Need to wait for the reserved memory to be mapped */
+	if (!rmem->priv)
+		return -EPROBE_DEFER;
+
+	if (!rmem->base || !rmem->size) {
+		dev_warn(&pdev->dev, "unexpected reserved memory\n");
+		return -EINVAL;
+	}
+
+	if (rmem->size < sizeof(struct kernel_all_info)) {
+		dev_warn(&pdev->dev, "unexpected reserved memory size\n");
+		return -EINVAL;
+	}
+
+	all_info_addr = rmem->priv;
+	all_info_size = rmem->size;
+
+	memset(all_info_addr, 0, sizeof(struct kernel_all_info));
+	all_info = (struct kernel_all_info *)all_info_addr;
+	info = &(all_info->info);
+	info->enabled_all = IS_ENABLED(CONFIG_KALLSYMS_ALL);
+	info->enabled_base_relative = IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE);
+	info->enabled_absolute_percpu = IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU);
+	info->enabled_cfi_clang = IS_ENABLED(CONFIG_CFI_CLANG);
+	info->num_syms = kallsyms_num_syms;
+	info->name_len = KSYM_NAME_LEN;
+	info->bit_per_long = BITS_PER_LONG;
+	info->module_name_len = MODULE_NAME_LEN;
+	info->symbol_len = KSYM_SYMBOL_LEN;
+	if (!info->enabled_base_relative) {
+		info->_addresses_pa = (u64)__pa_symbol((volatile void *)kallsyms_addresses);
+	} else {
+		info->_relative_pa = (u64)__pa_symbol((volatile void *)kallsyms_relative_base);
+		info->_offsets_pa = (u64)__pa_symbol((volatile void *)kallsyms_offsets);
+	}
+	info->_stext_pa = (u64)__pa_symbol(_stext);
+	info->_etext_pa = (u64)__pa_symbol(_etext);
+	info->_sinittext_pa = (u64)__pa_symbol(_sinittext);
+	info->_einittext_pa = (u64)__pa_symbol(_einittext);
+	info->_end_pa = (u64)__pa_symbol(_end);
+	info->_names_pa = (u64)__pa_symbol((volatile void *)kallsyms_names);
+	info->_token_table_pa = (u64)__pa_symbol((volatile void *)kallsyms_token_table);
+	info->_token_index_pa = (u64)__pa_symbol((volatile void *)kallsyms_token_index);
+	info->_markers_pa = (u64)__pa_symbol((volatile void *)kallsyms_markers);
+	info->thread_size = THREAD_SIZE;
+	info->swapper_pg_dir_pa = (u64)__pa_symbol(swapper_pg_dir);
+	strlcpy(info->last_uts_release, init_utsname()->release, sizeof(info->last_uts_release));
+	info->enabled_modules_tree_lookup = IS_ENABLED(CONFIG_MODULES_TREE_LOOKUP);
+	info->mod_core_layout_offset = offsetof(struct module, core_layout);
+	info->mod_init_layout_offset = offsetof(struct module, init_layout);
+	info->mod_kallsyms_offset = offsetof(struct module, kallsyms);
+#if defined(CONFIG_RANDOMIZE_BASE) && defined(MODULES_VSIZE)
+	info->module_start_va = module_alloc_base;
+	info->module_end_va = info->module_start_va + MODULES_VSIZE;
+#elif defined(CONFIG_MODULES) && defined(MODULES_VADDR)
+	info->module_start_va = MODULES_VADDR;
+	info->module_end_va = MODULES_END;
+#else
+	info->module_start_va = VMALLOC_START;
+	info->module_end_va = VMALLOC_END;
+#endif
+	update_kernel_all_info(all_info);
+
+	return 0;
+}
+
+static const struct of_device_id debug_kinfo_of_match[] = {
+	{ .compatible	= "google,debug-kinfo" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, debug_kinfo_of_match);
+
+static struct platform_driver debug_kinfo_driver = {
+	.probe = debug_kinfo_probe,
+	.driver = {
+		.name = "debug-kinfo",
+		.of_match_table = of_match_ptr(debug_kinfo_of_match),
+	},
+};
+module_platform_driver(debug_kinfo_driver);
+
+MODULE_AUTHOR("Jone Chou <jonechou@google.com>");
+MODULE_DESCRIPTION("Debug Kinfo Driver");
+MODULE_LICENSE("GPL v2");
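
A minimal userspace sketch for handing the build fingerprint to the driver;
the parameter path follows from module_param_cb() above, and the fingerprint
string is a placeholder::

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/module/debug_kinfo/parameters/build_info",
				"w");

		if (!f)
			return 1;
		/* build_info_set() truncates to BUILD_INFO_LEN - 1 bytes */
		fprintf(f, "vendor/device/device:12/BUILD.ID/1234:user/release-keys");
		fclose(f);
		return 0;
	}
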
diff --git a/drivers/staging/android/debug_kinfo.h b/drivers/staging/android/debug_kinfo.h
new file mode 100644
index 0000000..921f140
--- /dev/null
+++ b/drivers/staging/android/debug_kinfo.h
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * debug_kinfo.h - backup kernel information for bootloader usage
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#ifndef DEBUG_KINFO_H
+#define DEBUG_KINFO_H
+
+#include <linux/utsname.h>
+
+#define BUILD_INFO_LEN		256
+#define DEBUG_KINFO_MAGIC	0xCCEEDDFF
+
+/*
+ * Header structure must be byte-packed, since the table is provided to
+ * bootloader.
+ */
+struct kernel_info {
+	/* For kallsyms */
+	__u8 enabled_all;
+	__u8 enabled_base_relative;
+	__u8 enabled_absolute_percpu;
+	__u8 enabled_cfi_clang;
+	__u32 num_syms;
+	__u16 name_len;
+	__u16 bit_per_long;
+	__u16 module_name_len;
+	__u16 symbol_len;
+	__u64 _addresses_pa;
+	__u64 _relative_pa;
+	__u64 _stext_pa;
+	__u64 _etext_pa;
+	__u64 _sinittext_pa;
+	__u64 _einittext_pa;
+	__u64 _end_pa;
+	__u64 _offsets_pa;
+	__u64 _names_pa;
+	__u64 _token_table_pa;
+	__u64 _token_index_pa;
+	__u64 _markers_pa;
+
+	/* For frame pointer */
+	__u32 thread_size;
+
+	/* For virt_to_phys */
+	__u64 swapper_pg_dir_pa;
+
+	/* For linux banner */
+	__u8 last_uts_release[__NEW_UTS_LEN];
+
+	/* Info of running build */
+	__u8 build_info[BUILD_INFO_LEN];
+
+	/* For module kallsyms */
+	__u32 enabled_modules_tree_lookup;
+	__u32 mod_core_layout_offset;
+	__u32 mod_init_layout_offset;
+	__u32 mod_kallsyms_offset;
+	__u64 module_start_va;
+	__u64 module_end_va;
+} __packed;
+
+struct kernel_all_info {
+	__u32 magic_number;
+	__u32 combined_checksum;
+	struct kernel_info info;
+} __packed;
+
+#endif // DEBUG_KINFO_H
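
A bootloader-side validation sketch mirroring update_kernel_all_info(): the
backup is trusted only if the magic matches and XOR-ing all 32-bit words of
struct kernel_info reproduces combined_checksum::

	static int kernel_all_info_valid(const struct kernel_all_info *all)
	{
		const __u32 *words = (const __u32 *)&all->info;
		__u32 sum = 0;
		unsigned int i;

		if (all->magic_number != DEBUG_KINFO_MAGIC)
			return 0;

		for (i = 0; i < sizeof(all->info) / sizeof(__u32); i++)
			sum ^= words[i];

		return sum == all->combined_checksum;
	}
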
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 18d0058..97200fb 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -30,7 +30,7 @@
  * for the tty device.  Since this driver supports hotplug of vty adapters we
  * need to make sure we have enough allocated.
  */
-#define HVC_ALLOC_TTY_ADAPTERS	8
+#define HVC_ALLOC_TTY_ADAPTERS	64
 
 struct hvc_struct {
 	struct tty_port port;
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 8e0edb7..03c0a18 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -3,6 +3,7 @@
 
 #include <linux/console.h>
 #include <linux/init.h>
+#include <linux/moduleparam.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 
@@ -11,6 +12,13 @@
 
 #include "hvc_console.h"
 
+/*
+ * Disable the DCC driver by default at runtime. The driver is built into GKI
+ * kernels, but some devices do not implement the DCC registers and crash when
+ * the driver pokes them.
+ */
+static bool enable;
+module_param(enable, bool, 0444);
+
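
Since the parameter is read-only at runtime (mode 0444), DCC console support
is opted into from the kernel command line, e.g.::

	hvc_dcc.enable=1
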
 /* DCC Status Bits */
 #define DCC_STATUS_RX		(1 << 30)
 #define DCC_STATUS_TX		(1 << 29)
@@ -91,7 +99,7 @@
 {
 	int ret;
 
-	if (!hvc_dcc_check())
+	if (!enable || !hvc_dcc_check())
 		return -ENODEV;
 
 	/* Returns -1 if error */
@@ -105,7 +113,7 @@
 {
 	struct hvc_struct *p;
 
-	if (!hvc_dcc_check())
+	if (!enable || !hvc_dcc_check())
 		return -ENODEV;
 
 	p = hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index fc543ac..dfb46f6 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -236,7 +236,6 @@
 
 config SERIAL_SAMSUNG
 	tristate "Samsung SoC serial support"
-	depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_APPLE || COMPILE_TEST
 	select SERIAL_CORE
 	help
 	  Support for the on-chip UARTs on the Samsung
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index bbfd004..0020762 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -55,6 +55,8 @@
 #include <asm/ptrace.h>
 #include <asm/irq_regs.h>
 
+#include <trace/hooks/sysrqcrash.h>
+
 /* Whether we react on sysrq keys or just ignore them */
 static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
 static bool __read_mostly sysrq_always_enabled;
@@ -152,6 +154,8 @@
 	/* release the RCU read lock before crashing */
 	rcu_read_unlock();
 
+	trace_android_vh_sysrq_crash(current);
+
 	panic("sysrq triggered crash\n");
 }
 static const struct sysrq_key_op sysrq_crash_op = {
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index dd58094..bc2cfb4 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -216,6 +216,12 @@
 config USB_F_TCM
 	tristate
 
+config USB_F_ACC
+	tristate
+
+config USB_F_AUDIO_SRC
+	tristate
+
 # this first set of drivers all depend on bulk-capable hardware.
 
 config USB_CONFIGFS
@@ -230,6 +236,14 @@
 	  appropriate symbolic links.
 	  For more information see Documentation/usb/gadget_configfs.rst.
 
+config USB_CONFIGFS_UEVENT
+	bool "Uevent notification of Gadget state"
+	depends on USB_CONFIGFS
+	help
+	  Enable uevent notifications to userspace when the gadget
+	  state changes. The gadget can be in one of three states:
+	  CONNECTED, DISCONNECTED, or CONFIGURED.
+
 config USB_CONFIGFS_SERIAL
 	bool "Generic serial bulk in/out"
 	depends on USB_CONFIGFS
@@ -371,6 +385,23 @@
 	  implemented in kernel space (for instance Ethernet, serial or
 	  mass storage) and other are implemented in user space.
 
+config USB_CONFIGFS_F_ACC
+	bool "Accessory gadget"
+	depends on USB_CONFIGFS
+	depends on HID=y
+	select USB_F_ACC
+	help
+	  USB gadget Accessory support
+
+config USB_CONFIGFS_F_AUDIO_SRC
+	bool "Audio Source gadget"
+	depends on USB_CONFIGFS
+	depends on SND
+	select SND_PCM
+	select USB_F_AUDIO_SRC
+	help
+	  USB gadget Audio Source support
+
 config USB_CONFIGFS_F_UAC1
 	bool "Audio Class 1.0"
 	depends on USB_CONFIGFS
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 36c611d..59f25dc 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -10,6 +10,32 @@
 #include "u_f.h"
 #include "u_os_desc.h"
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+#include <linux/platform_device.h>
+#include <linux/kdev_t.h>
+#include <linux/usb/ch9.h>
+
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+extern int acc_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl);
+void acc_disconnect(void);
+#endif
+static struct class *android_class;
+static struct device *android_device;
+static int index;
+static int gadget_index;
+
+struct device *create_function_device(char *name)
+{
+	if (android_device && !IS_ERR(android_device))
+		return device_create(android_class, android_device,
+			MKDEV(0, index++), NULL, name);
+	else
+		return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(create_function_device);
+#endif
+
 int check_user_usb_string(const char *name,
 		struct usb_gadget_strings *stringtab_dev)
 {
@@ -51,6 +77,12 @@
 	char qw_sign[OS_STRING_QW_SIGN_LEN];
 	spinlock_t spinlock;
 	bool unbind;
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	bool connected;
+	bool sw_connected;
+	struct work_struct work;
+	struct device *dev;
+#endif
 };
 
 static inline struct gadget_info *to_gadget_info(struct config_item *item)
@@ -277,7 +309,7 @@
 
 	mutex_lock(&gi->lock);
 
-	if (!strlen(name)) {
+	if (!strlen(name) || strcmp(name, "none") == 0) {
 		ret = unregister_gadget(gi);
 		if (ret)
 			goto err;
@@ -1434,6 +1466,57 @@
 	return ret;
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+static void android_work(struct work_struct *data)
+{
+	struct gadget_info *gi = container_of(data, struct gadget_info, work);
+	struct usb_composite_dev *cdev = &gi->cdev;
+	char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+	char *connected[2]    = { "USB_STATE=CONNECTED", NULL };
+	char *configured[2]   = { "USB_STATE=CONFIGURED", NULL };
+	/* 0-connected 1-configured 2-disconnected */
+	bool status[3] = { false, false, false };
+	unsigned long flags;
+	bool uevent_sent = false;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		status[1] = true;
+
+	if (gi->connected != gi->sw_connected) {
+		if (gi->connected)
+			status[0] = true;
+		else
+			status[2] = true;
+		gi->sw_connected = gi->connected;
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	if (status[0]) {
+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, connected);
+		pr_info("%s: sent uevent %s\n", __func__, connected[0]);
+		uevent_sent = true;
+	}
+
+	if (status[1]) {
+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, configured);
+		pr_info("%s: sent uevent %s\n", __func__, configured[0]);
+		uevent_sent = true;
+	}
+
+	if (status[2]) {
+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, disconnected);
+		pr_info("%s: sent uevent %s\n", __func__, disconnected[0]);
+		uevent_sent = true;
+	}
+
+	if (!uevent_sent) {
+		pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+			gi->connected, gi->sw_connected, cdev->config);
+	}
+}
+#endif
+
 static void configfs_composite_unbind(struct usb_gadget *gadget)
 {
 	struct usb_composite_dev	*cdev;
@@ -1459,6 +1542,50 @@
 	spin_unlock_irqrestore(&gi->spinlock, flags);
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+static int android_setup(struct usb_gadget *gadget,
+			const struct usb_ctrlrequest *c)
+{
+	struct usb_composite_dev *cdev = get_gadget_data(gadget);
+	unsigned long flags;
+	struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
+	int value = -EOPNOTSUPP;
+	struct usb_function_instance *fi;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (!gi->connected) {
+		gi->connected = 1;
+		schedule_work(&gi->work);
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
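+	/*
+	 * Delegation chain: offer the control request to each available
+	 * function instance first, then to the accessory hook (so AOA
+	 * requests are handled even before the accessory function is
+	 * bound), and finally fall back to the composite core.
+	 */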
+	list_for_each_entry(fi, &gi->available_func, cfs_list) {
+		if (fi != NULL && fi->f != NULL && fi->f->setup != NULL) {
+			value = fi->f->setup(fi->f, c);
+			if (value >= 0)
+				break;
+		}
+	}
+
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+	if (value < 0)
+		value = acc_ctrlrequest(cdev, c);
+#endif
+
+	if (value < 0)
+		value = composite_setup(gadget, c);
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
+						cdev->config) {
+		schedule_work(&gi->work);
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	return value;
+}
+
+#else // CONFIG_USB_CONFIGFS_UEVENT
+
 static int configfs_composite_setup(struct usb_gadget *gadget,
 		const struct usb_ctrlrequest *ctrl)
 {
@@ -1484,6 +1611,8 @@
 	return ret;
 }
 
+#endif // CONFIG_USB_CONFIGFS_UEVENT
+
 static void configfs_composite_disconnect(struct usb_gadget *gadget)
 {
 	struct usb_composite_dev *cdev;
@@ -1494,6 +1623,14 @@
 	if (!cdev)
 		return;
 
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+	/*
+	 * accessory HID support can be active while the
+	 * accessory function is not actually enabled,
+	 * so we need to inform it when we are disconnected.
+	 */
+	acc_disconnect();
+#endif
 	gi = container_of(cdev, struct gadget_info, cdev);
 	spin_lock_irqsave(&gi->spinlock, flags);
 	cdev = get_gadget_data(gadget);
@@ -1502,6 +1639,10 @@
 		return;
 	}
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	gi->connected = 0;
+	schedule_work(&gi->work);
+#endif
 	composite_disconnect(gadget);
 	spin_unlock_irqrestore(&gi->spinlock, flags);
 }
@@ -1576,10 +1717,13 @@
 	.bind           = configfs_composite_bind,
 	.unbind         = configfs_composite_unbind,
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	.setup          = android_setup,
+#else
 	.setup          = configfs_composite_setup,
+#endif
 	.reset          = configfs_composite_reset,
 	.disconnect     = configfs_composite_disconnect,
-
 	.suspend	= configfs_composite_suspend,
 	.resume		= configfs_composite_resume,
 
@@ -1591,6 +1735,91 @@
 	.match_existing_only = 1,
 };
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
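+/*
+ * Backs /sys/class/android_usb/android%d/state; userspace (on Android,
+ * typically the USB system service) reads it to mirror the gadget state:
+ *
+ *   $ cat /sys/class/android_usb/android0/state
+ *   CONFIGURED
+ */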
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+			char *buf)
+{
+	struct gadget_info *dev = dev_get_drvdata(pdev);
+	struct usb_composite_dev *cdev;
+	char *state = "DISCONNECTED";
+	unsigned long flags;
+
+	if (!dev)
+		goto out;
+
+	cdev = &dev->cdev;
+
+	if (!cdev)
+		goto out;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		state = "CONFIGURED";
+	else if (dev->connected)
+		state = "CONNECTED";
+	spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+	return sprintf(buf, "%s\n", state);
+}
+
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+
+static struct device_attribute *android_usb_attributes[] = {
+	&dev_attr_state,
+	NULL
+};
+
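+/*
+ * Creates the per-gadget "android%d" device; the first one created also
+ * becomes the parent for nodes handed out by create_function_device().
+ */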
+static int android_device_create(struct gadget_info *gi)
+{
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+
+	INIT_WORK(&gi->work, android_work);
+	gi->dev = device_create(android_class, NULL,
+			MKDEV(0, 0), NULL, "android%d", gadget_index++);
+	if (IS_ERR(gi->dev))
+		return PTR_ERR(gi->dev);
+
+	dev_set_drvdata(gi->dev, gi);
+	if (!android_device)
+		android_device = gi->dev;
+
+	attrs = android_usb_attributes;
+	while ((attr = *attrs++)) {
+		int err;
+
+		err = device_create_file(gi->dev, attr);
+		if (err) {
+			device_destroy(gi->dev->class,
+				       gi->dev->devt);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void android_device_destroy(struct gadget_info *gi)
+{
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+
+	attrs = android_usb_attributes;
+	while ((attr = *attrs++))
+		device_remove_file(gi->dev, attr);
+	device_destroy(gi->dev->class, gi->dev->devt);
+}
+#else
+static inline int android_device_create(struct gadget_info *gi)
+{
+	return 0;
+}
+
+static inline void android_device_destroy(struct gadget_info *gi)
+{
+}
+#endif
+
 static struct config_group *gadgets_make(
 		struct config_group *group,
 		const char *name)
@@ -1643,7 +1872,11 @@
 	if (!gi->composite.gadget_driver.function)
 		goto err;
 
+	if (android_device_create(gi) < 0)
+		goto err;
+
 	return &gi->group;
+
 err:
 	kfree(gi);
 	return ERR_PTR(-ENOMEM);
@@ -1651,7 +1884,11 @@
 
 static void gadgets_drop(struct config_group *group, struct config_item *item)
 {
+	struct gadget_info *gi;
+
+	gi = container_of(to_config_group(item), struct gadget_info, group);
 	config_item_put(item);
+	android_device_destroy(gi);
 }
 
 static struct configfs_group_operations gadgets_ops = {
@@ -1691,6 +1928,13 @@
 	config_group_init(&gadget_subsys.su_group);
 
 	ret = configfs_register_subsystem(&gadget_subsys);
+
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	android_class = class_create(THIS_MODULE, "android_usb");
+	if (IS_ERR(android_class))
+		return PTR_ERR(android_class);
+#endif
+
 	return ret;
 }
 module_init(gadget_cfs_init);
@@ -1698,5 +1942,10 @@
 static void __exit gadget_cfs_exit(void)
 {
 	configfs_unregister_subsystem(&gadget_subsys);
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	if (!IS_ERR(android_class))
+		class_destroy(android_class);
+#endif
 }
 module_exit(gadget_cfs_exit);
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 5d3a6cf..dd33a12 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -50,3 +50,7 @@
 obj-$(CONFIG_USB_F_PRINTER)	+= usb_f_printer.o
 usb_f_tcm-y			:= f_tcm.o
 obj-$(CONFIG_USB_F_TCM)		+= usb_f_tcm.o
+usb_f_accessory-y		:= f_accessory.o
+obj-$(CONFIG_USB_F_ACC)		+= usb_f_accessory.o
+usb_f_audio_source-y		:= f_audio_source.o
+obj-$(CONFIG_USB_F_AUDIO_SRC)	+= usb_f_audio_source.o
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
new file mode 100644
index 0000000..4fb58375
--- /dev/null
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -0,0 +1,1543 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/kref.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#define MAX_INST_NAME_LEN        40
+#define BULK_BUFFER_SIZE    16384
+#define ACC_STRING_SIZE     256
+
+#define PROTOCOL_VERSION    2
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX	0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_hid_dev {
+	struct list_head	list;
+	struct hid_device *hid;
+	struct acc_dev *dev;
+	/* accessory defined ID */
+	int id;
+	/* HID report descriptor */
+	u8 *report_desc;
+	/* length of HID report descriptor */
+	int report_desc_len;
+	/* number of bytes of report_desc we have received so far */
+	int report_desc_offset;
+};
+
+struct acc_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+	struct acc_dev_ref *ref;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+
+	/* online indicates the state of function_set_alt & function_unbind;
+	 * set to 1 when we connect
+	 */
+	int online;
+
+	/* disconnected indicates state of open & release
+	 * Set to 1 when we disconnect.
+	 * Not cleared until our file is closed.
+	 */
+	int disconnected;
+
+	/* strings sent by the host */
+	char manufacturer[ACC_STRING_SIZE];
+	char model[ACC_STRING_SIZE];
+	char description[ACC_STRING_SIZE];
+	char version[ACC_STRING_SIZE];
+	char uri[ACC_STRING_SIZE];
+	char serial[ACC_STRING_SIZE];
+
+	/* for acc_complete_set_string */
+	int string_index;
+
+	/* set to 1 if we have a pending start request */
+	int start_requested;
+
+	int audio_mode;
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+
+	struct list_head tx_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;
+
+	/* delayed work for handling ACCESSORY_START */
+	struct delayed_work start_work;
+
+	/* work for handling ACCESSORY GET PROTOCOL */
+	struct work_struct getprotocol_work;
+
+	/* work for handling ACCESSORY SEND STRING */
+	struct work_struct sendstring_work;
+
+	/* worker for registering and unregistering hid devices */
+	struct work_struct hid_work;
+
+	/* list of active HID devices */
+	struct list_head	hid_list;
+
+	/* list of new HID devices to register */
+	struct list_head	new_hid_list;
+
+	/* list of dead HID devices to unregister */
+	struct list_head	dead_hid_list;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 2,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+static struct usb_endpoint_descriptor acc_superspeedplus_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor acc_superspeedplus_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeedplus_comp_desc = {
+	.bLength                = sizeof(acc_superspeedplus_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =         0, */
+	/* .bmAttributes =      0, */
+};
+
+static struct usb_endpoint_descriptor acc_superspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor acc_superspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeed_comp_desc = {
+	.bLength                = sizeof(acc_superspeed_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =         0, */
+	/* .bmAttributes =      0, */
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_comp_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_out_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_comp_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ssp_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_superspeedplus_in_desc,
+	(struct usb_descriptor_header *) &acc_superspeedplus_comp_desc,
+	(struct usb_descriptor_header *) &acc_superspeedplus_out_desc,
+	(struct usb_descriptor_header *) &acc_superspeedplus_comp_desc,
+	NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+	[INTERFACE_STRING_INDEX].s	= "Android Accessory Interface",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+	&acc_string_table,
+	NULL,
+};
+
+struct acc_dev_ref {
+	struct kref	kref;
+	struct acc_dev	*acc_dev;
+};
+
+static struct acc_dev_ref _acc_dev_ref = {
+	.kref = KREF_INIT(0),
+};
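+/*
+ * The singleton starts at refcount 0, so get_acc_dev() fails until
+ * acc_setup() has published the pointer and kref_init()ed the count
+ * to 1. acc_cleanup() drops that initial reference; __put_acc_dev()
+ * then frees the device once the last transient user lets go.
+ */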
+
+struct acc_instance {
+	struct usb_function_instance func_inst;
+	const char *name;
+};
+
+static struct acc_dev *get_acc_dev(void)
+{
+	struct acc_dev_ref *ref = &_acc_dev_ref;
+
+	return kref_get_unless_zero(&ref->kref) ? ref->acc_dev : NULL;
+}
+
+static void __put_acc_dev(struct kref *kref)
+{
+	struct acc_dev_ref *ref = container_of(kref, struct acc_dev_ref, kref);
+	struct acc_dev *dev = ref->acc_dev;
+
+	/* Cancel any async work */
+	cancel_delayed_work_sync(&dev->start_work);
+	cancel_work_sync(&dev->getprotocol_work);
+	cancel_work_sync(&dev->sendstring_work);
+	cancel_work_sync(&dev->hid_work);
+
+	ref->acc_dev = NULL;
+	kfree(dev);
+}
+
+static void put_acc_dev(struct acc_dev *dev)
+{
+	struct acc_dev_ref *ref = dev->ref;
+
+	WARN_ON(ref->acc_dev != dev);
+	kref_put(&ref->kref, __put_acc_dev);
+}
+
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+	return container_of(f, struct acc_dev, function);
+}
+
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+	dev->disconnected = 1;
+}
+
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = get_acc_dev();
+
+	if (!dev)
+		return;
+
+	if (req->status == -ESHUTDOWN) {
+		pr_debug("acc_complete_in set disconnected");
+		acc_set_disconnected(dev);
+	}
+
+	req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+	put_acc_dev(dev);
+}
+
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = get_acc_dev();
+
+	if (!dev)
+		return;
+
+	dev->rx_done = 1;
+	if (req->status == -ESHUTDOWN) {
+		pr_debug("acc_complete_out set disconnected");
+		acc_set_disconnected(dev);
+	}
+
+	wake_up(&dev->read_wq);
+	put_acc_dev(dev);
+}
+
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev	*dev = ep->driver_data;
+	char *string_dest = NULL;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_string, err %d\n", req->status);
+		return;
+	}
+
+	switch (dev->string_index) {
+	case ACCESSORY_STRING_MANUFACTURER:
+		string_dest = dev->manufacturer;
+		break;
+	case ACCESSORY_STRING_MODEL:
+		string_dest = dev->model;
+		break;
+	case ACCESSORY_STRING_DESCRIPTION:
+		string_dest = dev->description;
+		break;
+	case ACCESSORY_STRING_VERSION:
+		string_dest = dev->version;
+		break;
+	case ACCESSORY_STRING_URI:
+		string_dest = dev->uri;
+		break;
+	case ACCESSORY_STRING_SERIAL:
+		string_dest = dev->serial;
+		break;
+	}
+	if (string_dest) {
+		unsigned long flags;
+
+		if (length >= ACC_STRING_SIZE)
+			length = ACC_STRING_SIZE - 1;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		memcpy(string_dest, req->buf, length);
+		/* ensure zero termination */
+		string_dest[length] = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	} else {
+		pr_err("unknown accessory string index %d\n",
+			dev->string_index);
+	}
+}
+
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	struct acc_dev *dev = hid->dev;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_hid_report_desc, err %d\n",
+			req->status);
+		return;
+	}
+
+	memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+	hid->report_desc_offset += length;
+	if (hid->report_desc_offset == hid->report_desc_len) {
+		/* After we have received the entire report descriptor
+		 * we schedule work to initialize the HID device
+		 */
+		schedule_work(&dev->hid_work);
+	}
+}
+
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+		return;
+	}
+
+	hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+static int acc_hid_parse(struct hid_device *hid)
+{
+	struct acc_hid_dev *hdev = hid->driver_data;
+
+	hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+	return 0;
+}
+
+static int acc_hid_start(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static int acc_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+	__u8 *buf, size_t len, unsigned char rtype, int reqtype)
+{
+	return 0;
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+	.parse = acc_hid_parse,
+	.start = acc_hid_start,
+	.stop = acc_hid_stop,
+	.open = acc_hid_open,
+	.close = acc_hid_close,
+	.raw_request = acc_hid_raw_request,
+};
+
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+		int id, int desc_len)
+{
+	struct acc_hid_dev *hdev;
+
+	hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+	if (!hdev)
+		return NULL;
+	hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+	if (!hdev->report_desc) {
+		kfree(hdev);
+		return NULL;
+	}
+	hdev->dev = dev;
+	hdev->id = id;
+	hdev->report_desc_len = desc_len;
+
+	return hdev;
+}
+
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+	struct acc_hid_dev *hid;
+
+	list_for_each_entry(hid, list, list) {
+		if (hid->id == id)
+			return hid;
+	}
+	return NULL;
+}
+
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	/* report descriptor length must be > 0 */
+	if (desc_length <= 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	/* replace HID if one already exists with this ID */
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (hid)
+		list_move(&hid->list, &dev->dead_hid_list);
+
+	hid = acc_hid_new(dev, id, desc_length);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -ENOMEM;
+	}
+
+	list_add(&hid->list, &dev->new_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* schedule work to register the HID device */
+	schedule_work(&dev->hid_work);
+	return 0;
+}
+
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -EINVAL;
+	}
+
+	list_move(&hid->list, &dev->dead_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+	return 0;
+}
+
+static int create_bulk_endpoints(struct acc_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_in;
+		req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_out;
+		dev->rx_req[i] = req;
+	}
+
+	return 0;
+
+fail:
+	pr_err("acc_bind() could not allocate requests\n");
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+		dev->rx_req[i] = NULL;
+	}
+
+	return -ENOMEM;
+}
+
+static ssize_t acc_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req;
+	ssize_t r = count;
+	ssize_t data_length;
+	unsigned xfer;
+	int ret = 0;
+
+	pr_debug("acc_read(%zu)\n", count);
+
+	if (dev->disconnected) {
+		pr_debug("acc_read disconnected");
+		return -ENODEV;
+	}
+
+	if (count > BULK_BUFFER_SIZE)
+		count = BULK_BUFFER_SIZE;
+
+	/* we will block until we're online */
+	pr_debug("acc_read: waiting for online\n");
+	ret = wait_event_interruptible(dev->read_wq, dev->online);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+
+	if (!dev->rx_req[0]) {
+		pr_warn("acc_read: USB request already handled/freed\n");
+		r = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * Round the requested length up to an integer multiple of the
+	 * endpoint's maxpacket size; a short packet from the host still
+	 * terminates the transfer early.
+	 */
+	data_length = count;
+	data_length += dev->ep_out->maxpacket - 1;
+	data_length -= data_length % dev->ep_out->maxpacket;
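+	/* e.g. a 1000-byte read with a 512-byte maxpacket queues 1024 bytes */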
+
+	if (dev->rx_done) {
+		/* the previous request was cancelled; pick up its data */
+		req = dev->rx_req[0];
+		goto copy_data;
+	}
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = data_length;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	}
+	pr_debug("rx %p queue\n", req);
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		ret = usb_ep_dequeue(dev->ep_out, req);
+		if (ret != 0) {
+			/* Cancel failed: data may already have been
+			 * received; it will be retrieved on the next read.
+			 */
+			pr_debug("acc_read: cancelling failed %d\n", ret);
+		}
+		goto done;
+	}
+
+copy_data:
+	dev->rx_done = 0;
+	if (dev->online) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		pr_debug("rx %p %u\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else {
+		r = -EIO;
+	}
+
+done:
+	pr_debug("acc_read returning %zd\n", r);
+	return r;
+}
+
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req = NULL;
+	ssize_t r = count;
+	unsigned xfer;
+	int ret;
+
+	pr_debug("acc_write(%zu)\n", count);
+
+	if (!dev->online || dev->disconnected) {
+		pr_debug("acc_write disconnected or not online");
+		return -ENODEV;
+	}
+
+	while (count > 0) {
+		/* get an idle tx request to use */
+		req = NULL;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+		if (!dev->online || dev->disconnected) {
+			pr_debug("acc_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > BULK_BUFFER_SIZE) {
+			xfer = BULK_BUFFER_SIZE;
+			/* more TX requests will follow, so no ZLP yet */
+			req->zero = 0;
+		} else {
+			xfer = count;
+			/* If the data length is a multiple of the
+			 * maxpacket size then send a zero length packet (ZLP).
+			 */
+			req->zero = ((xfer % dev->ep_in->maxpacket) == 0);
+		}
+		if (copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			pr_debug("acc_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* clear req so the error path doesn't return it to tx_idle */
+		req = NULL;
+	}
+
+	if (req)
+		req_put(dev, &dev->tx_idle, req);
+
+	pr_debug("acc_write returning %zd\n", r);
+	return r;
+}
+
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct acc_dev *dev = fp->private_data;
+	char *src = NULL;
+	int ret;
+
+	switch (code) {
+	case ACCESSORY_GET_STRING_MANUFACTURER:
+		src = dev->manufacturer;
+		break;
+	case ACCESSORY_GET_STRING_MODEL:
+		src = dev->model;
+		break;
+	case ACCESSORY_GET_STRING_DESCRIPTION:
+		src = dev->description;
+		break;
+	case ACCESSORY_GET_STRING_VERSION:
+		src = dev->version;
+		break;
+	case ACCESSORY_GET_STRING_URI:
+		src = dev->uri;
+		break;
+	case ACCESSORY_GET_STRING_SERIAL:
+		src = dev->serial;
+		break;
+	case ACCESSORY_IS_START_REQUESTED:
+		return dev->start_requested;
+	case ACCESSORY_GET_AUDIO_MODE:
+		return dev->audio_mode;
+	}
+	if (!src)
+		return -EINVAL;
+
+	ret = strlen(src) + 1;
+	if (copy_to_user((void __user *)value, src, ret))
+		ret = -EFAULT;
+	return ret;
+}
+
+static int acc_open(struct inode *ip, struct file *fp)
+{
+	struct acc_dev *dev = get_acc_dev();
+
+	if (!dev)
+		return -ENODEV;
+
+	if (atomic_xchg(&dev->open_excl, 1)) {
+		put_acc_dev(dev);
+		return -EBUSY;
+	}
+
+	dev->disconnected = 0;
+	fp->private_data = dev;
+	return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+	struct acc_dev *dev = fp->private_data;
+
+	if (!dev)
+		return -ENOENT;
+
+	/* indicate that we are disconnected;
+	 * we could still be online, so don't touch the online flag
+	 */
+	dev->disconnected = 1;
+
+	fp->private_data = NULL;
+	WARN_ON(!atomic_xchg(&dev->open_excl, 0));
+	put_acc_dev(dev);
+	return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+	.owner = THIS_MODULE,
+	.read = acc_read,
+	.write = acc_write,
+	.unlocked_ioctl = acc_ioctl,
+	.open = acc_open,
+	.release = acc_release,
+};
+
+static int acc_hid_probe(struct hid_device *hdev,
+		const struct hid_device_id *id)
+{
+	int ret;
+
+	ret = hid_parse(hdev);
+	if (ret)
+		return ret;
+	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static struct miscdevice acc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "usb_accessory",
+	.fops = &acc_fops,
+};
+
+static const struct hid_device_id acc_hid_table[] = {
+	{ HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+	{ }
+};
+
+static struct hid_driver acc_hid_driver = {
+	.name = "USB accessory",
+	.id_table = acc_hid_table,
+	.probe = acc_hid_probe,
+};
+
+static void acc_complete_setup_noop(struct usb_ep *ep, struct usb_request *req)
+{
+	/*
+	 * Default no-op function when nothing needs to be done for the
+	 * setup request
+	 */
+}
+
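+/*
+ * Vendor-specific control requests of the Android Open Accessory (AOA)
+ * protocol: the host reads the protocol version (ACCESSORY_GET_PROTOCOL),
+ * sends its identifying strings (ACCESSORY_SEND_STRING) and then issues
+ * ACCESSORY_START; the HID requests let the host register its input
+ * devices with the gadget.
+ */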
+int acc_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct acc_dev	*dev = get_acc_dev();
+	int	value = -EOPNOTSUPP;
+	struct acc_hid_dev *hid;
+	int offset;
+	u8 b_requestType = ctrl->bRequestType;
+	u8 b_request = ctrl->bRequest;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long flags;
+
+	/*
+	 * If the instance has not been created, as in power-off charging
+	 * mode, dev will be NULL; return an error in that case.
+	 */
+	if (!dev)
+		return -ENODEV;
+
+	if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_START) {
+			dev->start_requested = 1;
+			schedule_delayed_work(
+				&dev->start_work, msecs_to_jiffies(10));
+			value = 0;
+			cdev->req->complete = acc_complete_setup_noop;
+		} else if (b_request == ACCESSORY_SEND_STRING) {
+			schedule_work(&dev->sendstring_work);
+			dev->string_index = w_index;
+			cdev->gadget->ep0->driver_data = dev;
+			cdev->req->complete = acc_complete_set_string;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+				w_index == 0 && w_length == 0) {
+			dev->audio_mode = w_value;
+			cdev->req->complete = acc_complete_setup_noop;
+			value = 0;
+		} else if (b_request == ACCESSORY_REGISTER_HID) {
+			cdev->req->complete = acc_complete_setup_noop;
+			value = acc_register_hid(dev, w_value, w_index);
+		} else if (b_request == ACCESSORY_UNREGISTER_HID) {
+			cdev->req->complete = acc_complete_setup_noop;
+			value = acc_unregister_hid(dev, w_value);
+		} else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->new_hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
+			offset = w_index;
+			if (offset != hid->report_desc_offset
+				|| offset + w_length > hid->report_desc_len) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_set_hid_report_desc;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_send_hid_event;
+			value = w_length;
+		}
+	} else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_GET_PROTOCOL) {
+			schedule_work(&dev->getprotocol_work);
+			*((u16 *)cdev->req->buf) = PROTOCOL_VERSION;
+			value = sizeof(u16);
+			cdev->req->complete = acc_complete_setup_noop;
+			/* clear any string left over from a previous session */
+			memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+			memset(dev->model, 0, sizeof(dev->model));
+			memset(dev->description, 0, sizeof(dev->description));
+			memset(dev->version, 0, sizeof(dev->version));
+			memset(dev->uri, 0, sizeof(dev->uri));
+			memset(dev->serial, 0, sizeof(dev->serial));
+			dev->start_requested = 0;
+			dev->audio_mode = 0;
+		}
+	}
+
+	if (value >= 0) {
+		cdev->req->zero = 0;
+		cdev->req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "%s setup response queue error\n",
+				__func__);
+	}
+
+err:
+	if (value == -EOPNOTSUPP)
+		VDBG(cdev,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	put_acc_dev(dev);
+	return value;
+}
+EXPORT_SYMBOL_GPL(acc_ctrlrequest);
+
+static int
+__acc_function_bind(struct usb_configuration *c,
+			struct usb_function *f, bool configfs)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct acc_dev	*dev = func_to_dev(f);
+	int			id;
+	int			ret;
+
+	DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+	if (configfs) {
+		if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+			ret = usb_string_id(c->cdev);
+			if (ret < 0)
+				return ret;
+			acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+			acc_interface_desc.iInterface = ret;
+		}
+		dev->cdev = c->cdev;
+	}
+	ret = hid_register_driver(&acc_hid_driver);
+	if (ret)
+		return ret;
+
+	dev->start_requested = 0;
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	acc_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+			&acc_fullspeed_out_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		acc_highspeed_in_desc.bEndpointAddress =
+			acc_fullspeed_in_desc.bEndpointAddress;
+		acc_highspeed_out_desc.bEndpointAddress =
+			acc_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static int
+acc_function_bind_configfs(struct usb_configuration *c,
+			struct usb_function *f)
+{
+	return __acc_function_bind(c, f, true);
+}
+
+static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+	struct acc_hid_dev *hid;
+	struct list_head *entry, *temp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_safe(entry, temp, &dev->hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_move(&hid->list, &dev->dead_hid_list);
+	}
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_move(&hid->list, &dev->dead_hid_list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+}
+
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+	hid_unregister_driver(&acc_hid_driver);
+	kill_all_hid_devices(dev);
+}
+
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_request *req;
+	int i;
+
+	dev->online = 0;		/* clear online flag */
+	wake_up(&dev->read_wq);		/* unblock reads on closure */
+	wake_up(&dev->write_wq);	/* likewise for writes */
+
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+		dev->rx_req[i] = NULL;
+	}
+
+	acc_hid_unbind(dev);
+}
+
+static void acc_getprotocol_work(struct work_struct *data)
+{
+	char *envp[2] = { "ACCESSORY=GETPROTOCOL", NULL };
+
+	kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static void acc_sendstring_work(struct work_struct *data)
+{
+	char *envp[2] = { "ACCESSORY=SENDSTRING", NULL };
+
+	kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static void acc_start_work(struct work_struct *data)
+{
+	char *envp[2] = { "ACCESSORY=START", NULL };
+
+	kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+	struct hid_device *hid;
+	int ret;
+
+	hid = hid_allocate_device();
+	if (IS_ERR(hid))
+		return PTR_ERR(hid);
+
+	hid->ll_driver = &acc_hid_ll_driver;
+	hid->dev.parent = acc_device.this_device;
+
+	hid->bus = BUS_USB;
+	hid->vendor = HID_ANY_ID;
+	hid->product = HID_ANY_ID;
+	hid->driver_data = hdev;
+	ret = hid_add_device(hid);
+	if (ret) {
+		pr_err("can't add hid device: %d\n", ret);
+		hid_destroy_device(hid);
+		return ret;
+	}
+
+	hdev->hid = hid;
+	return 0;
+}
+
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+	kfree(hid->report_desc);
+	kfree(hid);
+}
+
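+/*
+ * hid_add_device() and hid_destroy_device() can sleep, so HID setup and
+ * teardown are deferred here from the atomic ep0 completion handlers:
+ * new_hid_list holds devices whose report descriptors are complete,
+ * dead_hid_list holds devices waiting to be destroyed.
+ */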
+static void acc_hid_work(struct work_struct *data)
+{
+	struct acc_dev *dev = get_acc_dev();
+	struct list_head	*entry, *temp;
+	struct acc_hid_dev *hid;
+	struct list_head	new_list, dead_list;
+	unsigned long flags;
+
+	if (!dev)
+		return;
+
+	INIT_LIST_HEAD(&new_list);
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* copy hids that are ready for initialization to new_list */
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (hid->report_desc_offset == hid->report_desc_len)
+			list_move(&hid->list, &new_list);
+	}
+
+	if (list_empty(&dev->dead_hid_list)) {
+		INIT_LIST_HEAD(&dead_list);
+	} else {
+		/* move all of dev->dead_hid_list to dead_list */
+		list_replace_init(&dev->dead_hid_list, &dead_list);
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* register new HID devices */
+	list_for_each_safe(entry, temp, &new_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (acc_hid_init(hid)) {
+			pr_err("can't add HID device %p\n", hid);
+			acc_hid_delete(hid);
+		} else {
+			spin_lock_irqsave(&dev->lock, flags);
+			list_move(&hid->list, &dev->hid_list);
+			spin_unlock_irqrestore(&dev->lock, flags);
+		}
+	}
+
+	/* remove dead HID devices */
+	list_for_each_safe(entry, temp, &dead_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		if (hid->hid)
+			hid_destroy_device(hid->hid);
+		acc_hid_delete(hid);
+	}
+
+	put_acc_dev(dev);
+}
+
+static int acc_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_out);
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	dev->online = 1;
+	dev->disconnected = 0; /* if online then not disconnected */
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void acc_function_disable(struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "acc_function_disable\n");
+	acc_set_disconnected(dev); /* this now only sets disconnected */
+	dev->online = 0; /* so now need to clear online flag here too */
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int acc_setup(void)
+{
+	struct acc_dev_ref *ref = &_acc_dev_ref;
+	struct acc_dev *dev;
+	int ret;
+
+	if (kref_read(&ref->kref))
+		return -EBUSY;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	atomic_set(&dev->open_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->hid_list);
+	INIT_LIST_HEAD(&dev->new_hid_list);
+	INIT_LIST_HEAD(&dev->dead_hid_list);
+	INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+	INIT_WORK(&dev->hid_work, acc_hid_work);
+	INIT_WORK(&dev->getprotocol_work, acc_getprotocol_work);
+	INIT_WORK(&dev->sendstring_work, acc_sendstring_work);
+
+	dev->ref = ref;
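+	/* publish the singleton; a non-NULL old value means we lost a race */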
+	if (cmpxchg_relaxed(&ref->acc_dev, NULL, dev)) {
+		ret = -EBUSY;
+		goto err_free_dev;
+	}
+
+	ret = misc_register(&acc_device);
+	if (ret)
+		goto err_zap_ptr;
+
+	kref_init(&ref->kref);
+	return 0;
+
+err_zap_ptr:
+	ref->acc_dev = NULL;
+err_free_dev:
+	kfree(dev);
+	pr_err("USB accessory gadget driver failed to initialize\n");
+	return ret;
+}
+
+void acc_disconnect(void)
+{
+	struct acc_dev *dev = get_acc_dev();
+
+	if (!dev)
+		return;
+
+	/* unregister all HID devices if USB is disconnected */
+	kill_all_hid_devices(dev);
+	put_acc_dev(dev);
+}
+EXPORT_SYMBOL_GPL(acc_disconnect);
+
+static void acc_cleanup(void)
+{
+	struct acc_dev *dev = get_acc_dev();
+
+	misc_deregister(&acc_device);
+	put_acc_dev(dev);
+	put_acc_dev(dev); /* Pairs with kref_init() in acc_setup() */
+}
+
+static struct acc_instance *to_acc_instance(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct acc_instance,
+		func_inst.group);
+}
+
+static void acc_attr_release(struct config_item *item)
+{
+	struct acc_instance *fi_acc = to_acc_instance(item);
+
+	usb_put_function_instance(&fi_acc->func_inst);
+}
+
+static struct configfs_item_operations acc_item_ops = {
+	.release        = acc_attr_release,
+};
+
+static struct config_item_type acc_func_type = {
+	.ct_item_ops    = &acc_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+static struct acc_instance *to_fi_acc(struct usb_function_instance *fi)
+{
+	return container_of(fi, struct acc_instance, func_inst);
+}
+
+static int acc_set_inst_name(struct usb_function_instance *fi, const char *name)
+{
+	struct acc_instance *fi_acc;
+	char *ptr;
+	int name_len;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ptr = kstrndup(name, name_len, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	fi_acc = to_fi_acc(fi);
+	fi_acc->name = ptr;
+	return 0;
+}
+
+static void acc_free_inst(struct usb_function_instance *fi)
+{
+	struct acc_instance *fi_acc;
+
+	fi_acc = to_fi_acc(fi);
+	kfree(fi_acc->name);
+	acc_cleanup();
+}
+
+static struct usb_function_instance *acc_alloc_inst(void)
+{
+	struct acc_instance *fi_acc;
+	int err;
+
+	fi_acc = kzalloc(sizeof(*fi_acc), GFP_KERNEL);
+	if (!fi_acc)
+		return ERR_PTR(-ENOMEM);
+	fi_acc->func_inst.set_inst_name = acc_set_inst_name;
+	fi_acc->func_inst.free_func_inst = acc_free_inst;
+
+	err = acc_setup();
+	if (err) {
+		kfree(fi_acc);
+		return ERR_PTR(err);
+	}
+
+	config_group_init_type_name(&fi_acc->func_inst.group,
+					"", &acc_func_type);
+	return &fi_acc->func_inst;
+}
+
+static void acc_free(struct usb_function *f)
+{
+	struct acc_dev *dev = func_to_dev(f);
+
+	put_acc_dev(dev);
+}
+
+int acc_ctrlrequest_configfs(struct usb_function *f,
+			const struct usb_ctrlrequest *ctrl)
+{
+	if (f->config != NULL && f->config->cdev != NULL)
+		return acc_ctrlrequest(f->config->cdev, ctrl);
+	else
+		return -1;
+}
+
+static struct usb_function *acc_alloc(struct usb_function_instance *fi)
+{
+	struct acc_dev *dev = get_acc_dev();
+
+	dev->function.name = "accessory";
+	dev->function.strings = acc_strings;
+	dev->function.fs_descriptors = fs_acc_descs;
+	dev->function.hs_descriptors = hs_acc_descs;
+	dev->function.ss_descriptors = ss_acc_descs;
+	dev->function.ssp_descriptors = ssp_acc_descs;
+	dev->function.bind = acc_function_bind_configfs;
+	dev->function.unbind = acc_function_unbind;
+	dev->function.set_alt = acc_function_set_alt;
+	dev->function.disable = acc_function_disable;
+	dev->function.free_func = acc_free;
+	dev->function.setup = acc_ctrlrequest_configfs;
+
+	return &dev->function;
+}
+DECLARE_USB_FUNCTION_INIT(accessory, acc_alloc_inst, acc_alloc);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
new file mode 100644
index 0000000..c768a52
--- /dev/null
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -0,0 +1,1071 @@
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <linux/pm_qos.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#define SAMPLE_RATE 44100
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
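+/*
+ * Integer division: 44 frames per millisecond at 44100 Hz. The truncated
+ * 0.1 frame/ms is not lost; audio_send() re-derives the frame budget
+ * from elapsed ktime, so the pacing stays accurate over time.
+ */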
+
+#define IN_EP_MAX_PACKET_SIZE 256
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE	0
+#define AUDIO_AS_INTERFACE	1
+#define AUDIO_NUM_INTERFACES	2
+#define MAX_INST_NAME_LEN     40
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH	UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+	+ UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+	+ UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+	.bLength =		UAC_DT_AC_HEADER_LENGTH,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_HEADER,
+	.bcdADC =		__constant_cpu_to_le16(0x0100),
+	.wTotalLength =		__constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+	.bInCollection =	AUDIO_NUM_INTERFACES,
+	.baInterfaceNr = {
+		[0] =		AUDIO_AC_INTERFACE,
+		[1] =		AUDIO_AS_INTERFACE,
+	}
+};
+
+#define INPUT_TERMINAL_ID	1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+	.bLength =		UAC_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_INPUT_TERMINAL,
+	.bTerminalID =		INPUT_TERMINAL_ID,
+	.wTerminalType =	UAC_INPUT_TERMINAL_MICROPHONE,
+	.bAssocTerminal =	0,
+	.wChannelConfig =	0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID		2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+	.bLength		= UAC_DT_FEATURE_UNIT_SIZE(0),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_FEATURE_UNIT,
+	.bUnitID		= FEATURE_UNIT_ID,
+	.bSourceID		= INPUT_TERMINAL_ID,
+	.bControlSize		= 2,
+};
+
+#define OUTPUT_TERMINAL_ID	3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+	.bLength		= UAC_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_OUTPUT_TERMINAL,
+	.bTerminalID		= OUTPUT_TERMINAL_ID,
+	.wTerminalType		= UAC_TERMINAL_STREAMING,
+	.bAssocTerminal		= FEATURE_UNIT_ID,
+	.bSourceID		= FEATURE_UNIT_ID,
+};
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+	.bLength =		UAC_DT_AS_HEADER_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_AS_GENERAL,
+	.bTerminalLink =	INPUT_TERMINAL_ID,
+	.bDelay =		1,
+	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+	.bLength =		UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_FORMAT_TYPE,
+	.bFormatType =		UAC_FORMAT_TYPE_I,
+	.bSubframeSize =	2,
+	.bBitResolution =	16,
+	.bSamFreqType =		1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor hs_as_in_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval =		4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor fs_as_in_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval =		1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+	.bLength =		UAC_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType =	USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype =	UAC_EP_GENERAL,
+	.bmAttributes =		1,
+	.bLockDelayUnits =	1,
+	.wLockDelay =		__constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&hs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&fs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
+static struct snd_pcm_hardware audio_hw_info = {
+	.info =			SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_BATCH |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER,
+
+	.formats		= SNDRV_PCM_FMTBIT_S16_LE,
+	.channels_min		= 2,
+	.channels_max		= 2,
+	.rate_min		= SAMPLE_RATE,
+	.rate_max		= SAMPLE_RATE,
+
+	.buffer_bytes_max =	1024 * 1024,
+	.period_bytes_min =	64,
+	.period_bytes_max =	512 * 1024,
+	.periods_min =		2,
+	.periods_max =		1024,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_config {
+	int	card;
+	int	device;
+};
+
+struct audio_dev {
+	struct usb_function		func;
+	struct snd_card			*card;
+	struct snd_pcm			*pcm;
+	struct snd_pcm_substream	*substream;
+
+	struct list_head		idle_reqs;
+	struct usb_ep			*in_ep;
+
+	spinlock_t			lock;
+
+	/* beginning, end and current position in our buffer */
+	void				*buffer_start;
+	void				*buffer_end;
+	void				*buffer_pos;
+
+	/* byte size of a "period" */
+	unsigned int			period;
+	/* bytes sent since last call to snd_pcm_period_elapsed */
+	unsigned int			period_offset;
+	/* time we started playing */
+	ktime_t				start_time;
+	/* number of frames sent since start_time */
+	s64				frames_sent;
+	struct audio_source_config	*config;
+	/* for creating and issuing QoS requests */
+	struct pm_qos_request pm_qos;
+};
+
+static inline struct audio_dev *func_to_audio(struct usb_function *f)
+{
+	return container_of(f, struct audio_dev, func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_instance {
+	struct usb_function_instance func_inst;
+	const char *name;
+	struct audio_source_config *config;
+	struct device *audio_device;
+};
+
+static void audio_source_attr_release(struct config_item *item);
+
+static struct configfs_item_operations audio_source_item_ops = {
+	.release        = audio_source_attr_release,
+};
+
+static struct config_item_type audio_source_func_type = {
+	.ct_item_ops    = &audio_source_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static DEVICE_ATTR(pcm, S_IRUGO, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+	&dev_attr_pcm,
+	NULL
+};
+
+/*--------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+
+	if (!req)
+		return NULL;
+
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+	req->length = buffer_size;
+	return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	list_add_tail(&req->list, &audio->idle_reqs);
+	spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	if (list_empty(&audio->idle_reqs)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(&audio->idle_reqs, struct usb_request,
+				list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&audio->lock, flags);
+	return req;
+}
+
+/* send the appropriate number of packets to match our bitrate */
+static void audio_send(struct audio_dev *audio)
+{
+	struct snd_pcm_runtime *runtime;
+	struct usb_request *req;
+	int length, length1, length2, ret;
+	s64 msecs;
+	s64 frames;
+	ktime_t now;
+
+	/* audio->substream will be null if we have been closed */
+	if (!audio->substream)
+		return;
+	/* audio->buffer_pos will be null if we have been stopped */
+	if (!audio->buffer_pos)
+		return;
+
+	runtime = audio->substream->runtime;
+
+	/* compute number of frames to send */
+	now = ktime_get();
+	msecs = div_s64((ktime_to_ns(now) - ktime_to_ns(audio->start_time)),
+			1000000);
+	frames = div_s64((msecs * SAMPLE_RATE), 1000);
+
+	/* Readjust our frames_sent if we fall too far behind.
+	 * If we get too far behind it is better to drop some frames than
+	 * to keep sending data too fast in an attempt to catch up.
+	 */
+	if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+		audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+	frames -= audio->frames_sent;
+
+	/* We need to send something to keep the pipeline going */
+	if (frames <= 0)
+		frames = FRAMES_PER_MSEC;
+
+	while (frames > 0) {
+		req = audio_req_get(audio);
+		if (!req)
+			break;
+
+		length = frames_to_bytes(runtime, frames);
+		if (length > IN_EP_MAX_PACKET_SIZE)
+			length = IN_EP_MAX_PACKET_SIZE;
+
+		if (audio->buffer_pos + length > audio->buffer_end)
+			length1 = audio->buffer_end - audio->buffer_pos;
+		else
+			length1 = length;
+		memcpy(req->buf, audio->buffer_pos, length1);
+		if (length1 < length) {
+			/* Wrap around and copy remaining length
+			 * at beginning of buffer.
+			 */
+			length2 = length - length1;
+			memcpy(req->buf + length1, audio->buffer_start,
+					length2);
+			audio->buffer_pos = audio->buffer_start + length2;
+		} else {
+			audio->buffer_pos += length1;
+			if (audio->buffer_pos >= audio->buffer_end)
+				audio->buffer_pos = audio->buffer_start;
+		}
+
+		req->length = length;
+		ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+		if (ret < 0) {
+			pr_err("usb_ep_queue failed ret: %d\n", ret);
+			audio_req_put(audio, req);
+			break;
+		}
+
+		frames -= bytes_to_frames(runtime, length);
+		audio->frames_sent += bytes_to_frames(runtime, length);
+	}
+}
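+
+/*
+ * Worked example of the pacing math above (a sketch, assuming SAMPLE_RATE
+ * is 44100 and FRAMES_PER_MSEC is SAMPLE_RATE / 1000, as defined earlier
+ * in this file):
+ *
+ *   elapsed msecs = 250   ->  frames owed = 250 * 44100 / 1000 = 11025
+ *   frames_sent   = 10500 ->  queue 11025 - 10500 = 525 frames now
+ *
+ * If the deficit ever exceeds 10 * FRAMES_PER_MSEC (~10 ms of audio),
+ * frames_sent is snapped forward so stale audio is dropped rather than
+ * burst onto the bus.
+ */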
+
+static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* nothing to do here */
+}
+
+static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct audio_dev *audio = req->context;
+
+	pr_debug("audio_data_complete req->status %d req->actual %d\n",
+		req->status, req->actual);
+
+	audio_req_put(audio, req);
+
+	if (!audio->buffer_start || req->status)
+		return;
+
+	audio->period_offset += req->actual;
+	if (audio->period_offset >= audio->period) {
+		snd_pcm_period_elapsed(audio->substream);
+		audio->period_offset = 0;
+	}
+	audio_send(audio);
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	int value = -EOPNOTSUPP;
+	u16 ep = le16_to_cpu(ctrl->wIndex);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+
+	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case UAC_SET_CUR:
+	case UAC_SET_MIN:
+	case UAC_SET_MAX:
+	case UAC_SET_RES:
+		value = len;
+		break;
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int value = -EOPNOTSUPP;
+	u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u8 *buf = cdev->req->buf;
+
+	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) {
+		switch (ctrl->bRequest) {
+		case UAC_GET_CUR:
+		case UAC_GET_MIN:
+		case UAC_GET_MAX:
+		case UAC_GET_RES:
+			/* return our sample rate */
+			buf[0] = (u8)SAMPLE_RATE;
+			buf[1] = (u8)(SAMPLE_RATE >> 8);
+			buf[2] = (u8)(SAMPLE_RATE >> 16);
+			value = 3;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return value;
+}
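+
+/*
+ * Example of the 3-byte sample-rate encoding built above (a sketch,
+ * assuming SAMPLE_RATE == 44100): 44100 == 0x00AC44, so the control
+ * response is buf[0] = 0x44, buf[1] = 0xAC, buf[2] = 0x00 -- the
+ * little-endian tSamFreq layout that UAC1 expects.
+ */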
+
+static int
+audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request *req = cdev->req;
+	int value = -EOPNOTSUPP;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything; interface
+	 * activation uses set_alt().
+	 */
+	switch (ctrl->bRequestType) {
+	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_set_endpoint_req(f, ctrl);
+		break;
+
+	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_get_endpoint_req(f, ctrl);
+		break;
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		req->complete = audio_control_complete;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("audio response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct audio_dev *audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+	if (ret)
+		return ret;
+
+	usb_ep_enable(audio->in_ep);
+	return 0;
+}
+
+static void audio_disable(struct usb_function *f)
+{
+	struct audio_dev	*audio = func_to_audio(f);
+
+	pr_debug("audio_disable\n");
+	usb_ep_disable(audio->in_ep);
+}
+
+static void audio_free_func(struct usb_function *f)
+{
+	/* no-op */
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+	u8 *sam_freq;
+	int rate;
+
+	/* Set channel numbers */
+	input_terminal_desc.bNrChannels = 2;
+	as_type_i_desc.bNrChannels = 2;
+
+	/* Set sample rates (3-byte little-endian; assumes a little-endian host) */
+	rate = SAMPLE_RATE;
+	sam_freq = as_type_i_desc.tSamFreq[0];
+	memcpy(sam_freq, &rate, 3);
+}
+
+
+static int snd_card_setup(struct usb_configuration *c,
+	struct audio_source_config *config);
+static struct audio_source_instance *to_fi_audio_source(
+	const struct usb_function_instance *fi);
+
+
+/* audio function driver setup/binding */
+static int
+audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct audio_dev *audio = func_to_audio(f);
+	int status;
+	struct usb_ep *ep;
+	struct usb_request *req;
+	int i;
+	int err;
+
+	if (IS_ENABLED(CONFIG_USB_CONFIGFS)) {
+		struct audio_source_instance *fi_audio =
+				to_fi_audio_source(f->fi);
+		struct audio_source_config *config =
+				fi_audio->config;
+
+		err = snd_card_setup(c, config);
+		if (err)
+			return err;
+	}
+
+	audio_build_desc(audio);
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ac_interface_desc.bInterfaceNumber = status;
+
+	/* AUDIO_AC_INTERFACE */
+	ac_header_desc.baInterfaceNr[0] = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	as_interface_alt_0_desc.bInterfaceNumber = status;
+	as_interface_alt_1_desc.bInterfaceNumber = status;
+
+	/* AUDIO_AS_INTERFACE */
+	ac_header_desc.baInterfaceNr[1] = status;
+
+	status = -ENODEV;
+
+	/* allocate our endpoint */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
+	if (!ep)
+		goto fail;
+	audio->in_ep = ep;
+	ep->driver_data = audio; /* claim */
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		hs_as_in_ep_desc.bEndpointAddress =
+			fs_as_in_ep_desc.bEndpointAddress;
+
+	f->fs_descriptors = fs_audio_desc;
+	f->hs_descriptors = hs_audio_desc;
+
+	for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
+		req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
+		if (req) {
+			req->context = audio;
+			req->complete = audio_data_complete;
+			audio_req_put(audio, req);
+		} else {
+			status = -ENOMEM;
+		}
+	}
+
+fail:
+	return status;
+}
+
+static void
+audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct audio_dev *audio = func_to_audio(f);
+	struct usb_request *req;
+
+	while ((req = audio_req_get(audio)))
+		audio_request_free(req, audio->in_ep);
+
+	snd_card_free_when_closed(audio->card);
+	audio->card = NULL;
+	audio->pcm = NULL;
+	audio->substream = NULL;
+	audio->in_ep = NULL;
+
+	if (IS_ENABLED(CONFIG_USB_CONFIGFS)) {
+		struct audio_source_instance *fi_audio =
+				to_fi_audio_source(f->fi);
+		struct audio_source_config *config =
+				fi_audio->config;
+
+		config->card = -1;
+		config->device = -1;
+	}
+}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+	audio->start_time = ktime_get();
+	audio->frames_sent = 0;
+	audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	audio->buffer_start = NULL;
+	audio->buffer_end = NULL;
+	audio->buffer_pos = NULL;
+	spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = substream->private_data;
+
+	runtime->private_data = audio;
+	runtime->hw = audio_hw_info;
+	snd_pcm_limit_hw_rates(runtime);
+	runtime->hw.channels_max = 2;
+
+	audio->substream = substream;
+
+	/* Add the QoS request and set the latency to 0 */
+	cpu_latency_qos_add_request(&audio->pm_qos, 0);
+
+	return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct audio_dev *audio = substream->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+
+	/* Remove the QoS request */
+	cpu_latency_qos_remove_request(&audio->pm_qos);
+
+	audio->substream = NULL;
+	spin_unlock_irqrestore(&audio->lock, flags);
+
+	return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	unsigned int channels = params_channels(params);
+	unsigned int rate = params_rate(params);
+
+	if (rate != SAMPLE_RATE)
+		return -EINVAL;
+	if (channels != 2)
+		return -EINVAL;
+
+	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+		params_buffer_bytes(params));
+}
+
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+
+	audio->period = snd_pcm_lib_period_bytes(substream);
+	audio->period_offset = 0;
+	audio->buffer_start = runtime->dma_area;
+	audio->buffer_end = audio->buffer_start
+		+ snd_pcm_lib_buffer_bytes(substream);
+	audio->buffer_pos = audio->buffer_start;
+
+	return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+	ssize_t bytes = audio->buffer_pos - audio->buffer_start;
+
+	/* return offset of next frame to fill in our buffer */
+	return bytes_to_frames(runtime, bytes);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct audio_dev *audio = substream->runtime->private_data;
+	int ret = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		audio_pcm_playback_start(audio);
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		audio_pcm_playback_stop(audio);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct audio_dev _audio_dev = {
+	.func = {
+		.name = "audio_source",
+		.bind = audio_bind,
+		.unbind = audio_unbind,
+		.set_alt = audio_set_alt,
+		.setup = audio_setup,
+		.disable = audio_disable,
+		.free_func = audio_free_func,
+	},
+	.lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
+	.idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
+};
+
+static struct snd_pcm_ops audio_playback_ops = {
+	.open		= audio_pcm_open,
+	.close		= audio_pcm_close,
+	.ioctl		= snd_pcm_lib_ioctl,
+	.hw_params	= audio_pcm_hw_params,
+	.hw_free	= audio_pcm_hw_free,
+	.prepare	= audio_pcm_prepare,
+	.trigger	= audio_pcm_playback_trigger,
+	.pointer	= audio_pcm_pointer,
+};
+
+int audio_source_bind_config(struct usb_configuration *c,
+		struct audio_source_config *config)
+{
+	struct audio_dev *audio;
+	int err;
+
+	config->card = -1;
+	config->device = -1;
+
+	audio = &_audio_dev;
+
+	err = snd_card_setup(c, config);
+	if (err)
+		return err;
+
+	err = usb_add_function(c, &audio->func);
+	if (err)
+		goto add_fail;
+
+	return 0;
+
+add_fail:
+	snd_card_free(audio->card);
+	return err;
+}
+
+static int snd_card_setup(struct usb_configuration *c,
+		struct audio_source_config *config)
+{
+	struct audio_dev *audio;
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	int err;
+
+	audio = &_audio_dev;
+
+	err = snd_card_new(&c->cdev->gadget->dev,
+			SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+			THIS_MODULE, 0, &card);
+	if (err)
+		return err;
+
+	err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+	if (err)
+		goto pcm_fail;
+
+	pcm->private_data = audio;
+	pcm->info_flags = 0;
+	audio->pcm = pcm;
+
+	strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+				NULL, 0, 64 * 1024);
+
+	strlcpy(card->driver, "audio_source", sizeof(card->driver));
+	strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+	strlcpy(card->longname, "USB accessory audio source",
+		sizeof(card->longname));
+
+	err = snd_card_register(card);
+	if (err)
+		goto register_fail;
+
+	config->card = pcm->card->number;
+	config->device = pcm->device;
+	audio->card = card;
+	return 0;
+
+register_fail:
+pcm_fail:
+	/* audio->card is not set on this path; free the card just created */
+	snd_card_free(card);
+	return err;
+}
+
+static struct audio_source_instance *to_audio_source_instance(
+					struct config_item *item)
+{
+	return container_of(to_config_group(item), struct audio_source_instance,
+		func_inst.group);
+}
+
+static struct audio_source_instance *to_fi_audio_source(
+					const struct usb_function_instance *fi)
+{
+	return container_of(fi, struct audio_source_instance, func_inst);
+}
+
+static void audio_source_attr_release(struct config_item *item)
+{
+	struct audio_source_instance *fi_audio = to_audio_source_instance(item);
+
+	usb_put_function_instance(&fi_audio->func_inst);
+}
+
+static int audio_source_set_inst_name(struct usb_function_instance *fi,
+					const char *name)
+{
+	struct audio_source_instance *fi_audio;
+	char *ptr;
+	int name_len;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ptr = kstrndup(name, name_len, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	fi_audio = to_fi_audio_source(fi);
+	fi_audio->name = ptr;
+
+	return 0;
+}
+
+static void audio_source_free_inst(struct usb_function_instance *fi)
+{
+	struct audio_source_instance *fi_audio;
+
+	fi_audio = to_fi_audio_source(fi);
+	device_destroy(fi_audio->audio_device->class,
+			fi_audio->audio_device->devt);
+	kfree(fi_audio->name);
+	kfree(fi_audio->config);
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct audio_source_instance *fi_audio = dev_get_drvdata(dev);
+	struct audio_source_config *config = fi_audio->config;
+
+	/* print PCM card and device numbers */
+	return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+struct device *create_function_device(char *name);
+
+static struct usb_function_instance *audio_source_alloc_inst(void)
+{
+	struct audio_source_instance *fi_audio;
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+	struct device *dev;
+	void *err_ptr;
+	int err = 0;
+
+	fi_audio = kzalloc(sizeof(*fi_audio), GFP_KERNEL);
+	if (!fi_audio)
+		return ERR_PTR(-ENOMEM);
+
+	fi_audio->func_inst.set_inst_name = audio_source_set_inst_name;
+	fi_audio->func_inst.free_func_inst = audio_source_free_inst;
+
+	fi_audio->config = kzalloc(sizeof(struct audio_source_config),
+							GFP_KERNEL);
+	if (!fi_audio->config) {
+		err_ptr = ERR_PTR(-ENOMEM);
+		goto fail_audio;
+	}
+
+	config_group_init_type_name(&fi_audio->func_inst.group, "",
+						&audio_source_func_type);
+	dev = create_function_device("f_audio_source");
+
+	if (IS_ERR(dev)) {
+		err_ptr = dev;
+		goto fail_audio_config;
+	}
+
+	fi_audio->config->card = -1;
+	fi_audio->config->device = -1;
+	fi_audio->audio_device = dev;
+
+	attrs = audio_source_function_attributes;
+	if (attrs) {
+		while ((attr = *attrs++) && !err)
+			err = device_create_file(dev, attr);
+		if (err) {
+			err_ptr = ERR_PTR(-EINVAL);
+			goto fail_device;
+		}
+	}
+
+	dev_set_drvdata(dev, fi_audio);
+	_audio_dev.config = fi_audio->config;
+
+	return &fi_audio->func_inst;
+
+fail_device:
+	device_destroy(dev->class, dev->devt);
+fail_audio_config:
+	kfree(fi_audio->config);
+fail_audio:
+	kfree(fi_audio);
+	return err_ptr;
+}
+
+static struct usb_function *audio_source_alloc(struct usb_function_instance *fi)
+{
+	return &_audio_dev.func;
+}
+
+DECLARE_USB_FUNCTION_INIT(audio_source, audio_source_alloc_inst,
+			audio_source_alloc);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 71a1a26..0e78da6 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1224,6 +1224,65 @@
 	}
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+extern struct device *create_function_device(char *name);
+static ssize_t alsa_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usb_function_instance *fi_midi = dev_get_drvdata(dev);
+	struct f_midi *midi;
+
+	if (!fi_midi || !fi_midi->f) {
+		dev_warn(dev, "f_midi: function not set\n");
+	} else {
+		midi = func_to_midi(fi_midi->f);
+		if (midi->rmidi && midi->card && midi->rmidi->card)
+			return sprintf(buf, "%d %d\n",
+				       midi->rmidi->card->number,
+				       midi->rmidi->device);
+	}
+
+	/* no rawmidi device bound yet; report placeholder numbers */
+	return sprintf(buf, "%d %d\n", -1, -1);
+}
+
+static DEVICE_ATTR(alsa, S_IRUGO, alsa_show, NULL);
+
+static struct device_attribute *alsa_function_attributes[] = {
+	&dev_attr_alsa,
+	NULL
+};
+
+static int create_alsa_device(struct usb_function_instance *fi)
+{
+	struct device *dev;
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+	int err = 0;
+
+	dev = create_function_device("f_midi");
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	attrs = alsa_function_attributes;
+	if (attrs) {
+		while ((attr = *attrs++) && !err)
+			err = device_create_file(dev, attr);
+		if (err) {
+			device_destroy(dev->class, dev->devt);
+			return -EINVAL;
+		}
+	}
+	dev_set_drvdata(dev, fi);
+	return 0;
+}
+#else
+static int create_alsa_device(struct usb_function_instance *fi)
+{
+	return 0;
+}
+#endif
+
 static struct usb_function_instance *f_midi_alloc_inst(void)
 {
 	struct f_midi_opts *opts;
@@ -1242,6 +1301,11 @@
 	opts->out_ports = 1;
 	opts->refcnt = 1;
 
+	if (create_alsa_device(&opts->func_inst)) {
+		kfree(opts);
+		return ERR_PTR(-ENODEV);
+	}
+
 	config_group_init_type_name(&opts->func_inst.group, "",
 				    &midi_func_type);
 
@@ -1262,6 +1326,7 @@
 		kfifo_free(&midi->in_req_fifo);
 		kfree(midi);
 		free = true;
+		opts->func_inst.f = NULL;
 	}
 	mutex_unlock(&opts->lock);
 
@@ -1349,6 +1414,7 @@
 	midi->func.disable	= f_midi_disable;
 	midi->func.free_func	= f_midi_free;
 
+	fi->f = &midi->func;
 	return &midi->func;
 
 midi_free:
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 34f80b7..0001024 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -153,7 +153,7 @@
 	 If unsure, say 'N'.
 
 config VIRTIO_DMA_SHARED_BUFFER
-	tristate
+	tristate "Virtio DMA shared buffer support"
 	depends on DMA_SHARED_BUFFER
 	help
 	 This option adds a flavor of dma buffers that are backed by
diff --git a/drivers/virtio/virtio_dma_buf.c b/drivers/virtio/virtio_dma_buf.c
index 2521a750..19a843f 100644
--- a/drivers/virtio/virtio_dma_buf.c
+++ b/drivers/virtio/virtio_dma_buf.c
@@ -25,11 +25,14 @@
 			     const struct virtio_dma_buf_ops, ops);
 
 	if (!exp_info->ops ||
-	    exp_info->ops->attach != &virtio_dma_buf_attach ||
 	    !virtio_ops->get_uuid) {
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (!(IS_ENABLED(CONFIG_CFI_CLANG) && IS_ENABLED(CONFIG_MODULES)) &&
+	    exp_info->ops->attach != &virtio_dma_buf_attach)
+		return ERR_PTR(-EINVAL);
+
 	return dma_buf_export(exp_info);
 }
 EXPORT_SYMBOL(virtio_dma_buf_export);
@@ -60,6 +63,9 @@
  */
 bool is_virtio_dma_buf(struct dma_buf *dma_buf)
 {
+	if (IS_ENABLED(CONFIG_CFI_CLANG) && IS_ENABLED(CONFIG_MODULES))
+		return true;
+
 	return dma_buf->ops->attach == &virtio_dma_buf_attach;
 }
 EXPORT_SYMBOL(is_virtio_dma_buf);
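
The relaxation above exists because Clang CFI breaks function-pointer identity
checks across modules: each translation unit can take a function's address
through its own CFI jump table, so two addresses of the same function may
compare unequal. A standalone sketch of the fragile pattern (illustrative C,
not kernel code):

	typedef int (*attach_fn)(void);

	int real_attach(void) { return 0; }

	/* Under -fsanitize=cfi with modular linking, &real_attach taken in
	 * another object may resolve to a different jump-table entry, so
	 * this identity test can spuriously fail. */
	int is_ours(attach_fn fn)
	{
		return fn == &real_attach;
	}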
diff --git a/fs/Kconfig b/fs/Kconfig
index a6313a9..fd3cd36 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -127,6 +127,7 @@
 source "fs/autofs/Kconfig"
 source "fs/fuse/Kconfig"
 source "fs/overlayfs/Kconfig"
+source "fs/incfs/Kconfig"
 
 menu "Caches"
 
diff --git a/fs/Makefile b/fs/Makefile
index 84c5e4c..ceb0782 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -115,6 +115,7 @@
 obj-$(CONFIG_FUSE_FS)		+= fuse/
 obj-$(CONFIG_OVERLAY_FS)	+= overlayfs/
 obj-$(CONFIG_ORANGEFS_FS)       += orangefs/
+obj-$(CONFIG_INCREMENTAL_FS)	+= incfs/
 obj-$(CONFIG_UDF_FS)		+= udf/
 obj-$(CONFIG_SUN_OPENPROMFS)	+= openpromfs/
 obj-$(CONFIG_OMFS_FS)		+= omfs/
diff --git a/fs/OWNERS b/fs/OWNERS
new file mode 100644
index 0000000..7780f6b
--- /dev/null
+++ b/fs/OWNERS
@@ -0,0 +1 @@
+per-file {crypto,verity}/**=ebiggers@google.com
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 4ef3f71..4fcca79f 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -69,6 +69,14 @@
 }
 EXPORT_SYMBOL(fscrypt_free_bounce_page);
 
+/*
+ * Generate the IV for the given logical block number within the given file.
+ * For filenames encryption, lblk_num == 0.
+ *
+ * Keep this in sync with fscrypt_limit_io_blocks().  fscrypt_limit_io_blocks()
+ * needs to know about any IV generation methods where the low bits of IV don't
+ * simply contain the lblk_num (e.g., IV_INO_LBLK_32).
+ */
 void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
 			 const struct fscrypt_info *ci)
 {
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 5b0a9e6..664568e 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -27,6 +27,27 @@
  */
 #define FSCRYPT_MIN_KEY_SIZE	16
 
+/* Maximum size of a standard fscrypt master key */
+#define FSCRYPT_MAX_STANDARD_KEY_SIZE	64
+
+/* Maximum size of a hardware-wrapped fscrypt master key */
+#define FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE	BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE
+
+/*
+ * Maximum size of an fscrypt master key across both key types.
+ * This should just use max(), but max() doesn't work in a struct definition.
+ */
+#define FSCRYPT_MAX_ANY_KEY_SIZE \
+	(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE > FSCRYPT_MAX_STANDARD_KEY_SIZE ? \
+	 FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE : FSCRYPT_MAX_STANDARD_KEY_SIZE)
+
+/*
+ * FSCRYPT_MAX_KEY_SIZE is defined in the UAPI header, but the addition of
+ * hardware-wrapped keys has made it misleading as it's only for standard keys.
+ * Don't use it in kernel code; use one of the above constants instead.
+ */
+#undef FSCRYPT_MAX_KEY_SIZE
+
 #define FSCRYPT_CONTEXT_V1	1
 #define FSCRYPT_CONTEXT_V2	2
 
@@ -335,7 +356,8 @@
 
 /* inline_crypt.c */
 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
-int fscrypt_select_encryption_impl(struct fscrypt_info *ci);
+int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
+				   bool is_hw_wrapped_key);
 
 static inline bool
 fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
@@ -345,10 +367,16 @@
 
 int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
 				     const u8 *raw_key,
+				     unsigned int raw_key_size,
+				     bool is_hw_wrapped,
 				     const struct fscrypt_info *ci);
 
 void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key);
 
+int fscrypt_derive_sw_secret(struct super_block *sb, const u8 *wrapped_key,
+			     unsigned int wrapped_key_size,
+			     u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
+
 /*
  * Check whether the crypto transform or blk-crypto key has been allocated in
  * @prep_key, depending on which encryption implementation the file will use.
@@ -359,7 +387,7 @@
 {
 	/*
 	 * The two smp_load_acquire()'s here pair with the smp_store_release()'s
-	 * in fscrypt_prepare_inline_crypt_key() and fscrypt_prepare_key().
+	 * in fscrypt_prepare_inline_crypt_key() and __fscrypt_prepare_key().
 	 * I.e., in some cases (namely, if this prep_key is a per-mode
 	 * encryption key) another task can publish blk_key or tfm concurrently,
 	 * executing a RELEASE barrier.  We need to use smp_load_acquire() here
@@ -372,7 +400,8 @@
 
 #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
 
-static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
+static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
+						 bool is_hw_wrapped_key)
 {
 	return 0;
 }
@@ -385,7 +414,8 @@
 
 static inline int
 fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
-				 const u8 *raw_key,
+				 const u8 *raw_key, unsigned int raw_key_size,
+				 bool is_hw_wrapped,
 				 const struct fscrypt_info *ci)
 {
 	WARN_ON(1);
@@ -397,6 +427,15 @@
 {
 }
 
+static inline int
+fscrypt_derive_sw_secret(struct super_block *sb, const u8 *wrapped_key,
+			 unsigned int wrapped_key_size,
+			 u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+	fscrypt_warn(NULL, "kernel doesn't support hardware-wrapped keys");
+	return -EOPNOTSUPP;
+}
+
 static inline bool
 fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
 			const struct fscrypt_info *ci)
@@ -413,20 +452,38 @@
 struct fscrypt_master_key_secret {
 
 	/*
-	 * For v2 policy keys: HKDF context keyed by this master key.
-	 * For v1 policy keys: not set (hkdf.hmac_tfm == NULL).
+	 * The KDF with which subkeys of this key can be derived.
+	 *
+	 * For v1 policy keys, this isn't applicable and won't be set.
+	 * Otherwise, this KDF will be keyed by this master key if
+	 * ->is_hw_wrapped=false, or by the "software secret" that hardware
+	 * derived from this master key if ->is_hw_wrapped=true.
 	 */
 	struct fscrypt_hkdf	hkdf;
 
 	/*
+	 * True if this key is a hardware-wrapped key; false if this key is a
+	 * standard key (i.e. a "software key").  For v1 policy keys this will
+	 * always be false, as v1 policy support is a legacy feature which
+	 * doesn't support newer functionality such as hardware-wrapped keys.
+	 */
+	bool			is_hw_wrapped;
+
+	/*
 	 * Size of the raw key in bytes.  This remains set even if ->raw was
 	 * zeroized due to no longer being needed.  I.e. we still remember the
 	 * size of the key even if we don't need to remember the key itself.
 	 */
 	u32			size;
 
-	/* For v1 policy keys: the raw key.  Wiped for v2 policy keys. */
-	u8			raw[FSCRYPT_MAX_KEY_SIZE];
+	/*
+	 * The raw key which userspace provided, when still needed.  This can be
+	 * either a standard key or a hardware-wrapped key, as indicated by
+	 * ->is_hw_wrapped.  In the case of a standard, v2 policy key, there is
+	 * no need to remember the raw key separately from ->hkdf so this field
+	 * will be zeroized as soon as ->hkdf is initialized.
+	 */
+	u8			raw[FSCRYPT_MAX_ANY_KEY_SIZE];
 
 } __randomize_layout;
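
As the comment above notes, FSCRYPT_MAX_ANY_KEY_SIZE falls back to the
conditional operator because an array length inside a struct must be an integer
constant expression, and the kernel's max() expands to a statement expression
that is not constant. A standalone illustration (a sketch, not from the patch):

	#define A 64
	#define B 128

	/* the ternary on constants is itself a constant expression */
	#define CONST_MAX(a, b) ((a) > (b) ? (a) : (b))

	struct example {
		unsigned char raw[CONST_MAX(A, B)];	/* OK: size is constant */
	};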
 
diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c
index 7607d18b..41e7c9b 100644
--- a/fs/crypto/hkdf.c
+++ b/fs/crypto/hkdf.c
@@ -4,7 +4,9 @@
  * Function"), aka RFC 5869.  See also the original paper (Krawczyk 2010):
  * "Cryptographic Extraction and Key Derivation: The HKDF Scheme".
  *
- * This is used to derive keys from the fscrypt master keys.
+ * This is used to derive keys from the fscrypt master keys (or from the
+ * "software secrets" which hardware derives from the fscrypt master keys, in
+ * the case that the fscrypt master keys are hardware-wrapped keys).
  *
  * Copyright 2019 Google LLC
  */
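
For reference, the two RFC 5869 steps behind fscrypt's HKDF are (standard
definition restated here, not code from this patch):

	PRK  = HMAC-Hash(salt, IKM)                         (HKDF-Extract)
	T(i) = HMAC-Hash(PRK, T(i-1) || info || i)          (HKDF-Expand, T(0) = "")
	OKM  = first L bytes of T(1) || T(2) || ...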
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index c57bebf..a6a47a7 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -13,10 +13,12 @@
  */
 
 #include <linux/blk-crypto.h>
+#include <linux/blk-crypto-profile.h>
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/sched/mm.h>
 #include <linux/slab.h>
+#include <linux/uio.h>
 
 #include "fscrypt_private.h"
 
@@ -64,7 +66,8 @@
 }
 
 /* Enable inline encryption for this file if supported. */
-int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
+int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
+				   bool is_hw_wrapped_key)
 {
 	const struct inode *inode = ci->ci_inode;
 	struct super_block *sb = inode->i_sb;
@@ -105,6 +108,9 @@
 	crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
 	crypto_cfg.data_unit_size = sb->s_blocksize;
 	crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
+	crypto_cfg.key_type =
+		is_hw_wrapped_key ? BLK_CRYPTO_KEY_TYPE_HW_WRAPPED :
+		BLK_CRYPTO_KEY_TYPE_STANDARD;
 	num_devs = fscrypt_get_num_devices(sb);
 	devs = kmalloc_array(num_devs, sizeof(*devs), GFP_KERNEL);
 	if (!devs)
@@ -125,11 +131,15 @@
 
 int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
 				     const u8 *raw_key,
+				     unsigned int raw_key_size,
+				     bool is_hw_wrapped,
 				     const struct fscrypt_info *ci)
 {
 	const struct inode *inode = ci->ci_inode;
 	struct super_block *sb = inode->i_sb;
 	enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
+	enum blk_crypto_key_type key_type = is_hw_wrapped ?
+		BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_STANDARD;
 	int num_devs = fscrypt_get_num_devices(sb);
 	int queue_refs = 0;
 	struct fscrypt_blk_crypto_key *blk_key;
@@ -143,7 +153,8 @@
 	blk_key->num_devs = num_devs;
 	fscrypt_get_devices(sb, num_devs, blk_key->devs);
 
-	err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode,
+	err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size,
+				  key_type, crypto_mode,
 				  fscrypt_get_dun_bytes(ci), sb->s_blocksize);
 	if (err) {
 		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
@@ -203,6 +214,55 @@
 	}
 }
 
+/*
+ * Ask the inline encryption hardware to derive the software secret from a
+ * hardware-wrapped key.  Returns -EOPNOTSUPP if hardware-wrapped keys aren't
+ * supported on this filesystem or hardware.
+ */
+int fscrypt_derive_sw_secret(struct super_block *sb, const u8 *wrapped_key,
+			     unsigned int wrapped_key_size,
+			     u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+	struct blk_crypto_profile *profile;
+	int num_devs;
+
+	/* The filesystem must be mounted with -o inlinecrypt */
+	if (!(sb->s_flags & SB_INLINECRYPT))
+		return -EOPNOTSUPP;
+
+	/*
+	 * Hardware-wrapped keys might be specific to a particular storage
+	 * device, so for now we don't allow them to be used if the filesystem
+	 * uses block devices with different crypto profiles.  This way, there
+	 * is no ambiguity about which ->derive_sw_secret method to call.
+	 */
+	profile = bdev_get_queue(sb->s_bdev)->crypto_profile;
+	num_devs = fscrypt_get_num_devices(sb);
+	if (num_devs > 1) {
+		struct request_queue **devs =
+			kmalloc_array(num_devs, sizeof(*devs), GFP_KERNEL);
+		int i;
+
+		if (!devs)
+			return -ENOMEM;
+
+		fscrypt_get_devices(sb, num_devs, devs);
+
+		for (i = 0; i < num_devs; i++) {
+			if (devs[i]->crypto_profile != profile) {
+				fscrypt_warn(NULL,
+					     "unsupported multi-device configuration for hardware-wrapped keys");
+				kfree(devs);
+				return -EOPNOTSUPP;
+			}
+		}
+		kfree(devs);
+	}
+
+	return blk_crypto_derive_sw_secret(profile, wrapped_key,
+					   wrapped_key_size, sw_secret);
+}
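+
+/*
+ * Putting the pieces together, the hardware-wrapped key hierarchy is
+ * roughly (an informal summary of the comments in this series):
+ *
+ *   wrapped master key --(inline-crypto hardware)--> sw_secret
+ *   sw_secret --(fscrypt HKDF)--> key identifier, filenames keys, ...
+ *   wrapped master key --(programmed directly into hw)--> contents keys
+ */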
+
 bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
 {
 	return inode->i_crypt_info->ci_inlinecrypt;
@@ -238,6 +298,8 @@
  * otherwise fscrypt_mergeable_bio() won't work as intended.
  *
  * The encryption context will be freed automatically when the bio is freed.
+ *
+ * This function also handles setting bi_skip_dm_default_key when needed.
  */
 void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
 			       u64 first_lblk, gfp_t gfp_mask)
@@ -245,6 +307,9 @@
 	const struct fscrypt_info *ci;
 	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 
+	if (fscrypt_inode_should_skip_dm_default_key(inode))
+		bio_set_skip_dm_default_key(bio);
+
 	if (!fscrypt_inode_uses_inline_crypto(inode))
 		return;
 	ci = inode->i_crypt_info;
@@ -315,6 +380,9 @@
  *
  * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
  *
+ * This function also returns false if the next part of the I/O would need to
+ * have a different value for the bi_skip_dm_default_key flag.
+ *
  * Return: true iff the I/O is mergeable
  */
 bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
@@ -325,6 +393,9 @@
 
 	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
 		return false;
+	if (bio_should_skip_dm_default_key(bio) !=
+	    fscrypt_inode_should_skip_dm_default_key(inode))
+		return false;
 	if (!bc)
 		return true;
 
@@ -358,8 +429,83 @@
 	u64 next_lblk;
 
 	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
-		return !bio->bi_crypt_context;
+		return !bio->bi_crypt_context &&
+		       !bio_should_skip_dm_default_key(bio);
 
 	return fscrypt_mergeable_bio(bio, inode, next_lblk);
 }
 EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);
+
+/**
+ * fscrypt_dio_supported() - check whether a direct I/O request is supported
+ *			     as far as encryption is concerned
+ * @iocb: the file and position the I/O is targeting
+ * @iter: the I/O data segment(s)
+ *
+ * Return: true if direct I/O is supported
+ */
+bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
+{
+	const struct inode *inode = file_inode(iocb->ki_filp);
+	const unsigned int blocksize = i_blocksize(inode);
+
+	/* If the file is unencrypted, no veto from us. */
+	if (!fscrypt_needs_contents_encryption(inode))
+		return true;
+
+	/* We only support direct I/O with inline crypto, not fs-layer crypto */
+	if (!fscrypt_inode_uses_inline_crypto(inode))
+		return false;
+
+	/*
+	 * Since the granularity of encryption is filesystem blocks, the I/O
+	 * must be block aligned -- not just disk sector aligned.
+	 */
+	if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize))
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
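+
+/*
+ * Example of the alignment rule above: with a 4096-byte filesystem block
+ * size, a DIO request at pos 0x3200 is 512-byte sector aligned but not
+ * block aligned, so IS_ALIGNED() fails and the caller must fall back to
+ * buffered I/O.
+ */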
+
+/**
+ * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
+ * @inode: the file on which I/O is being done
+ * @lblk: the block at which the I/O is being started
+ * @nr_blocks: the number of blocks we want to submit starting at @lblk
+ *
+ * Determine the limit to the number of blocks that can be submitted in the bio
+ * targeting @lblk without causing a data unit number (DUN) discontinuity.
+ *
+ * This is normally just @nr_blocks, as normally the DUNs just increment along
+ * with the logical blocks.  (Or the file is not encrypted.)
+ *
+ * In rare cases, fscrypt can be using an IV generation method that allows the
+ * DUN to wrap around within logically contiguous blocks, and that wraparound
+ * will occur.  If this happens, a value less than @nr_blocks will be returned
+ * so that the wraparound doesn't occur in the middle of the bio.
+ *
+ * Return: the actual number of blocks that can be submitted
+ */
+u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
+{
+	const struct fscrypt_info *ci = inode->i_crypt_info;
+	u32 dun;
+
+	if (!fscrypt_inode_uses_inline_crypto(inode))
+		return nr_blocks;
+
+	if (nr_blocks <= 1)
+		return nr_blocks;
+
+	if (!(fscrypt_policy_flags(&ci->ci_policy) &
+	      FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
+		return nr_blocks;
+
+	/* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */
+
+	dun = ci->ci_hashed_ino + lblk;
+
+	return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
+}
+EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);
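
A worked example of fscrypt_limit_io_blocks() above (values chosen for
illustration): with IV_INO_LBLK_32, dun = ci_hashed_ino + lblk, truncated to
32 bits. If dun comes out to 0xFFFFFFFE and the caller asks for nr_blocks = 8:

	limit    = U32_MAX + 1 - 0xFFFFFFFE = 0x100000000 - 0xFFFFFFFE = 2
	returned = min(nr_blocks = 8, 2) = 2

so the bio is cut right before the DUN wraps back to 0.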
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index 0b3ffbb..1ae62b5 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -103,11 +103,11 @@
 					struct key_preparsed_payload *prep)
 {
 	/*
-	 * We just charge FSCRYPT_MAX_KEY_SIZE bytes to the user's key quota for
-	 * each key, regardless of the exact key size.  The amount of memory
-	 * actually used is greater than the size of the raw key anyway.
+	 * We just charge FSCRYPT_MAX_STANDARD_KEY_SIZE bytes to the user's key
+	 * quota for each key, regardless of the exact key size.  The amount of
+	 * memory actually used is greater than the size of the raw key anyway.
 	 */
-	return key_payload_reserve(key, FSCRYPT_MAX_KEY_SIZE);
+	return key_payload_reserve(key, FSCRYPT_MAX_STANDARD_KEY_SIZE);
 }
 
 static void fscrypt_user_key_describe(const struct key *key, struct seq_file *m)
@@ -479,20 +479,36 @@
 	int err;
 
 	if (key_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) {
-		err = fscrypt_init_hkdf(&secret->hkdf, secret->raw,
-					secret->size);
+		u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE];
+		u8 *kdf_key = secret->raw;
+		unsigned int kdf_key_size = secret->size;
+		u8 keyid_kdf_ctx = HKDF_CONTEXT_KEY_IDENTIFIER;
+
+		/*
+		 * For standard keys, the fscrypt master key is used directly as
+		 * the fscrypt KDF key.  For hardware-wrapped keys, we have to
+		 * pass the master key to the hardware to derive the KDF key,
+		 * which is then only used to derive non-file-contents subkeys.
+		 */
+		if (secret->is_hw_wrapped) {
+			err = fscrypt_derive_sw_secret(sb, secret->raw,
+						       secret->size, sw_secret);
+			if (err)
+				return err;
+			kdf_key = sw_secret;
+			kdf_key_size = sizeof(sw_secret);
+		}
+		err = fscrypt_init_hkdf(&secret->hkdf, kdf_key, kdf_key_size);
+		/*
+		 * Now that the KDF context is initialized, the raw KDF key is
+		 * no longer needed.
+		 */
+		memzero_explicit(kdf_key, kdf_key_size);
 		if (err)
 			return err;
 
-		/*
-		 * Now that the HKDF context is initialized, the raw key is no
-		 * longer needed.
-		 */
-		memzero_explicit(secret->raw, secret->size);
-
 		/* Calculate the key identifier */
-		err = fscrypt_hkdf_expand(&secret->hkdf,
-					  HKDF_CONTEXT_KEY_IDENTIFIER, NULL, 0,
+		err = fscrypt_hkdf_expand(&secret->hkdf, keyid_kdf_ctx, NULL, 0,
 					  key_spec->u.identifier,
 					  FSCRYPT_KEY_IDENTIFIER_SIZE);
 		if (err)
@@ -506,7 +522,7 @@
 	const struct fscrypt_provisioning_key_payload *payload = prep->data;
 
 	if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE ||
-	    prep->datalen > sizeof(*payload) + FSCRYPT_MAX_KEY_SIZE)
+	    prep->datalen > sizeof(*payload) + FSCRYPT_MAX_ANY_KEY_SIZE)
 		return -EINVAL;
 
 	if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
@@ -655,15 +671,30 @@
 		return -EACCES;
 
 	memset(&secret, 0, sizeof(secret));
+
+	if (arg.__flags) {
+		if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED)
+			return -EINVAL;
+		if (arg.key_spec.type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER)
+			return -EINVAL;
+		secret.is_hw_wrapped = true;
+	}
+
 	if (arg.key_id) {
 		if (arg.raw_size != 0)
 			return -EINVAL;
 		err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret);
 		if (err)
 			goto out_wipe_secret;
+		err = -EINVAL;
+		if (secret.size > FSCRYPT_MAX_STANDARD_KEY_SIZE &&
+		    !secret.is_hw_wrapped)
+			goto out_wipe_secret;
 	} else {
 		if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE ||
-		    arg.raw_size > FSCRYPT_MAX_KEY_SIZE)
+		    arg.raw_size > (secret.is_hw_wrapped ?
+				    FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE :
+				    FSCRYPT_MAX_STANDARD_KEY_SIZE))
 			return -EINVAL;
 		secret.size = arg.raw_size;
 		err = -EFAULT;
@@ -696,15 +727,15 @@
 int fscrypt_add_test_dummy_key(struct super_block *sb,
 			       struct fscrypt_key_specifier *key_spec)
 {
-	static u8 test_key[FSCRYPT_MAX_KEY_SIZE];
+	static u8 test_key[FSCRYPT_MAX_STANDARD_KEY_SIZE];
 	struct fscrypt_master_key_secret secret;
 	int err;
 
-	get_random_once(test_key, FSCRYPT_MAX_KEY_SIZE);
+	get_random_once(test_key, FSCRYPT_MAX_STANDARD_KEY_SIZE);
 
 	memset(&secret, 0, sizeof(secret));
-	secret.size = FSCRYPT_MAX_KEY_SIZE;
-	memcpy(secret.raw, test_key, FSCRYPT_MAX_KEY_SIZE);
+	secret.size = FSCRYPT_MAX_STANDARD_KEY_SIZE;
+	memcpy(secret.raw, test_key, FSCRYPT_MAX_STANDARD_KEY_SIZE);
 
 	err = add_master_key(sb, &secret, key_spec);
 	wipe_master_key_secret(&secret);
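
For context, userspace opts into the new path by setting the flag checked
above. A hedged sketch of the call follows; it assumes the Android-specific
__flags member and __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED definition that this
series adds to the fscrypt UAPI header, which may differ across branches:

	#include <linux/fscrypt.h>
	#include <string.h>
	#include <sys/ioctl.h>

	int add_hw_wrapped_key(int mnt_fd, const unsigned char *wkey,
			       unsigned int wkey_size)
	{
		struct {
			struct fscrypt_add_key_arg arg;
			unsigned char raw[128];	/* must fit the wrapped key */
		} a;

		memset(&a, 0, sizeof(a));
		/* hardware-wrapped keys are only allowed with v2 policies */
		a.arg.key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
		a.arg.raw_size = wkey_size;
		a.arg.__flags = __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED;	/* assumed name */
		memcpy(a.raw, wkey, wkey_size);
		return ioctl(mnt_fd, FS_IOC_ADD_ENCRYPTION_KEY, &a.arg);
	}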
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index eede186..e668c31 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -124,15 +124,23 @@
  * Prepare the crypto transform object or blk-crypto key in @prep_key, given the
  * raw key, encryption mode (@ci->ci_mode), flag indicating which encryption
  * implementation (fs-layer or blk-crypto) will be used (@ci->ci_inlinecrypt),
- * and IV generation method (@ci->ci_policy.flags).
+ * and IV generation method (@ci->ci_policy.flags).  The raw key can be either a
+ * standard key or a hardware-wrapped key, as indicated by @is_hw_wrapped; it
+ * can only be a hardware-wrapped key if blk-crypto will be used.
  */
-int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
-			const u8 *raw_key, const struct fscrypt_info *ci)
+static int __fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
+				 const u8 *raw_key, unsigned int raw_key_size,
+				 bool is_hw_wrapped,
+				 const struct fscrypt_info *ci)
 {
 	struct crypto_skcipher *tfm;
 
 	if (fscrypt_using_inline_encryption(ci))
-		return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, ci);
+		return fscrypt_prepare_inline_crypt_key(prep_key,
+				raw_key, raw_key_size, is_hw_wrapped, ci);
+
+	if (WARN_ON(is_hw_wrapped || raw_key_size != ci->ci_mode->keysize))
+		return -EINVAL;
 
 	tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode);
 	if (IS_ERR(tfm))
@@ -147,6 +155,13 @@
 	return 0;
 }
 
+int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
+			const u8 *raw_key, const struct fscrypt_info *ci)
+{
+	return __fscrypt_prepare_key(prep_key, raw_key, ci->ci_mode->keysize,
+				     false, ci);
+}
+
 /* Destroy a crypto transform object and/or blk-crypto key. */
 void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key)
 {
@@ -171,14 +186,29 @@
 	struct fscrypt_mode *mode = ci->ci_mode;
 	const u8 mode_num = mode - fscrypt_modes;
 	struct fscrypt_prepared_key *prep_key;
-	u8 mode_key[FSCRYPT_MAX_KEY_SIZE];
+	u8 mode_key[FSCRYPT_MAX_STANDARD_KEY_SIZE];
 	u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
 	unsigned int hkdf_infolen = 0;
+	bool use_hw_wrapped_key = false;
 	int err;
 
 	if (WARN_ON(mode_num > FSCRYPT_MODE_MAX))
 		return -EINVAL;
 
+	if (mk->mk_secret.is_hw_wrapped && S_ISREG(inode->i_mode)) {
+		/* Using a hardware-wrapped key for file contents encryption */
+		if (!fscrypt_using_inline_encryption(ci)) {
+			if (sb->s_flags & SB_INLINECRYPT)
+				fscrypt_warn(ci->ci_inode,
+					     "Hardware-wrapped key required, but no suitable inline encryption hardware is available");
+			else
+				fscrypt_warn(ci->ci_inode,
+					     "Hardware-wrapped keys require inline encryption (-o inlinecrypt)");
+			return -EINVAL;
+		}
+		use_hw_wrapped_key = true;
+	}
+
 	prep_key = &keys[mode_num];
 	if (fscrypt_is_key_prepared(prep_key, ci)) {
 		ci->ci_enc_key = *prep_key;
@@ -190,6 +220,14 @@
 	if (fscrypt_is_key_prepared(prep_key, ci))
 		goto done_unlock;
 
+	if (use_hw_wrapped_key) {
+		err = __fscrypt_prepare_key(prep_key, mk->mk_secret.raw,
+					    mk->mk_secret.size, true, ci);
+		if (err)
+			goto out_unlock;
+		goto done_unlock;
+	}
+
 	BUILD_BUG_ON(sizeof(mode_num) != 1);
 	BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
 	BUILD_BUG_ON(sizeof(hkdf_info) != 17);
@@ -312,6 +350,14 @@
 {
 	int err;
 
+	if (mk->mk_secret.is_hw_wrapped &&
+	    !(ci->ci_policy.v2.flags & (FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 |
+					FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))) {
+		fscrypt_warn(ci->ci_inode,
+			     "Hardware-wrapped keys are only supported with IV_INO_LBLK policies");
+		return -EINVAL;
+	}
+
 	if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
 		/*
 		 * DIRECT_KEY: instead of deriving per-file encryption keys, the
@@ -338,7 +384,7 @@
 		   FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
 		err = fscrypt_setup_iv_ino_lblk_32_key(ci, mk);
 	} else {
-		u8 derived_key[FSCRYPT_MAX_KEY_SIZE];
+		u8 derived_key[FSCRYPT_MAX_STANDARD_KEY_SIZE];
 
 		err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
 					  HKDF_CONTEXT_PER_FILE_ENC_KEY,
@@ -421,10 +467,6 @@
 	struct fscrypt_key_specifier mk_spec;
 	int err;
 
-	err = fscrypt_select_encryption_impl(ci);
-	if (err)
-		return err;
-
 	switch (ci->ci_policy.version) {
 	case FSCRYPT_POLICY_V1:
 		mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
@@ -449,6 +491,10 @@
 		    ci->ci_policy.version != FSCRYPT_POLICY_V1)
 			return PTR_ERR(key);
 
+		err = fscrypt_select_encryption_impl(ci, false);
+		if (err)
+			return err;
+
 		/*
 		 * As a legacy fallback for v1 policies, search for the key in
 		 * the current task's subscribed keyrings too.  Don't move this
@@ -472,8 +518,20 @@
 		goto out_release_key;
 	}
 
+	err = fscrypt_select_encryption_impl(ci, mk->mk_secret.is_hw_wrapped);
+	if (err)
+		goto out_release_key;
+
 	switch (ci->ci_policy.version) {
 	case FSCRYPT_POLICY_V1:
+		if (WARN_ON(mk->mk_secret.is_hw_wrapped)) {
+			/*
+			 * This should never happen, as adding a v1 policy key
+			 * that is hardware-wrapped isn't allowed.
+			 */
+			err = -EINVAL;
+			goto out_release_key;
+		}
 		err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw);
 		break;
 	case FSCRYPT_POLICY_V2:
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
index 2762c53..b2f9031 100644
--- a/fs/crypto/keysetup_v1.c
+++ b/fs/crypto/keysetup_v1.c
@@ -118,7 +118,8 @@
 	payload = (const struct fscrypt_key *)ukp->data;
 
 	if (ukp->datalen != sizeof(struct fscrypt_key) ||
-	    payload->size < 1 || payload->size > FSCRYPT_MAX_KEY_SIZE) {
+	    payload->size < 1 ||
+	    payload->size > FSCRYPT_MAX_STANDARD_KEY_SIZE) {
 		fscrypt_warn(NULL,
 			     "key with description '%s' has invalid payload",
 			     key->description);
@@ -148,7 +149,7 @@
 	const struct fscrypt_mode	*dk_mode;
 	struct fscrypt_prepared_key	dk_key;
 	u8				dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
-	u8				dk_raw[FSCRYPT_MAX_KEY_SIZE];
+	u8				dk_raw[FSCRYPT_MAX_STANDARD_KEY_SIZE];
 };
 
 static void free_direct_key(struct fscrypt_direct_key *dk)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 6544435..6e63deb3 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/fs.h>
+#include <linux/fscrypt.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
@@ -391,6 +392,7 @@
 	      sector_t first_sector, int nr_vecs)
 {
 	struct bio *bio;
+	struct inode *inode = dio->inode;
 
 	/*
 	 * bio_alloc() is guaranteed to return a bio when allowed to sleep and
@@ -398,6 +400,9 @@
 	 */
 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
+	fscrypt_set_bio_crypt_ctx(bio, inode,
+				  sdio->cur_page_fs_offset >> inode->i_blkbits,
+				  GFP_KERNEL);
 	bio_set_dev(bio, bdev);
 	bio->bi_iter.bi_sector = first_sector;
 	bio_set_op_attrs(bio, dio->op, dio->op_flags);
@@ -761,9 +766,17 @@
 		 * current logical offset in the file does not equal what would
 		 * be the next logical offset in the bio, submit the bio we
 		 * have.
+		 *
+		 * When fscrypt inline encryption is used, data unit number
+		 * (DUN) contiguity is also required.  Normally that's implied
+		 * by logical contiguity.  However, certain IV generation
+		 * methods (e.g. IV_INO_LBLK_32) don't guarantee it.  So, we
+		 * must explicitly check fscrypt_mergeable_bio() too.
 		 */
 		if (sdio->final_block_in_bio != sdio->cur_page_block ||
-		    cur_offset != bio_next_offset)
+		    cur_offset != bio_next_offset ||
+		    !fscrypt_mergeable_bio(sdio->bio, dio->inode,
+					   cur_offset >> dio->inode->i_blkbits))
 			dio_bio_submit(dio, sdio);
 	}
 
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 06f4c5a..ae562aa 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
+#include <linux/freezer.h>
 #include <linux/uaccess.h>
 #include <asm/io.h>
 #include <asm/mman.h>
@@ -1846,8 +1847,8 @@
 		write_unlock_irq(&ep->lock);
 
 		if (!eavail)
-			timed_out = !schedule_hrtimeout_range(to, slack,
-							      HRTIMER_MODE_ABS);
+			timed_out = !freezable_schedule_hrtimeout_range(to, slack,
+									HRTIMER_MODE_ABS);
 		__set_current_state(TASK_RUNNING);
 
 		/*
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 4c5f410..d731f0b 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -36,9 +36,11 @@
 #include "acl.h"
 #include "truncate.h"
 
-static bool ext4_dio_supported(struct inode *inode)
+static bool ext4_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
 {
-	if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
+	struct inode *inode = file_inode(iocb->ki_filp);
+
+	if (!fscrypt_dio_supported(iocb, iter))
 		return false;
 	if (fsverity_active(inode))
 		return false;
@@ -61,7 +63,7 @@
 		inode_lock_shared(inode);
 	}
 
-	if (!ext4_dio_supported(inode)) {
+	if (!ext4_dio_supported(iocb, to)) {
 		inode_unlock_shared(inode);
 		/*
 		 * Fallback to buffered I/O if the operation being performed on
@@ -511,7 +513,7 @@
 	}
 
 	/* Fallback to buffered I/O if the inode does not support direct I/O. */
-	if (!ext4_dio_supported(inode)) {
+	if (!ext4_dio_supported(iocb, from)) {
 		if (ilock_shared)
 			inode_unlock_shared(inode);
 		else
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 39a1ab12..ec2b21f 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -13,6 +13,7 @@
 #include "ext4.h"
 #include "xattr.h"
 #include "truncate.h"
+#include <trace/events/android_fs.h>
 
 #define EXT4_XATTR_SYSTEM_DATA	"data"
 #define EXT4_MIN_INLINE_DATA_SIZE	((sizeof(__le32) * EXT4_N_BLOCKS))
@@ -509,6 +510,17 @@
 		return -EAGAIN;
 	}
 
+	if (trace_android_fs_dataread_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_dataread_start(inode, page_offset(page),
+						PAGE_SIZE, current->pid,
+						path, current->comm);
+	}
+
 	/*
 	 * Current inline data can only exist in the 1st page,
 	 * So for all the other pages, just set them uptodate.
@@ -520,6 +532,8 @@
 		SetPageUptodate(page);
 	}
 
+	trace_android_fs_dataread_end(inode, page_offset(page), PAGE_SIZE);
+
 	up_read(&EXT4_I(inode)->xattr_sem);
 
 	unlock_page(page);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index bfd3545..4439e4e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -48,6 +48,7 @@
 #include "truncate.h"
 
 #include <trace/events/ext4.h>
+#include <trace/events/android_fs.h>
 
 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
 			      struct ext4_inode_info *ei)
@@ -1144,6 +1145,16 @@
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
 		return -EIO;
 
+	if (trace_android_fs_datawrite_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_datawrite_start(inode, pos, len,
+						 current->pid, path,
+						 current->comm);
+	}
 	trace_ext4_write_begin(inode, pos, len, flags);
 	/*
 	 * Reserve one block more for addition to orphan list in case
@@ -1286,6 +1297,7 @@
 	int i_size_changed = 0;
 	bool verity = ext4_verity_in_progress(inode);
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_write_end(inode, pos, len, copied);
 
 	if (ext4_has_inline_data(inode))
@@ -1389,6 +1401,7 @@
 	int size_changed = 0;
 	bool verity = ext4_verity_in_progress(inode);
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
 	from = pos & (PAGE_SIZE - 1);
 	to = from + len;
@@ -2942,6 +2955,16 @@
 					len, flags, pagep, fsdata);
 	}
 	*fsdata = (void *)0;
+	if (trace_android_fs_datawrite_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_datawrite_start(inode, pos, len,
+						 current->pid,
+						 path, current->comm);
+	}
 	trace_ext4_da_write_begin(inode, pos, len, flags);
 
 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -3026,6 +3049,7 @@
 		return ext4_write_end(file, mapping, pos,
 				      len, copied, page, fsdata);
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_da_write_end(inode, pos, len, copied);
 
 	if (write_mode != CONVERT_INLINE_DATA &&
@@ -3420,6 +3444,14 @@
 	if (ret < 0)
 		return ret;
 out:
+
+	/*
+	 * When inline encryption is enabled, sometimes I/O to an encrypted file
+	 * has to be broken up to guarantee DUN contiguity. Handle this by
+	 * limiting the length of the mapping returned.
+	 */
+	map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
+
 	ext4_set_iomap(inode, iomap, &map, offset, length);
 
 	return 0;
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 3db9234..fdabff5 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -46,6 +46,7 @@
 #include <linux/cleancache.h>
 
 #include "ext4.h"
+#include <trace/events/android_fs.h>
 
 #define NUM_PREALLOC_POST_READ_CTXS	128
 
@@ -159,6 +160,17 @@
 	return bio->bi_private && !bio->bi_status;
 }
 
+static void
+ext4_trace_read_completion(struct bio *bio)
+{
+	struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+	if (first_page != NULL)
+		trace_android_fs_dataread_end(first_page->mapping->host,
+					      page_offset(first_page),
+					      bio->bi_iter.bi_size);
+}
+
 /*
  * I/O completion handler for multipage BIOs.
  *
@@ -173,6 +185,9 @@
  */
 static void mpage_end_io(struct bio *bio)
 {
+	if (trace_android_fs_dataread_start_enabled())
+		ext4_trace_read_completion(bio);
+
 	if (bio_post_read_required(bio)) {
 		struct bio_post_read_ctx *ctx = bio->bi_private;
 
@@ -221,6 +236,30 @@
 	return i_size_read(inode);
 }
 
+static void
+ext4_submit_bio_read(struct bio *bio)
+{
+	if (trace_android_fs_dataread_start_enabled()) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL) {
+			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+			path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    first_page->mapping->host);
+			trace_android_fs_dataread_start(
+				first_page->mapping->host,
+				page_offset(first_page),
+				bio->bi_iter.bi_size,
+				current->pid,
+				path,
+				current->comm);
+		}
+	}
+	submit_bio(bio);
+}
+
 int ext4_mpage_readpages(struct inode *inode,
 		struct readahead_control *rac, struct page *page)
 {
@@ -363,7 +402,7 @@
 		if (bio && (last_block_in_bio != blocks[0] - 1 ||
 			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
 		submit_and_realloc:
-			submit_bio(bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		}
 		if (bio == NULL) {
@@ -389,14 +428,14 @@
 		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
 		     (relative_block == map.m_len)) ||
 		    (first_hole != blocks_per_page)) {
-			submit_bio(bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		} else
 			last_block_in_bio = blocks[blocks_per_page - 1];
 		goto next_page;
 	confused:
 		if (bio) {
-			submit_bio(bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		}
 		if (!PageUptodate(page))
@@ -408,7 +447,7 @@
 			put_page(page);
 	}
 	if (bio)
-		submit_bio(bio);
+		ext4_submit_bio_read(bio);
 	return 0;
 }
 
diff --git a/fs/f2fs/OWNERS b/fs/f2fs/OWNERS
new file mode 100644
index 0000000..6a5c0116
--- /dev/null
+++ b/fs/f2fs/OWNERS
@@ -0,0 +1 @@
+jaegeuk@google.com
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9f754aa..3bd0cc2 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -27,6 +27,7 @@
 #include "segment.h"
 #include "iostat.h"
 #include <trace/events/f2fs.h>
+#include <trace/events/android_fs.h>
 
 #define NUM_PREALLOC_POST_READ_CTXS	128
 
@@ -425,6 +426,8 @@
 	 */
 	if (!fio || !fio->encrypted_page)
 		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
+	else if (fscrypt_inode_should_skip_dm_default_key(inode))
+		bio_set_skip_dm_default_key(bio);
 }
 
 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
@@ -436,7 +439,9 @@
 	 * read/write raw data without encryption.
 	 */
 	if (fio && fio->encrypted_page)
-		return !bio_has_crypt_ctx(bio);
+		return !bio_has_crypt_ctx(bio) &&
+			(bio_should_skip_dm_default_key(bio) ==
+			 fscrypt_inode_should_skip_dm_default_key(inode));
 
 	return fscrypt_mergeable_bio(bio, inode, next_idx);
 }
@@ -3424,6 +3429,22 @@
 	block_t blkaddr = NULL_ADDR;
 	int err = 0;
 
+	/*
+	 * Skip tracing for quota operations, since they can deadlock:
+	 * kswapd -> f2fs_evict_inode -> dquot_drop ->
+	 *   f2fs_dquot_commit -> f2fs_write_begin ->
+	 *   d_obtain_alias -> __d_alloc -> kmem_cache_alloc(GFP_KERNEL)
+	 */
+	if (trace_android_fs_datawrite_start_enabled() && !IS_NOQUOTA(inode)) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_datawrite_start(inode, pos, len,
+						 current->pid, path,
+						 current->comm);
+	}
 	trace_f2fs_write_begin(inode, pos, len, flags);
 
 	if (!f2fs_is_checkpoint_ready(sbi)) {
@@ -3554,6 +3575,7 @@
 {
 	struct inode *inode = page->mapping->host;
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_f2fs_write_end(inode, pos, len, copied);
 
 	/*
@@ -3687,6 +3709,29 @@
 
 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
 
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (rw == READ)) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_dataread_start(inode, offset,
+						count, current->pid, path,
+						current->comm);
+	}
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (rw == WRITE)) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_datawrite_start(inode, offset, count,
+						 current->pid, path,
+						 current->comm);
+	}
+
 	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
 		iocb->ki_hint = WRITE_LIFE_NOT_SET;
 
@@ -3742,6 +3787,13 @@
 	}
 
 out:
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (rw == READ))
+		trace_android_fs_dataread_end(inode, offset, count);
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (rw == WRITE))
+		trace_android_fs_datawrite_end(inode, offset, count);
+
 	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
 
 	return err;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index ce9fc9f..64bcb81 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -4346,7 +4346,11 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int rw = iov_iter_rw(iter);
 
-	if (f2fs_post_read_required(inode))
+	if (!fscrypt_dio_supported(iocb, iter))
+		return true;
+	if (fsverity_active(inode))
+		return true;
+	if (f2fs_compressed_file(inode))
 		return true;
 
 	/* disallow direct IO if any of devices has unaligned blksize */
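
Behavioural note on the hunk above: f2fs_force_buffered_io() used to force buffered I/O for any file where f2fs_post_read_required() was true. With inline block encryption, fscrypt_dio_supported() now allows direct I/O on encrypted files when the hardware can handle it, so only fs-verity and compressed files are unconditionally forced onto the buffered path.
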
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index ea08f0d..928fea6 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -13,6 +13,7 @@
 #include "f2fs.h"
 #include "node.h"
 #include <trace/events/f2fs.h>
+#include <trace/events/android_fs.h>
 
 bool f2fs_may_inline_data(struct inode *inode)
 {
@@ -86,14 +87,29 @@
 {
 	struct page *ipage;
 
+	if (trace_android_fs_dataread_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_dataread_start(inode, page_offset(page),
+						PAGE_SIZE, current->pid,
+						path, current->comm);
+	}
+
 	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage)) {
+		trace_android_fs_dataread_end(inode, page_offset(page),
+					      PAGE_SIZE);
 		unlock_page(page);
 		return PTR_ERR(ipage);
 	}
 
 	if (!f2fs_has_inline_data(inode)) {
 		f2fs_put_page(ipage, 1);
+		trace_android_fs_dataread_end(inode, page_offset(page),
+					      PAGE_SIZE);
 		return -EAGAIN;
 	}
 
@@ -105,6 +121,8 @@
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 	f2fs_put_page(ipage, 1);
+	trace_android_fs_dataread_end(inode, page_offset(page),
+				      PAGE_SIZE);
 	unlock_page(page);
 	return 0;
 }
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 0c48b35..d9e1b47 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
 
 fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o
+fuse-y += passthrough.o
 fuse-$(CONFIG_FUSE_DAX) += dax.o
 
 virtiofs-y := virtio_fs.o
diff --git a/fs/fuse/OWNERS b/fs/fuse/OWNERS
new file mode 100644
index 0000000..5ee6098
--- /dev/null
+++ b/fs/fuse/OWNERS
@@ -0,0 +1 @@
+balsini@google.com
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index cd54a52..94b5a5e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -14,6 +14,7 @@
 #include <linux/sched/signal.h>
 #include <linux/uio.h>
 #include <linux/miscdevice.h>
+#include <linux/namei.h>
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/slab.h>
@@ -1920,6 +1921,14 @@
 		err = copy_out_args(cs, req->args, nbytes);
 	fuse_copy_finish(cs);
 
+	if (!err && req->in.h.opcode == FUSE_CANONICAL_PATH) {
+		char *path = (char *)req->args->out_args[0].value;
+
+		path[req->args->out_args[0].size - 1] = 0;
+		req->out.h.error =
+			kern_path(path, 0, req->args->canonical_path);
+	}
+
 	spin_lock(&fpq->lock);
 	clear_bit(FR_LOCKED, &req->flags);
 	if (!fpq->connected)
@@ -2266,7 +2275,8 @@
 				 * uses the same ioctl handler.
 				 */
 				if (old->f_op == file->f_op &&
-				    old->f_cred->user_ns == file->f_cred->user_ns)
+				    old->f_cred->user_ns ==
+					    file->f_cred->user_ns)
 					fud = fuse_get_dev(old);
 
 				if (fud) {
@@ -2278,6 +2288,15 @@
 			}
 		}
 		break;
+	case FUSE_DEV_IOC_PASSTHROUGH_OPEN:
+		res = -EFAULT;
+		if (!get_user(oldfd, (__u32 __user *)arg)) {
+			res = -EINVAL;
+			fud = fuse_get_dev(file);
+			if (fud)
+				res = fuse_passthrough_open(fud, oldfd);
+		}
+		break;
 	default:
 		res = -ENOTTY;
 		break;
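
To make the new FUSE_DEV_IOC_PASSTHROUGH_OPEN flow concrete, here is a hedged daemon-side sketch: the daemon opens the lower file itself, registers the fd on its /dev/fuse descriptor, and later echoes the returned handle in the FUSE_OPEN reply. The ioctl number and the fuse_open_out.passthrough_fh field come from the UAPI half of this change, which is outside this excerpt; error handling is deliberately minimal.

/* Illustrative daemon-side sketch, not part of the kernel change. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fuse.h>

static int register_passthrough(int fuse_dev_fd, const char *lower_path,
				struct fuse_open_out *open_out)
{
	int lower_fd = open(lower_path, O_RDWR);
	__u32 arg;
	int handle;

	if (lower_fd < 0)
		return -1;

	/* The kernel reads a __u32 fd through the ioctl argument pointer. */
	arg = lower_fd;
	handle = ioctl(fuse_dev_fd, FUSE_DEV_IOC_PASSTHROUGH_OPEN, &arg);
	if (handle <= 0)
		return -1;

	/* Consumed by fuse_passthrough_setup() when the open completes. */
	open_out->passthrough_fh = handle;
	return 0;
}
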
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 0654bfe..28ec832 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -328,6 +328,45 @@
 	return mnt;
 }
 
+/*
+ * Get the canonical path. Since we must translate to a path, this must be done
+ * in the context of the userspace daemon; however, the daemon cannot look up
+ * paths on its own. Instead, the lookup is handled as a special case when the
+ * daemon writes the FUSE_CANONICAL_PATH reply back through /dev/fuse.
+ */
+static void fuse_dentry_canonical_path(const struct path *path,
+				       struct path *canonical_path)
+{
+	struct inode *inode = d_inode(path->dentry);
+	struct fuse_mount *fm = get_fuse_mount_super(path->mnt->mnt_sb);
+	FUSE_ARGS(args);
+	char *path_name;
+	int err;
+
+	path_name = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!path_name)
+		goto default_path;
+
+	args.opcode = FUSE_CANONICAL_PATH;
+	args.nodeid = get_node_id(inode);
+	args.in_numargs = 0;
+	args.out_numargs = 1;
+	args.out_args[0].size = PATH_MAX;
+	args.out_args[0].value = path_name;
+	args.canonical_path = canonical_path;
+	args.out_argvar = 1;
+
+	err = fuse_simple_request(fm, &args);
+	free_page((unsigned long)path_name);
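+	/*
+	 * With out_argvar set, a positive return value is the number of bytes
+	 * the daemon wrote, i.e. success; canonical_path itself was filled in
+	 * by the kern_path() call on the /dev/fuse write path.
+	 */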
+	if (err > 0)
+		return;
+default_path:
+	canonical_path->dentry = path->dentry;
+	canonical_path->mnt = path->mnt;
+	path_get(canonical_path);
+}
+
 const struct dentry_operations fuse_dentry_operations = {
 	.d_revalidate	= fuse_dentry_revalidate,
 	.d_delete	= fuse_dentry_delete,
@@ -336,6 +375,7 @@
 	.d_release	= fuse_dentry_release,
 #endif
 	.d_automount	= fuse_dentry_automount,
+	.d_canonical_path = fuse_dentry_canonical_path,
 };
 
 const struct dentry_operations fuse_root_dentry_operations = {
@@ -468,6 +508,7 @@
 {
 	int err;
 	struct inode *inode;
+	struct fuse_conn *fc = get_fuse_conn(dir);
 	struct fuse_mount *fm = get_fuse_mount(dir);
 	FUSE_ARGS(args);
 	struct fuse_forget_link *forget;
@@ -529,6 +570,7 @@
 	ff->fh = outopen.fh;
 	ff->nodeid = outentry.nodeid;
 	ff->open_flags = outopen.open_flags;
+	fuse_passthrough_setup(fc, ff, &outopen);
 	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
 			  &outentry.attr, entry_attr_timeout(&outentry), 0);
 	if (!inode) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9d6c5f6..17ddf6b 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -146,7 +146,7 @@
 		if (!err) {
 			ff->fh = outarg.fh;
 			ff->open_flags = outarg.open_flags;
-
+			fuse_passthrough_setup(fc, ff, &outarg);
 		} else if (err != -ENOSYS) {
 			fuse_file_free(ff);
 			return ERR_PTR(err);
@@ -304,6 +304,8 @@
 	struct fuse_release_args *ra = ff->release_args;
 	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
 
+	fuse_passthrough_release(&ff->passthrough);
+
 	fuse_prepare_release(fi, ff, open_flags, opcode);
 
 	if (ff->flock) {
@@ -1576,7 +1578,9 @@
 	if (FUSE_IS_DAX(inode))
 		return fuse_dax_read_iter(iocb, to);
 
-	if (!(ff->open_flags & FOPEN_DIRECT_IO))
+	if (ff->passthrough.filp)
+		return fuse_passthrough_read_iter(iocb, to);
+	else if (!(ff->open_flags & FOPEN_DIRECT_IO))
 		return fuse_cache_read_iter(iocb, to);
 	else
 		return fuse_direct_read_iter(iocb, to);
@@ -1594,7 +1598,9 @@
 	if (FUSE_IS_DAX(inode))
 		return fuse_dax_write_iter(iocb, from);
 
-	if (!(ff->open_flags & FOPEN_DIRECT_IO))
+	if (ff->passthrough.filp)
+		return fuse_passthrough_write_iter(iocb, from);
+	else if (!(ff->open_flags & FOPEN_DIRECT_IO))
 		return fuse_cache_write_iter(iocb, from);
 	else
 		return fuse_direct_write_iter(iocb, from);
@@ -2403,6 +2409,9 @@
 	if (FUSE_IS_DAX(file_inode(file)))
 		return fuse_dax_mmap(file, vma);
 
+	if (ff->passthrough.filp)
+		return fuse_passthrough_mmap(file, vma);
+
 	if (ff->open_flags & FOPEN_DIRECT_IO) {
 		/* Can't provide the coherency needed for MAP_SHARED */
 		if (vma->vm_flags & VM_MAYSHARE)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 198637b..e56e9c5 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -173,6 +173,17 @@
 struct fuse_mount;
 struct fuse_release_args;
 
+/**
+ * Reference to the lower filesystem file used for read/write operations
+ * served in passthrough mode, plus the credentials under which those
+ * operations are performed.
+ */
+struct fuse_passthrough {
+	struct file *filp;
+	struct cred *cred;
+};
+
 /** FUSE specific file data */
 struct fuse_file {
 	/** Fuse connection for this file */
@@ -218,6 +229,9 @@
 
 	} readdir;
 
+	/** Container for data related to the passthrough functionality */
+	struct fuse_passthrough passthrough;
+
 	/** RB node to be linked on fuse_conn->polled_files */
 	struct rb_node polled_node;
 
@@ -263,6 +277,9 @@
 	struct fuse_in_arg in_args[3];
 	struct fuse_arg out_args[2];
 	void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error);
+
+	/* Path used for completing d_canonical_path */
+	struct path *canonical_path;
 };
 
 struct fuse_args_pages {
@@ -366,10 +383,8 @@
 	/** Used to wake up the task waiting for completion of request*/
 	wait_queue_head_t waitq;
 
-#if IS_ENABLED(CONFIG_VIRTIO_FS)
 	/** virtio-fs's physically contiguous buffer for in and out args */
 	void *argbuf;
-#endif
 
 	/** fuse_mount this request belongs to */
 	struct fuse_mount *fm;
@@ -762,6 +777,9 @@
 	/* Auto-mount submounts announced by the server */
 	unsigned int auto_submounts:1;
 
+	/** Passthrough mode for read/write IO */
+	unsigned int passthrough:1;
+
 	/* Propagate syncfs() to server */
 	unsigned int sync_fs:1;
 
@@ -811,6 +829,12 @@
 
 	/* New writepages go into this bucket */
 	struct fuse_sync_bucket __rcu *curr_bucket;
+
+	/** IDR for passthrough requests */
+	struct idr passthrough_req;
+
+	/** Protects passthrough_req */
+	spinlock_t passthrough_req_lock;
 };
 
 /*
@@ -1292,4 +1316,13 @@
 void fuse_file_release(struct inode *inode, struct fuse_file *ff,
 		       unsigned int open_flags, fl_owner_t id, bool isdir);
 
+/* passthrough.c */
+int fuse_passthrough_open(struct fuse_dev *fud, u32 lower_fd);
+int fuse_passthrough_setup(struct fuse_conn *fc, struct fuse_file *ff,
+			   struct fuse_open_out *openarg);
+void fuse_passthrough_release(struct fuse_passthrough *passthrough);
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to);
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *from);
+ssize_t fuse_passthrough_mmap(struct file *file, struct vm_area_struct *vma);
+
 #endif /* _FS_FUSE_I_H */
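
Taken together, the declarations above give the passthrough object a three-stage lifecycle: fuse_passthrough_open() creates it from the daemon's ioctl and parks it in the connection-wide IDR; fuse_passthrough_setup() moves it from the IDR into the fuse_file when the matching FUSE_OPEN reply arrives; and fuse_passthrough_release() drops the lower file and credentials when the fuse_file is released, or from fuse_free_conn() for entries the daemon registered but never claimed.
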
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 8b89e3ba..f7dd8e9 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -807,6 +807,7 @@
 	memset(fc, 0, sizeof(*fc));
 	spin_lock_init(&fc->lock);
 	spin_lock_init(&fc->bg_lock);
+	spin_lock_init(&fc->passthrough_req_lock);
 	init_rwsem(&fc->killsb);
 	refcount_set(&fc->count, 1);
 	atomic_set(&fc->dev_count, 1);
@@ -815,6 +816,7 @@
 	INIT_LIST_HEAD(&fc->bg_queue);
 	INIT_LIST_HEAD(&fc->entry);
 	INIT_LIST_HEAD(&fc->devices);
+	idr_init(&fc->passthrough_req);
 	atomic_set(&fc->num_waiting, 0);
 	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
 	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
@@ -1174,6 +1176,12 @@
 				fc->handle_killpriv_v2 = 1;
 				fm->sb->s_flags |= SB_NOSEC;
 			}
+			if (arg->flags & FUSE_PASSTHROUGH) {
+				fc->passthrough = 1;
+				/* Prevent further stacking */
+				fm->sb->s_stack_depth =
+					FILESYSTEM_MAX_STACK_DEPTH;
+			}
 			if (arg->flags & FUSE_SETXATTR_EXT)
 				fc->setxattr_ext = 1;
 		} else {
@@ -1219,6 +1227,7 @@
 		FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
 		FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
 		FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
+		FUSE_PASSTHROUGH |
 		FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT;
 #ifdef CONFIG_FUSE_DAX
 	if (fm->fc->dax)
@@ -1247,9 +1256,21 @@
 }
 EXPORT_SYMBOL_GPL(fuse_send_init);
 
+static int free_fuse_passthrough(int id, void *p, void *data)
+{
+	struct fuse_passthrough *passthrough = (struct fuse_passthrough *)p;
+
+	fuse_passthrough_release(passthrough);
+	kfree(p);
+
+	return 0;
+}
+
 void fuse_free_conn(struct fuse_conn *fc)
 {
 	WARN_ON(!list_empty(&fc->devices));
+	idr_for_each(&fc->passthrough_req, free_fuse_passthrough, NULL);
+	idr_destroy(&fc->passthrough_req);
 	kfree_rcu(fc, rcu);
 }
 EXPORT_SYMBOL_GPL(fuse_free_conn);
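
Since FUSE_PASSTHROUGH is now advertised in the INIT flag mask, the daemon must echo the flag back before fc->passthrough is set. A hedged sketch of the daemon's side of that handshake (structure names are from the FUSE UAPI; the surrounding request loop is elided):

#include <linux/fuse.h>

/* Illustrative INIT handling in a passthrough-capable daemon. */
static void fill_init_reply(const struct fuse_init_in *in,
			    struct fuse_init_out *out)
{
	/* Opt in only if the kernel offered passthrough support. */
	if (in->flags & FUSE_PASSTHROUGH)
		out->flags |= FUSE_PASSTHROUGH;
}
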
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
new file mode 100644
index 0000000..630fa28
--- /dev/null
+++ b/fs/fuse/passthrough.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "fuse_i.h"
+
+#include <linux/file.h>
+#include <linux/fuse.h>
+#include <linux/idr.h>
+#include <linux/uio.h>
+
+#define PASSTHROUGH_IOCB_MASK                                                  \
+	(IOCB_APPEND | IOCB_DSYNC | IOCB_HIPRI | IOCB_NOWAIT | IOCB_SYNC)
+
+struct fuse_aio_req {
+	struct kiocb iocb;
+	struct kiocb *iocb_fuse;
+};
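+
+/*
+ * For async requests, a shadow kiocb aimed at the lower file is cloned from
+ * the FUSE kiocb; fuse_aio_rw_complete() later propagates ki_pos and the
+ * result back to the original iocb when the lower filesystem completes it.
+ */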
+
+static void fuse_file_accessed(struct file *dst_file, struct file *src_file)
+{
+	struct inode *dst_inode;
+	struct inode *src_inode;
+
+	if (dst_file->f_flags & O_NOATIME)
+		return;
+
+	dst_inode = file_inode(dst_file);
+	src_inode = file_inode(src_file);
+
+	if ((!timespec64_equal(&dst_inode->i_mtime, &src_inode->i_mtime) ||
+	     !timespec64_equal(&dst_inode->i_ctime, &src_inode->i_ctime))) {
+		dst_inode->i_mtime = src_inode->i_mtime;
+		dst_inode->i_ctime = src_inode->i_ctime;
+	}
+
+	touch_atime(&dst_file->f_path);
+}
+
+static void fuse_copyattr(struct file *dst_file, struct file *src_file)
+{
+	struct inode *dst = file_inode(dst_file);
+	struct inode *src = file_inode(src_file);
+
+	dst->i_atime = src->i_atime;
+	dst->i_mtime = src->i_mtime;
+	dst->i_ctime = src->i_ctime;
+	i_size_write(dst, i_size_read(src));
+}
+
+static void fuse_aio_cleanup_handler(struct fuse_aio_req *aio_req)
+{
+	struct kiocb *iocb = &aio_req->iocb;
+	struct kiocb *iocb_fuse = aio_req->iocb_fuse;
+
+	if (iocb->ki_flags & IOCB_WRITE) {
+		__sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb,
+				      SB_FREEZE_WRITE);
+		file_end_write(iocb->ki_filp);
+		fuse_copyattr(iocb_fuse->ki_filp, iocb->ki_filp);
+	}
+
+	iocb_fuse->ki_pos = iocb->ki_pos;
+	kfree(aio_req);
+}
+
+static void fuse_aio_rw_complete(struct kiocb *iocb, long res)
+{
+	struct fuse_aio_req *aio_req =
+		container_of(iocb, struct fuse_aio_req, iocb);
+	struct kiocb *iocb_fuse = aio_req->iocb_fuse;
+
+	fuse_aio_cleanup_handler(aio_req);
+	iocb_fuse->ki_complete(iocb_fuse, res);
+}
+
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb_fuse,
+				   struct iov_iter *iter)
+{
+	ssize_t ret;
+	const struct cred *old_cred;
+	struct file *fuse_filp = iocb_fuse->ki_filp;
+	struct fuse_file *ff = fuse_filp->private_data;
+	struct file *passthrough_filp = ff->passthrough.filp;
+
+	if (!iov_iter_count(iter))
+		return 0;
+
+	old_cred = override_creds(ff->passthrough.cred);
+	if (is_sync_kiocb(iocb_fuse)) {
+		ret = vfs_iter_read(passthrough_filp, iter, &iocb_fuse->ki_pos,
+				    iocb_to_rw_flags(iocb_fuse->ki_flags,
+						     PASSTHROUGH_IOCB_MASK));
+	} else {
+		struct fuse_aio_req *aio_req;
+
+		aio_req = kmalloc(sizeof(struct fuse_aio_req), GFP_KERNEL);
+		if (!aio_req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		aio_req->iocb_fuse = iocb_fuse;
+		kiocb_clone(&aio_req->iocb, iocb_fuse, passthrough_filp);
+		aio_req->iocb.ki_complete = fuse_aio_rw_complete;
+		ret = call_read_iter(passthrough_filp, &aio_req->iocb, iter);
+		if (ret != -EIOCBQUEUED)
+			fuse_aio_cleanup_handler(aio_req);
+	}
+out:
+	revert_creds(old_cred);
+
+	fuse_file_accessed(fuse_filp, passthrough_filp);
+
+	return ret;
+}
+
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb_fuse,
+				    struct iov_iter *iter)
+{
+	ssize_t ret;
+	const struct cred *old_cred;
+	struct file *fuse_filp = iocb_fuse->ki_filp;
+	struct fuse_file *ff = fuse_filp->private_data;
+	struct inode *fuse_inode = file_inode(fuse_filp);
+	struct file *passthrough_filp = ff->passthrough.filp;
+	struct inode *passthrough_inode = file_inode(passthrough_filp);
+
+	if (!iov_iter_count(iter))
+		return 0;
+
+	inode_lock(fuse_inode);
+
+	fuse_copyattr(fuse_filp, passthrough_filp);
+
+	old_cred = override_creds(ff->passthrough.cred);
+	if (is_sync_kiocb(iocb_fuse)) {
+		file_start_write(passthrough_filp);
+		ret = vfs_iter_write(passthrough_filp, iter, &iocb_fuse->ki_pos,
+				     iocb_to_rw_flags(iocb_fuse->ki_flags,
+						      PASSTHROUGH_IOCB_MASK));
+		file_end_write(passthrough_filp);
+		if (ret > 0)
+			fuse_copyattr(fuse_filp, passthrough_filp);
+	} else {
+		struct fuse_aio_req *aio_req;
+
+		aio_req = kmalloc(sizeof(struct fuse_aio_req), GFP_KERNEL);
+		if (!aio_req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		file_start_write(passthrough_filp);
+		__sb_writers_release(passthrough_inode->i_sb, SB_FREEZE_WRITE);
+
+		aio_req->iocb_fuse = iocb_fuse;
+		kiocb_clone(&aio_req->iocb, iocb_fuse, passthrough_filp);
+		aio_req->iocb.ki_complete = fuse_aio_rw_complete;
+		ret = call_write_iter(passthrough_filp, &aio_req->iocb, iter);
+		if (ret != -EIOCBQUEUED)
+			fuse_aio_cleanup_handler(aio_req);
+	}
+out:
+	revert_creds(old_cred);
+	inode_unlock(fuse_inode);
+
+	return ret;
+}
+
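+/*
+ * mmap is passed through by swapping the VMA's vm_file over to the lower
+ * struct file, so page faults are served directly by the lower filesystem.
+ */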
+ssize_t fuse_passthrough_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int ret;
+	const struct cred *old_cred;
+	struct fuse_file *ff = file->private_data;
+	struct file *passthrough_filp = ff->passthrough.filp;
+
+	if (!passthrough_filp->f_op->mmap)
+		return -ENODEV;
+
+	if (WARN_ON(file != vma->vm_file))
+		return -EIO;
+
+	vma->vm_file = get_file(passthrough_filp);
+
+	old_cred = override_creds(ff->passthrough.cred);
+	ret = call_mmap(vma->vm_file, vma);
+	revert_creds(old_cred);
+
+	if (ret)
+		fput(passthrough_filp);
+	else
+		fput(file);
+
+	fuse_file_accessed(file, passthrough_filp);
+
+	return ret;
+}
+
+int fuse_passthrough_open(struct fuse_dev *fud, u32 lower_fd)
+{
+	int res;
+	struct file *passthrough_filp;
+	struct fuse_conn *fc = fud->fc;
+	struct inode *passthrough_inode;
+	struct super_block *passthrough_sb;
+	struct fuse_passthrough *passthrough;
+
+	if (!fc->passthrough)
+		return -EPERM;
+
+	passthrough_filp = fget(lower_fd);
+	if (!passthrough_filp) {
+		pr_err("FUSE: invalid file descriptor for passthrough.\n");
+		return -EBADF;
+	}
+
+	if (!passthrough_filp->f_op->read_iter ||
+	    !passthrough_filp->f_op->write_iter) {
+		pr_err("FUSE: passthrough file lacks the required file operations.\n");
+		res = -EBADF;
+		goto err_free_file;
+	}
+
+	passthrough_inode = file_inode(passthrough_filp);
+	passthrough_sb = passthrough_inode->i_sb;
+	if (passthrough_sb->s_stack_depth >= FILESYSTEM_MAX_STACK_DEPTH) {
+		pr_err("FUSE: fs stacking depth exceeded for passthrough\n");
+		res = -EINVAL;
+		goto err_free_file;
+	}
+
+	passthrough = kmalloc(sizeof(struct fuse_passthrough), GFP_KERNEL);
+	if (!passthrough) {
+		res = -ENOMEM;
+		goto err_free_file;
+	}
+
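+	/*
+	 * Snapshot the daemon's credentials now; passthrough reads, writes
+	 * and mmaps later run under them via override_creds().
+	 */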
+	passthrough->filp = passthrough_filp;
+	passthrough->cred = prepare_creds();
+	if (!passthrough->cred) {
+		kfree(passthrough);
+		res = -ENOMEM;
+		goto err_free_file;
+	}
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&fc->passthrough_req_lock);
+	res = idr_alloc(&fc->passthrough_req, passthrough, 1, 0, GFP_ATOMIC);
+	spin_unlock(&fc->passthrough_req_lock);
+	idr_preload_end();
+
+	if (res > 0)
+		return res;
+
+	fuse_passthrough_release(passthrough);
+	kfree(passthrough);
+
+err_free_file:
+	fput(passthrough_filp);
+
+	return res;
+}
+
+int fuse_passthrough_setup(struct fuse_conn *fc, struct fuse_file *ff,
+			   struct fuse_open_out *openarg)
+{
+	struct fuse_passthrough *passthrough;
+	int passthrough_fh = openarg->passthrough_fh;
+
+	if (!fc->passthrough)
+		return -EPERM;
+
+	/* Default case, passthrough is not requested */
+	if (passthrough_fh <= 0)
+		return -EINVAL;
+
+	spin_lock(&fc->passthrough_req_lock);
+	passthrough = idr_remove(&fc->passthrough_req, passthrough_fh);
+	spin_unlock(&fc->passthrough_req_lock);
+
+	if (!passthrough)
+		return -EINVAL;
+
+	ff->passthrough = *passthrough;
+	kfree(passthrough);
+
+	return 0;
+}
+
+void fuse_passthrough_release(struct fuse_passthrough *passthrough)
+{
+	if (passthrough->filp) {
+		fput(passthrough->filp);
+		passthrough->filp = NULL;
+	}
+	if (passthrough->cred) {
+		put_cred(passthrough->cred);
+		passthrough->cred = NULL;
+	}
+}
diff --git a/fs/incfs/Kconfig b/fs/incfs/Kconfig
new file mode 100644
index 0000000..5f15000
--- /dev/null
+++ b/fs/incfs/Kconfig
@@ -0,0 +1,13 @@
+config INCREMENTAL_FS
+	tristate "Incremental file system support"
+	depends on BLOCK
+	select DECOMPRESS_LZ4
+	select DECOMPRESS_ZSTD
+	select CRYPTO_SHA256
+	help
+	  Incremental FS is a read-only virtual file system that facilitates execution
+	  of programs while their binaries are still being lazily downloaded over the
+	  network, USB or pigeon post.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called incrementalfs.
diff --git a/fs/incfs/Makefile b/fs/incfs/Makefile
new file mode 100644
index 0000000..05795d1
--- /dev/null
+++ b/fs/incfs/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INCREMENTAL_FS)	+= incrementalfs.o
+
+incrementalfs-y := \
+	data_mgmt.o \
+	format.o \
+	integrity.o \
+	main.o \
+	pseudo_files.o \
+	sysfs.o \
+	vfs.o
+
+incrementalfs-$(CONFIG_FS_VERITY) += verity.o
diff --git a/fs/incfs/OWNERS b/fs/incfs/OWNERS
new file mode 100644
index 0000000..1b97669
--- /dev/null
+++ b/fs/incfs/OWNERS
@@ -0,0 +1,2 @@
+akailash@google.com
+paullawrence@google.com
diff --git a/fs/incfs/data_mgmt.c b/fs/incfs/data_mgmt.c
new file mode 100644
index 0000000..a3c782a
--- /dev/null
+++ b/fs/incfs/data_mgmt.c
@@ -0,0 +1,1890 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Google LLC
+ */
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <linux/fsverity.h>
+#include <linux/gfp.h>
+#include <linux/kobject.h>
+#include <linux/ktime.h>
+#include <linux/lz4.h>
+#include <linux/mm.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "data_mgmt.h"
+#include "format.h"
+#include "integrity.h"
+#include "sysfs.h"
+#include "verity.h"
+
+static int incfs_scan_metadata_chain(struct data_file *df);
+
+static void log_wake_up_all(struct work_struct *work)
+{
+	struct delayed_work *dw = container_of(work, struct delayed_work, work);
+	struct read_log *rl = container_of(dw, struct read_log, ml_wakeup_work);
+	struct read_log *rl = container_of(dw, struct read_log, ml_wakeup_work);
+
+}
+
+static void zstd_free_workspace(struct work_struct *work)
+{
+	struct delayed_work *dw = container_of(work, struct delayed_work, work);
+	struct mount_info *mi =
+		container_of(dw, struct mount_info, mi_zstd_cleanup_work);
+
+	mutex_lock(&mi->mi_zstd_workspace_mutex);
+	kvfree(mi->mi_zstd_workspace);
+	mi->mi_zstd_workspace = NULL;
+	mi->mi_zstd_stream = NULL;
+	mutex_unlock(&mi->mi_zstd_workspace_mutex);
+}
+
+struct mount_info *incfs_alloc_mount_info(struct super_block *sb,
+					  struct mount_options *options,
+					  struct path *backing_dir_path)
+{
+	struct mount_info *mi = NULL;
+	int error = 0;
+	struct incfs_sysfs_node *node;
+
+	mi = kzalloc(sizeof(*mi), GFP_NOFS);
+	if (!mi)
+		return ERR_PTR(-ENOMEM);
+
+	mi->mi_sb = sb;
+	mi->mi_backing_dir_path = *backing_dir_path;
+	mi->mi_owner = get_current_cred();
+	path_get(&mi->mi_backing_dir_path);
+	mutex_init(&mi->mi_dir_struct_mutex);
+	init_waitqueue_head(&mi->mi_pending_reads_notif_wq);
+	init_waitqueue_head(&mi->mi_log.ml_notif_wq);
+	init_waitqueue_head(&mi->mi_blocks_written_notif_wq);
+	atomic_set(&mi->mi_blocks_written, 0);
+	INIT_DELAYED_WORK(&mi->mi_log.ml_wakeup_work, log_wake_up_all);
+	spin_lock_init(&mi->mi_log.rl_lock);
+	spin_lock_init(&mi->pending_read_lock);
+	INIT_LIST_HEAD(&mi->mi_reads_list_head);
+	spin_lock_init(&mi->mi_per_uid_read_timeouts_lock);
+	mutex_init(&mi->mi_zstd_workspace_mutex);
+	INIT_DELAYED_WORK(&mi->mi_zstd_cleanup_work, zstd_free_workspace);
+	mutex_init(&mi->mi_le_mutex);
+
+	node = incfs_add_sysfs_node(options->sysfs_name, mi);
+	if (IS_ERR(node)) {
+		error = PTR_ERR(node);
+		goto err;
+	}
+	mi->mi_sysfs_node = node;
+
+	error = incfs_realloc_mount_info(mi, options);
+	if (error)
+		goto err;
+
+	return mi;
+
+err:
+	incfs_free_mount_info(mi);
+	return ERR_PTR(error);
+}
+
+int incfs_realloc_mount_info(struct mount_info *mi,
+			     struct mount_options *options)
+{
+	void *new_buffer = NULL;
+	void *old_buffer;
+	size_t new_buffer_size = 0;
+
+	if (options->read_log_pages != mi->mi_options.read_log_pages) {
+		struct read_log_state log_state;
+		/*
+		 * Even though having two buffers allocated at once isn't
+		 * usually good, allocating a multipage buffer under a spinlock
+		 * is even worse, so let's optimize for the shorter lock
+		 * duration. It's not the end of the world if we fail to increase
+		 * the buffer size anyway.
+		 */
+		if (options->read_log_pages > 0) {
+			new_buffer_size = PAGE_SIZE * options->read_log_pages;
+			new_buffer = kzalloc(new_buffer_size, GFP_NOFS);
+			if (!new_buffer)
+				return -ENOMEM;
+		}
+
+		spin_lock(&mi->mi_log.rl_lock);
+		old_buffer = mi->mi_log.rl_ring_buf;
+		mi->mi_log.rl_ring_buf = new_buffer;
+		mi->mi_log.rl_size = new_buffer_size;
+		log_state = (struct read_log_state){
+			.generation_id = mi->mi_log.rl_head.generation_id + 1,
+		};
+		mi->mi_log.rl_head = log_state;
+		mi->mi_log.rl_tail = log_state;
+		spin_unlock(&mi->mi_log.rl_lock);
+
+		kfree(old_buffer);
+	}
+
+	if (options->sysfs_name && !mi->mi_sysfs_node)
+		mi->mi_sysfs_node = incfs_add_sysfs_node(options->sysfs_name,
+							 mi);
+	else if (!options->sysfs_name && mi->mi_sysfs_node) {
+		incfs_free_sysfs_node(mi->mi_sysfs_node);
+		mi->mi_sysfs_node = NULL;
+	} else if (options->sysfs_name &&
+		strcmp(options->sysfs_name,
+		       kobject_name(&mi->mi_sysfs_node->isn_sysfs_node))) {
+		incfs_free_sysfs_node(mi->mi_sysfs_node);
+		mi->mi_sysfs_node = incfs_add_sysfs_node(options->sysfs_name,
+							 mi);
+	}
+
+	if (IS_ERR(mi->mi_sysfs_node)) {
+		int err = PTR_ERR(mi->mi_sysfs_node);
+
+		mi->mi_sysfs_node = NULL;
+		return err;
+	}
+
+	mi->mi_options = *options;
+	return 0;
+}
+
+void incfs_free_mount_info(struct mount_info *mi)
+{
+	int i;
+
+	if (!mi)
+		return;
+
+	flush_delayed_work(&mi->mi_log.ml_wakeup_work);
+	flush_delayed_work(&mi->mi_zstd_cleanup_work);
+
+	dput(mi->mi_index_dir);
+	dput(mi->mi_incomplete_dir);
+	path_put(&mi->mi_backing_dir_path);
+	mutex_destroy(&mi->mi_dir_struct_mutex);
+	mutex_destroy(&mi->mi_zstd_workspace_mutex);
+	put_cred(mi->mi_owner);
+	kfree(mi->mi_log.rl_ring_buf);
+	for (i = 0; i < ARRAY_SIZE(mi->pseudo_file_xattr); ++i)
+		kfree(mi->pseudo_file_xattr[i].data);
+	kfree(mi->mi_per_uid_read_timeouts);
+	incfs_free_sysfs_node(mi->mi_sysfs_node);
+	kfree(mi);
+}
+
+static void data_file_segment_init(struct data_file_segment *segment)
+{
+	init_waitqueue_head(&segment->new_data_arrival_wq);
+	init_rwsem(&segment->rwsem);
+	INIT_LIST_HEAD(&segment->reads_list_head);
+}
+
+char *file_id_to_str(incfs_uuid_t id)
+{
+	char *result = kmalloc(1 + sizeof(id.bytes) * 2, GFP_NOFS);
+	char *end;
+
+	if (!result)
+		return NULL;
+
+	end = bin2hex(result, id.bytes, sizeof(id.bytes));
+	*end = 0;
+	return result;
+}
+
+struct dentry *incfs_lookup_dentry(struct dentry *parent, const char *name)
+{
+	struct inode *inode;
+	struct dentry *result = NULL;
+
+	if (!parent)
+		return ERR_PTR(-EFAULT);
+
+	inode = d_inode(parent);
+	inode_lock_nested(inode, I_MUTEX_PARENT);
+	result = lookup_one_len(name, parent, strlen(name));
+	inode_unlock(inode);
+
+	if (IS_ERR(result))
+		pr_warn("%s err:%ld\n", __func__, PTR_ERR(result));
+
+	return result;
+}
+
+static struct data_file *handle_mapped_file(struct mount_info *mi,
+					    struct data_file *df)
+{
+	char *file_id_str;
+	struct dentry *index_file_dentry;
+	struct path path;
+	struct file *bf;
+	struct data_file *result = NULL;
+	const struct cred *old_cred;
+
+	file_id_str = file_id_to_str(df->df_id);
+	if (!file_id_str)
+		return ERR_PTR(-ENOENT);
+
+	index_file_dentry = incfs_lookup_dentry(mi->mi_index_dir,
+						file_id_str);
+	kfree(file_id_str);
+	if (!index_file_dentry)
+		return ERR_PTR(-ENOENT);
+	if (IS_ERR(index_file_dentry))
+		return (struct data_file *)index_file_dentry;
+	if (!d_really_is_positive(index_file_dentry)) {
+		result = ERR_PTR(-ENOENT);
+		goto out;
+	}
+
+	path = (struct path) {
+		.mnt = mi->mi_backing_dir_path.mnt,
+		.dentry = index_file_dentry
+	};
+
+	old_cred = override_creds(mi->mi_owner);
+	bf = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE,
+			 current_cred());
+	revert_creds(old_cred);
+
+	if (IS_ERR(bf)) {
+		result = (struct data_file *)bf;
+		goto out;
+	}
+
+	result = incfs_open_data_file(mi, bf);
+	fput(bf);
+	if (IS_ERR(result))
+		goto out;
+
+	result->df_mapped_offset = df->df_metadata_off;
+
+out:
+	dput(index_file_dentry);
+	return result;
+}
+
+struct data_file *incfs_open_data_file(struct mount_info *mi, struct file *bf)
+{
+	struct data_file *df = NULL;
+	struct backing_file_context *bfc = NULL;
+	int md_records;
+	u64 size;
+	int error = 0;
+	int i;
+
+	if (!bf || !mi)
+		return ERR_PTR(-EFAULT);
+
+	if (!S_ISREG(bf->f_inode->i_mode))
+		return ERR_PTR(-EBADF);
+
+	bfc = incfs_alloc_bfc(mi, bf);
+	if (IS_ERR(bfc))
+		return ERR_CAST(bfc);
+
+	df = kzalloc(sizeof(*df), GFP_NOFS);
+	if (!df) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	mutex_init(&df->df_enable_verity);
+
+	df->df_backing_file_context = bfc;
+	df->df_mount_info = mi;
+	for (i = 0; i < ARRAY_SIZE(df->df_segments); i++)
+		data_file_segment_init(&df->df_segments[i]);
+
+	error = incfs_read_file_header(bfc, &df->df_metadata_off, &df->df_id,
+				       &size, &df->df_header_flags);
+
+	if (error)
+		goto out;
+
+	df->df_size = size;
+	if (size > 0)
+		df->df_data_block_count = get_blocks_count_for_size(size);
+
+	if (df->df_header_flags & INCFS_FILE_MAPPED) {
+		struct data_file *mapped_df = handle_mapped_file(mi, df);
+
+		incfs_free_data_file(df);
+		return mapped_df;
+	}
+
+	md_records = incfs_scan_metadata_chain(df);
+	if (md_records < 0)
+		error = md_records;
+
+out:
+	if (error) {
+		incfs_free_bfc(bfc);
+		if (df)
+			df->df_backing_file_context = NULL;
+		incfs_free_data_file(df);
+		return ERR_PTR(error);
+	}
+	return df;
+}
+
+void incfs_free_data_file(struct data_file *df)
+{
+	u32 data_blocks_written, hash_blocks_written;
+
+	if (!df)
+		return;
+
+	data_blocks_written = atomic_read(&df->df_data_blocks_written);
+	hash_blocks_written = atomic_read(&df->df_hash_blocks_written);
+
+	if (data_blocks_written != df->df_initial_data_blocks_written ||
+	    hash_blocks_written != df->df_initial_hash_blocks_written) {
+		struct backing_file_context *bfc = df->df_backing_file_context;
+		int error = -1;
+
+		if (bfc && !mutex_lock_interruptible(&bfc->bc_mutex)) {
+			error = incfs_write_status_to_backing_file(
+						df->df_backing_file_context,
+						df->df_status_offset,
+						data_blocks_written,
+						hash_blocks_written);
+			mutex_unlock(&bfc->bc_mutex);
+		}
+
+		if (error)
+			/* Nothing can be done, just warn */
+			pr_warn("incfs: failed to write status to backing file\n");
+	}
+
+	incfs_free_mtree(df->df_hash_tree);
+	incfs_free_bfc(df->df_backing_file_context);
+	kfree(df->df_signature);
+	kfree(df->df_verity_file_digest.data);
+	kfree(df->df_verity_signature);
+	mutex_destroy(&df->df_enable_verity);
+	kfree(df);
+}
+
+int make_inode_ready_for_data_ops(struct mount_info *mi,
+				struct inode *inode,
+				struct file *backing_file)
+{
+	struct inode_info *node = get_incfs_node(inode);
+	struct data_file *df = NULL;
+	int err = 0;
+
+	inode_lock(inode);
+	if (S_ISREG(inode->i_mode)) {
+		if (!node->n_file) {
+			df = incfs_open_data_file(mi, backing_file);
+
+			if (IS_ERR(df))
+				err = PTR_ERR(df);
+			else
+				node->n_file = df;
+		}
+	} else
+		err = -EBADF;
+	inode_unlock(inode);
+	return err;
+}
+
+struct dir_file *incfs_open_dir_file(struct mount_info *mi, struct file *bf)
+{
+	struct dir_file *dir = NULL;
+
+	if (!S_ISDIR(bf->f_inode->i_mode))
+		return ERR_PTR(-EBADF);
+
+	dir = kzalloc(sizeof(*dir), GFP_NOFS);
+	if (!dir)
+		return ERR_PTR(-ENOMEM);
+
+	dir->backing_dir = get_file(bf);
+	dir->mount_info = mi;
+	return dir;
+}
+
+void incfs_free_dir_file(struct dir_file *dir)
+{
+	if (!dir)
+		return;
+	if (dir->backing_dir)
+		fput(dir->backing_dir);
+	kfree(dir);
+}
+
+static ssize_t zstd_decompress_safe(struct mount_info *mi,
+				    struct mem_range src, struct mem_range dst)
+{
+	ssize_t result;
+	ZSTD_inBuffer inbuf = {.src = src.data,	.size = src.len};
+	ZSTD_outBuffer outbuf = {.dst = dst.data, .size = dst.len};
+
+	result = mutex_lock_interruptible(&mi->mi_zstd_workspace_mutex);
+	if (result)
+		return result;
+
+	if (!mi->mi_zstd_stream) {
+		unsigned int workspace_size = zstd_dstream_workspace_bound(
+						INCFS_DATA_FILE_BLOCK_SIZE);
+		void *workspace = kvmalloc(workspace_size, GFP_NOFS);
+		ZSTD_DStream *stream;
+
+		if (!workspace) {
+			result = -ENOMEM;
+			goto out;
+		}
+
+		stream = zstd_init_dstream(INCFS_DATA_FILE_BLOCK_SIZE, workspace,
+				  workspace_size);
+		if (!stream) {
+			kvfree(workspace);
+			result = -EIO;
+			goto out;
+		}
+
+		mi->mi_zstd_workspace = workspace;
+		mi->mi_zstd_stream = stream;
+	}
+
+	result = zstd_decompress_stream(mi->mi_zstd_stream, &outbuf, &inbuf) ?
+		-EBADMSG : outbuf.pos;
+
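+	/*
+	 * Keep the lazily allocated workspace around for a few more seconds
+	 * of reads; the delayed work frees it once the stream goes idle.
+	 */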
+	mod_delayed_work(system_wq, &mi->mi_zstd_cleanup_work,
+			 msecs_to_jiffies(5000));
+
+out:
+	mutex_unlock(&mi->mi_zstd_workspace_mutex);
+	return result;
+}
+
+static ssize_t decompress(struct mount_info *mi,
+			  struct mem_range src, struct mem_range dst, int alg)
+{
+	int result;
+
+	switch (alg) {
+	case INCFS_BLOCK_COMPRESSED_LZ4:
+		result = LZ4_decompress_safe(src.data, dst.data, src.len,
+					     dst.len);
+		if (result < 0)
+			return -EBADMSG;
+		return result;
+
+	case INCFS_BLOCK_COMPRESSED_ZSTD:
+		return zstd_decompress_safe(mi, src, dst);
+
+	default:
+		WARN_ON(true);
+		return -EOPNOTSUPP;
+	}
+}
+
+static void log_read_one_record(struct read_log *rl, struct read_log_state *rs)
+{
+	union log_record *record =
+		(union log_record *)((u8 *)rl->rl_ring_buf + rs->next_offset);
+	size_t record_size;
+
+	switch (record->full_record.type) {
+	case FULL:
+		rs->base_record = record->full_record;
+		record_size = sizeof(record->full_record);
+		break;
+
+	case SAME_FILE:
+		rs->base_record.block_index =
+			record->same_file.block_index;
+		rs->base_record.absolute_ts_us +=
+			record->same_file.relative_ts_us;
+		rs->base_record.uid = record->same_file.uid;
+		record_size = sizeof(record->same_file);
+		break;
+
+	case SAME_FILE_CLOSE_BLOCK:
+		rs->base_record.block_index +=
+			record->same_file_close_block.block_index_delta;
+		rs->base_record.absolute_ts_us +=
+			record->same_file_close_block.relative_ts_us;
+		record_size = sizeof(record->same_file_close_block);
+		break;
+
+	case SAME_FILE_CLOSE_BLOCK_SHORT:
+		rs->base_record.block_index +=
+			record->same_file_close_block_short.block_index_delta;
+		rs->base_record.absolute_ts_us +=
+		   record->same_file_close_block_short.relative_ts_tens_us * 10;
+		record_size = sizeof(record->same_file_close_block_short);
+		break;
+
+	case SAME_FILE_NEXT_BLOCK:
+		++rs->base_record.block_index;
+		rs->base_record.absolute_ts_us +=
+			record->same_file_next_block.relative_ts_us;
+		record_size = sizeof(record->same_file_next_block);
+		break;
+
+	case SAME_FILE_NEXT_BLOCK_SHORT:
+		++rs->base_record.block_index;
+		rs->base_record.absolute_ts_us +=
+		    record->same_file_next_block_short.relative_ts_tens_us * 10;
+		record_size = sizeof(record->same_file_next_block_short);
+		break;
+	}
+
+	rs->next_offset += record_size;
+	if (rs->next_offset > rl->rl_size - sizeof(*record)) {
+		rs->next_offset = 0;
+		++rs->current_pass_no;
+	}
+	++rs->current_record_no;
+}
+
+static void log_block_read(struct mount_info *mi, incfs_uuid_t *id,
+			   int block_index)
+{
+	struct read_log *log = &mi->mi_log;
+	struct read_log_state *head, *tail;
+	s64 now_us;
+	s64 relative_us;
+	union log_record record;
+	size_t record_size;
+	uid_t uid = current_uid().val;
+	int block_delta;
+	bool same_file, same_uid;
+	bool next_block, close_block, very_close_block;
+	bool close_time, very_close_time, very_very_close_time;
+
+	/*
+	 * This may read a stale value, but it's OK if logging starts a little
+	 * late right after a configuration update.
+	 */
+	if (READ_ONCE(log->rl_size) == 0)
+		return;
+
+	now_us = ktime_to_us(ktime_get());
+
+	spin_lock(&log->rl_lock);
+	if (log->rl_size == 0) {
+		spin_unlock(&log->rl_lock);
+		return;
+	}
+
+	head = &log->rl_head;
+	tail = &log->rl_tail;
+	relative_us = now_us - head->base_record.absolute_ts_us;
+
+	same_file = !memcmp(id, &head->base_record.file_id,
+			    sizeof(incfs_uuid_t));
+	same_uid = uid == head->base_record.uid;
+
+	block_delta = block_index - head->base_record.block_index;
+	next_block = block_delta == 1;
+	very_close_block = block_delta >= S8_MIN && block_delta <= S8_MAX;
+	close_block = block_delta >= S16_MIN && block_delta <= S16_MAX;
+
+	very_very_close_time = relative_us < (1 << 5) * 10;
+	very_close_time = relative_us < (1 << 13);
+	close_time = relative_us < (1 << 16);
+
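+	/*
+	 * Pick the most compact record type that can still encode this read;
+	 * the chain below degrades from short same-file records to a full
+	 * record as the block/time deltas grow or the file or uid changes.
+	 */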
+	if (same_file && same_uid && next_block && very_very_close_time) {
+		record.same_file_next_block_short =
+			(struct same_file_next_block_short){
+				.type = SAME_FILE_NEXT_BLOCK_SHORT,
+				.relative_ts_tens_us = div_s64(relative_us, 10),
+			};
+		record_size = sizeof(struct same_file_next_block_short);
+	} else if (same_file && same_uid && next_block && very_close_time) {
+		record.same_file_next_block = (struct same_file_next_block){
+			.type = SAME_FILE_NEXT_BLOCK,
+			.relative_ts_us = relative_us,
+		};
+		record_size = sizeof(struct same_file_next_block);
+	} else if (same_file && same_uid && very_close_block &&
+		   very_very_close_time) {
+		record.same_file_close_block_short =
+			(struct same_file_close_block_short){
+				.type = SAME_FILE_CLOSE_BLOCK_SHORT,
+				.relative_ts_tens_us = div_s64(relative_us, 10),
+				.block_index_delta = block_delta,
+			};
+		record_size = sizeof(struct same_file_close_block_short);
+	} else if (same_file && same_uid && close_block && very_close_time) {
+		record.same_file_close_block = (struct same_file_close_block){
+				.type = SAME_FILE_CLOSE_BLOCK,
+				.relative_ts_us = relative_us,
+				.block_index_delta = block_delta,
+			};
+		record_size = sizeof(struct same_file_close_block);
+	} else if (same_file && close_time) {
+		record.same_file = (struct same_file){
+			.type = SAME_FILE,
+			.block_index = block_index,
+			.relative_ts_us = relative_us,
+			.uid = uid,
+		};
+		record_size = sizeof(struct same_file);
+	} else {
+		record.full_record = (struct full_record){
+			.type = FULL,
+			.block_index = block_index,
+			.file_id = *id,
+			.absolute_ts_us = now_us,
+			.uid = uid,
+		};
+		head->base_record.file_id = *id;
+		record_size = sizeof(struct full_record);
+	}
+
+	head->base_record.block_index = block_index;
+	head->base_record.absolute_ts_us = now_us;
+
+	/* Advance tail beyond area we are going to overwrite */
+	while (tail->current_pass_no < head->current_pass_no &&
+	       tail->next_offset < head->next_offset + record_size)
+		log_read_one_record(log, tail);
+
+	memcpy(((u8 *)log->rl_ring_buf) + head->next_offset, &record,
+	       record_size);
+	head->next_offset += record_size;
+	if (head->next_offset > log->rl_size - sizeof(record)) {
+		head->next_offset = 0;
+		++head->current_pass_no;
+	}
+	++head->current_record_no;
+
+	spin_unlock(&log->rl_lock);
+	schedule_delayed_work(&log->ml_wakeup_work, msecs_to_jiffies(16));
+}
+
+static int validate_hash_tree(struct backing_file_context *bfc, struct file *f,
+			      int block_index, struct mem_range data, u8 *buf)
+{
+	struct data_file *df = get_incfs_data_file(f);
+	u8 stored_digest[INCFS_MAX_HASH_SIZE] = {};
+	u8 calculated_digest[INCFS_MAX_HASH_SIZE] = {};
+	struct mtree *tree = NULL;
+	struct incfs_df_signature *sig = NULL;
+	int digest_size;
+	int hash_block_index = block_index;
+	int lvl;
+	int res;
+	loff_t hash_block_offset[INCFS_MAX_MTREE_LEVELS];
+	size_t hash_offset_in_block[INCFS_MAX_MTREE_LEVELS];
+	int hash_per_block;
+	pgoff_t file_pages;
+
+	/*
+	 * Acquire semantics make sure the tree is fully initialized if it was
+	 * published concurrently by the enable-verity path.
+	 */
+	tree = smp_load_acquire(&df->df_hash_tree);
+	sig = df->df_signature;
+	if (!tree || !sig)
+		return 0;
+
+	digest_size = tree->alg->digest_size;
+	hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+	for (lvl = 0; lvl < tree->depth; lvl++) {
+		loff_t lvl_off = tree->hash_level_suboffset[lvl];
+
+		hash_block_offset[lvl] =
+			lvl_off + round_down(hash_block_index * digest_size,
+					     INCFS_DATA_FILE_BLOCK_SIZE);
+		hash_offset_in_block[lvl] = hash_block_index * digest_size %
+					    INCFS_DATA_FILE_BLOCK_SIZE;
+		hash_block_index /= hash_per_block;
+	}
+
+	memcpy(stored_digest, tree->root_hash, digest_size);
+
+	file_pages = DIV_ROUND_UP(df->df_size, INCFS_DATA_FILE_BLOCK_SIZE);
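+	/*
+	 * Walk the Merkle tree from the root towards the leaf, checking each
+	 * hash block against the digest taken from its parent. Verified hash
+	 * blocks are cached past the data pages and marked PageChecked so
+	 * later reads can skip the recomputation.
+	 */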
+	for (lvl = tree->depth - 1; lvl >= 0; lvl--) {
+		pgoff_t hash_page =
+			file_pages +
+			hash_block_offset[lvl] / INCFS_DATA_FILE_BLOCK_SIZE;
+		struct page *page = find_get_page_flags(
+			f->f_inode->i_mapping, hash_page, FGP_ACCESSED);
+
+		if (page && PageChecked(page)) {
+			u8 *addr = kmap_atomic(page);
+
+			memcpy(stored_digest, addr + hash_offset_in_block[lvl],
+			       digest_size);
+			kunmap_atomic(addr);
+			put_page(page);
+			continue;
+		}
+
+		if (page)
+			put_page(page);
+
+		res = incfs_kread(bfc, buf, INCFS_DATA_FILE_BLOCK_SIZE,
+				  hash_block_offset[lvl] + sig->hash_offset);
+		if (res < 0)
+			return res;
+		if (res != INCFS_DATA_FILE_BLOCK_SIZE)
+			return -EIO;
+		res = incfs_calc_digest(tree->alg,
+					range(buf, INCFS_DATA_FILE_BLOCK_SIZE),
+					range(calculated_digest, digest_size));
+		if (res)
+			return res;
+
+		if (memcmp(stored_digest, calculated_digest, digest_size)) {
+			int i;
+			bool zero = true;
+
+			pr_warn("incfs: Hash mismatch lvl:%d blk:%d\n",
+				lvl, block_index);
+			for (i = 0; i < digest_size; i++)
+				if (stored_digest[i]) {
+					zero = false;
+					break;
+				}
+
+			if (zero)
+				pr_debug("Note saved_digest all zero - did you forget to load the hashes?\n");
+			return -EBADMSG;
+		}
+
+		memcpy(stored_digest, buf + hash_offset_in_block[lvl],
+		       digest_size);
+
+		page = grab_cache_page(f->f_inode->i_mapping, hash_page);
+		if (page) {
+			u8 *addr = kmap_atomic(page);
+
+			memcpy(addr, buf, INCFS_DATA_FILE_BLOCK_SIZE);
+			kunmap_atomic(addr);
+			SetPageChecked(page);
+			unlock_page(page);
+			put_page(page);
+		}
+	}
+
+	res = incfs_calc_digest(tree->alg, data,
+				range(calculated_digest, digest_size));
+	if (res)
+		return res;
+
+	if (memcmp(stored_digest, calculated_digest, digest_size)) {
+		pr_debug("Leaf hash mismatch blk:%d\n", block_index);
+		return -EBADMSG;
+	}
+
+	return 0;
+}
+
+static struct data_file_segment *get_file_segment(struct data_file *df,
+						  int block_index)
+{
+	int seg_idx = block_index % ARRAY_SIZE(df->df_segments);
+
+	return &df->df_segments[seg_idx];
+}
+
+static bool is_data_block_present(struct data_file_block *block)
+{
+	return (block->db_backing_file_data_offset != 0) &&
+	       (block->db_stored_size != 0);
+}
+
+static void convert_data_file_block(struct incfs_blockmap_entry *bme,
+				    struct data_file_block *res_block)
+{
+	u16 flags = le16_to_cpu(bme->me_flags);
+
+	res_block->db_backing_file_data_offset =
+		le16_to_cpu(bme->me_data_offset_hi);
+	res_block->db_backing_file_data_offset <<= 32;
+	res_block->db_backing_file_data_offset |=
+		le32_to_cpu(bme->me_data_offset_lo);
+	res_block->db_stored_size = le16_to_cpu(bme->me_data_size);
+	res_block->db_comp_alg = flags & INCFS_BLOCK_COMPRESSED_MASK;
+}
+
+static int get_data_file_block(struct data_file *df, int index,
+			       struct data_file_block *res_block)
+{
+	struct incfs_blockmap_entry bme = {};
+	struct backing_file_context *bfc = NULL;
+	loff_t blockmap_off = 0;
+	int error = 0;
+
+	if (!df || !res_block)
+		return -EFAULT;
+
+	blockmap_off = df->df_blockmap_off;
+	bfc = df->df_backing_file_context;
+
+	if (index < 0 || blockmap_off == 0)
+		return -EINVAL;
+
+	error = incfs_read_blockmap_entry(bfc, index, blockmap_off, &bme);
+	if (error)
+		return error;
+
+	convert_data_file_block(&bme, res_block);
+	return 0;
+}
+
+static int check_room_for_one_range(u32 size, u32 size_out)
+{
+	if (size_out + sizeof(struct incfs_filled_range) > size)
+		return -ERANGE;
+	return 0;
+}
+
+static int copy_one_range(struct incfs_filled_range *range, void __user *buffer,
+			  u32 size, u32 *size_out)
+{
+	int error = check_room_for_one_range(size, *size_out);
+
+	if (error)
+		return error;
+
+	if (copy_to_user(((char __user *)buffer) + *size_out, range,
+				sizeof(*range)))
+		return -EFAULT;
+
+	*size_out += sizeof(*range);
+	return 0;
+}
+
+#define READ_BLOCKMAP_ENTRIES 512
+int incfs_get_filled_blocks(struct data_file *df,
+			    struct incfs_file_data *fd,
+			    struct incfs_get_filled_blocks_args *arg)
+{
+	int error = 0;
+	bool in_range = false;
+	struct incfs_filled_range range;
+	void __user *buffer = u64_to_user_ptr(arg->range_buffer);
+	u32 size = arg->range_buffer_size;
+	u32 end_index =
+		arg->end_index ? arg->end_index : df->df_total_block_count;
+	u32 *size_out = &arg->range_buffer_size_out;
+	int i = READ_BLOCKMAP_ENTRIES - 1;
+	int entries_read = 0;
+	struct incfs_blockmap_entry *bme;
+	int data_blocks_filled = 0;
+	int hash_blocks_filled = 0;
+
+	*size_out = 0;
+	if (end_index > df->df_total_block_count)
+		end_index = df->df_total_block_count;
+	arg->total_blocks_out = df->df_total_block_count;
+	arg->data_blocks_out = df->df_data_block_count;
+
+	if (atomic_read(&df->df_data_blocks_written) ==
+	    df->df_data_block_count) {
+		pr_debug("File marked full, fast get_filled_blocks\n");
+		if (arg->start_index > end_index) {
+			arg->index_out = arg->start_index;
+			return 0;
+		}
+		arg->index_out = arg->start_index;
+
+		error = check_room_for_one_range(size, *size_out);
+		if (error)
+			return error;
+
+		range = (struct incfs_filled_range){
+			.begin = arg->start_index,
+			.end = end_index,
+		};
+
+		error = copy_one_range(&range, buffer, size, size_out);
+		if (error)
+			return error;
+		arg->index_out = end_index;
+		return 0;
+	}
+
+	bme = kzalloc(sizeof(*bme) * READ_BLOCKMAP_ENTRIES,
+		      GFP_NOFS | __GFP_COMP);
+	if (!bme)
+		return -ENOMEM;
+
+	for (arg->index_out = arg->start_index; arg->index_out < end_index;
+	     ++arg->index_out) {
+		struct data_file_block dfb;
+
+		if (++i == READ_BLOCKMAP_ENTRIES) {
+			entries_read = incfs_read_blockmap_entries(
+				df->df_backing_file_context, bme,
+				arg->index_out, READ_BLOCKMAP_ENTRIES,
+				df->df_blockmap_off);
+			if (entries_read < 0) {
+				error = entries_read;
+				break;
+			}
+
+			i = 0;
+		}
+
+		if (i >= entries_read) {
+			error = -EIO;
+			break;
+		}
+
+		convert_data_file_block(bme + i, &dfb);
+
+		if (is_data_block_present(&dfb)) {
+			if (arg->index_out >= df->df_data_block_count)
+				++hash_blocks_filled;
+			else
+				++data_blocks_filled;
+		}
+
+		if (is_data_block_present(&dfb) == in_range)
+			continue;
+
+		if (!in_range) {
+			error = check_room_for_one_range(size, *size_out);
+			if (error)
+				break;
+			in_range = true;
+			range.begin = arg->index_out;
+		} else {
+			range.end = arg->index_out;
+			error = copy_one_range(&range, buffer, size, size_out);
+			if (error) {
+				/*
+				 * copy_one_range() is retried once more after
+				 * the loop; if that retry also fails, it
+				 * resets index_out.
+				 */
+				break;
+			}
+			in_range = false;
+		}
+	}
+
+	if (in_range) {
+		range.end = arg->index_out;
+		error = copy_one_range(&range, buffer, size, size_out);
+		if (error)
+			arg->index_out = range.begin;
+	}
+
+	if (arg->start_index == 0) {
+		fd->fd_get_block_pos = 0;
+		fd->fd_filled_data_blocks = 0;
+		fd->fd_filled_hash_blocks = 0;
+	}
+
+	if (arg->start_index == fd->fd_get_block_pos) {
+		fd->fd_get_block_pos = arg->index_out + 1;
+		fd->fd_filled_data_blocks += data_blocks_filled;
+		fd->fd_filled_hash_blocks += hash_blocks_filled;
+	}
+
+	if (fd->fd_get_block_pos == df->df_total_block_count + 1) {
+		if (fd->fd_filled_data_blocks >
+		   atomic_read(&df->df_data_blocks_written))
+			atomic_set(&df->df_data_blocks_written,
+				   fd->fd_filled_data_blocks);
+
+		if (fd->fd_filled_hash_blocks >
+		   atomic_read(&df->df_hash_blocks_written))
+			atomic_set(&df->df_hash_blocks_written,
+				   fd->fd_filled_hash_blocks);
+	}
+
+	kfree(bme);
+	return error;
+}
+
+static bool is_read_done(struct pending_read *read)
+{
+	return atomic_read_acquire(&read->done) != 0;
+}
+
+static void set_read_done(struct pending_read *read)
+{
+	atomic_set_release(&read->done, 1);
+}
+
+/*
+ * Notifies a given data file about a pending read from a given block.
+ * Returns a new pending read entry.
+ */
+static struct pending_read *add_pending_read(struct data_file *df,
+					     int block_index)
+{
+	struct pending_read *result = NULL;
+	struct data_file_segment *segment = NULL;
+	struct mount_info *mi = NULL;
+
+	segment = get_file_segment(df, block_index);
+	mi = df->df_mount_info;
+
+	result = kzalloc(sizeof(*result), GFP_NOFS);
+	if (!result)
+		return NULL;
+
+	result->file_id = df->df_id;
+	result->block_index = block_index;
+	result->timestamp_us = ktime_to_us(ktime_get());
+	result->uid = current_uid().val;
+
+	spin_lock(&mi->pending_read_lock);
+
+	result->serial_number = ++mi->mi_last_pending_read_number;
+	mi->mi_pending_reads_count++;
+
+	list_add_rcu(&result->mi_reads_list, &mi->mi_reads_list_head);
+	list_add_rcu(&result->segment_reads_list, &segment->reads_list_head);
+
+	spin_unlock(&mi->pending_read_lock);
+
+	wake_up_all(&mi->mi_pending_reads_notif_wq);
+	return result;
+}
+
+static void free_pending_read_entry(struct rcu_head *entry)
+{
+	struct pending_read *read;
+
+	read = container_of(entry, struct pending_read, rcu);
+
+	kfree(read);
+}
+
+/* Notifies a given data file that a pending read has completed. */
+static void remove_pending_read(struct data_file *df, struct pending_read *read)
+{
+	struct mount_info *mi = NULL;
+
+	if (!df || !read) {
+		WARN_ON(!df);
+		WARN_ON(!read);
+		return;
+	}
+
+	mi = df->df_mount_info;
+
+	spin_lock(&mi->pending_read_lock);
+
+	list_del_rcu(&read->mi_reads_list);
+	list_del_rcu(&read->segment_reads_list);
+
+	mi->mi_pending_reads_count--;
+
+	spin_unlock(&mi->pending_read_lock);
+
+	/* Don't free. Wait for readers */
+	call_rcu(&read->rcu, free_pending_read_entry);
+}
+
+static void notify_pending_reads(struct mount_info *mi,
+		struct data_file_segment *segment,
+		int index)
+{
+	struct pending_read *entry = NULL;
+
+	/* Notify pending reads waiting for this block. */
+	rcu_read_lock();
+	list_for_each_entry_rcu(entry, &segment->reads_list_head,
+						segment_reads_list) {
+		if (entry->block_index == index)
+			set_read_done(entry);
+	}
+	rcu_read_unlock();
+	wake_up_all(&segment->new_data_arrival_wq);
+
+	atomic_inc(&mi->mi_blocks_written);
+	wake_up_all(&mi->mi_blocks_written_notif_wq);
+}
+
+static int usleep_interruptible(u32 us)
+{
+	/* See:
+	 * https://www.kernel.org/doc/Documentation/timers/timers-howto.txt
+	 * for an explanation.
+	 */
+	if (us < 10) {
+		udelay(us);
+		return 0;
+	} else if (us < 20000) {
+		usleep_range(us, us + us / 10);
+		return 0;
+	} else
+		return msleep_interruptible(us / 1000);
+}
+
+static int wait_for_data_block(struct data_file *df, int block_index,
+			       struct data_file_block *res_block,
+			       struct incfs_read_data_file_timeouts *timeouts)
+{
+	struct data_file_block block = {};
+	struct data_file_segment *segment = NULL;
+	struct pending_read *read = NULL;
+	struct mount_info *mi = NULL;
+	int error;
+	int wait_res = 0;
+	unsigned int delayed_pending_us = 0, delayed_min_us = 0;
+	bool delayed_pending = false;
+
+	if (!df || !res_block)
+		return -EFAULT;
+
+	if (block_index < 0 || block_index >= df->df_data_block_count)
+		return -EINVAL;
+
+	if (df->df_blockmap_off <= 0 || !df->df_mount_info)
+		return -ENODATA;
+
+	mi = df->df_mount_info;
+	segment = get_file_segment(df, block_index);
+
+	error = down_read_killable(&segment->rwsem);
+	if (error)
+		return error;
+
+	/* Look up the given block */
+	error = get_data_file_block(df, block_index, &block);
+
+	up_read(&segment->rwsem);
+
+	if (error)
+		return error;
+
+	/* If the block was found, just return it. No need to wait. */
+	if (is_data_block_present(&block)) {
+		*res_block = block;
+		if (timeouts && timeouts->min_time_us) {
+			delayed_min_us = timeouts->min_time_us;
+			error = usleep_interruptible(delayed_min_us);
+			goto out;
+		}
+		return 0;
+	} else {
+		/* If it's not found, create a pending read */
+		if (timeouts && timeouts->max_pending_time_us) {
+			read = add_pending_read(df, block_index);
+			if (!read)
+				return -ENOMEM;
+		} else {
+			log_block_read(mi, &df->df_id, block_index);
+			return -ETIME;
+		}
+	}
+
+	/* Rest of function only applies if timeouts != NULL */
+	if (!timeouts) {
+		pr_warn("incfs: timeouts unexpectedly NULL\n");
+		return -EFSCORRUPTED;
+	}
+
+	/* Wait for notifications about block's arrival */
+	wait_res =
+		wait_event_interruptible_timeout(segment->new_data_arrival_wq,
+			(is_read_done(read)),
+			usecs_to_jiffies(timeouts->max_pending_time_us));
+
+	/* Woke up, the pending read is no longer needed. */
+	remove_pending_read(df, read);
+
+	if (wait_res == 0) {
+		/* Wait has timed out */
+		log_block_read(mi, &df->df_id, block_index);
+		return -ETIME;
+	}
+	if (wait_res < 0) {
+		/*
+		 * Only ERESTARTSYS is really expected here when a signal
+		 * comes while we wait.
+		 */
+		return wait_res;
+	}
+
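+	/*
+	 * The block arrived after waiting. If less time was spent pending
+	 * than the configured minimum, sleep for the remainder so the
+	 * minimum read time is still honoured.
+	 */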
+	delayed_pending = true;
+	delayed_pending_us = timeouts->max_pending_time_us -
+				jiffies_to_usecs(wait_res);
+	if (timeouts->min_pending_time_us > delayed_pending_us) {
+		delayed_min_us = timeouts->min_pending_time_us -
+					     delayed_pending_us;
+		error = usleep_interruptible(delayed_min_us);
+		if (error)
+			return error;
+	}
+
+	error = down_read_killable(&segment->rwsem);
+	if (error)
+		return error;
+
+	/*
+	 * Re-read blocks info now, it has just arrived and
+	 * should be available.
+	 */
+	error = get_data_file_block(df, block_index, &block);
+	if (!error) {
+		if (is_data_block_present(&block))
+			*res_block = block;
+		else {
+			/*
+			 * Somehow wait finished successfully but block still
+			 * The wait finished successfully, yet the block still
+			 * can't be found. That should not happen.
+			pr_warn("incfs: Wait succeeded but block not found.\n");
+			error = -ENODATA;
+		}
+	}
+	up_read(&segment->rwsem);
+
+out:
+	if (error)
+		return error;
+
+	if (delayed_pending) {
+		mi->mi_reads_delayed_pending++;
+		mi->mi_reads_delayed_pending_us +=
+			delayed_pending_us;
+	}
+
+	if (delayed_min_us) {
+		mi->mi_reads_delayed_min++;
+		mi->mi_reads_delayed_min_us += delayed_min_us;
+	}
+
+	return 0;
+}
+
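+/*
+ * On a failed read, remember the file id, page index, errno, uid and
+ * timestamp in the mount's last-error fields so they can be reported
+ * via sysfs.
+ */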
+static int incfs_update_sysfs_error(struct file *file, int index, int result,
+				struct mount_info *mi, struct data_file *df)
+{
+	int error;
+
+	if (result >= 0)
+		return 0;
+
+	error = mutex_lock_interruptible(&mi->mi_le_mutex);
+	if (error)
+		return error;
+
+	mi->mi_le_file_id = df->df_id;
+	mi->mi_le_time_us = ktime_to_us(ktime_get());
+	mi->mi_le_page = index;
+	mi->mi_le_errno = result;
+	mi->mi_le_uid = current_uid().val;
+	mutex_unlock(&mi->mi_le_mutex);
+
+	return 0;
+}
+
+ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
+			int index, struct mem_range tmp,
+			struct incfs_read_data_file_timeouts *timeouts)
+{
+	loff_t pos;
+	ssize_t result;
+	size_t bytes_to_read;
+	struct mount_info *mi = NULL;
+	struct backing_file_context *bfc = NULL;
+	struct data_file_block block = {};
+	struct data_file *df = get_incfs_data_file(f);
+
+	if (!dst.data || !df || !tmp.data)
+		return -EFAULT;
+
+	if (tmp.len < 2 * INCFS_DATA_FILE_BLOCK_SIZE)
+		return -ERANGE;
+
+	mi = df->df_mount_info;
+	bfc = df->df_backing_file_context;
+
+	result = wait_for_data_block(df, index, &block, timeouts);
+	if (result < 0)
+		goto out;
+
+	pos = block.db_backing_file_data_offset;
+	if (block.db_comp_alg == COMPRESSION_NONE) {
+		bytes_to_read = min(dst.len, block.db_stored_size);
+		result = incfs_kread(bfc, dst.data, bytes_to_read, pos);
+
+		/* Some data was read, but not enough */
+		if (result >= 0 && result != bytes_to_read)
+			result = -EIO;
+	} else {
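+		/* Compressed block: read it into tmp, then decompress into dst */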
+		bytes_to_read = min(tmp.len, block.db_stored_size);
+		result = incfs_kread(bfc, tmp.data, bytes_to_read, pos);
+		if (result == bytes_to_read) {
+			result =
+				decompress(mi, range(tmp.data, bytes_to_read),
+					   dst, block.db_comp_alg);
+			if (result < 0) {
+				const char *name =
+				    bfc->bc_file->f_path.dentry->d_name.name;
+
+				pr_warn_once("incfs: Decompression error. %s\n",
+					     name);
+			}
+		} else if (result >= 0) {
+			/* Some data was read, but not enough */
+			result = -EIO;
+		}
+	}
+
+	if (result > 0) {
+		int err = validate_hash_tree(bfc, f, index, dst, tmp.data);
+
+		if (err < 0)
+			result = err;
+	}
+
+	if (result >= 0)
+		log_block_read(mi, &df->df_id, index);
+
+out:
+	if (result == -ETIME)
+		mi->mi_reads_failed_timed_out++;
+	else if (result == -EBADMSG)
+		mi->mi_reads_failed_hash_verification++;
+	else if (result < 0)
+		mi->mi_reads_failed_other++;
+
+	incfs_update_sysfs_error(f, index, result, mi, df);
+
+	return result;
+}
+
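+/*
+ * Read a range of the file's hash (Merkle) tree area from the backing file,
+ * clamping the read so it doesn't run past the end of the tree.
+ */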
+ssize_t incfs_read_merkle_tree_blocks(struct mem_range dst,
+				      struct data_file *df, size_t offset)
+{
+	struct backing_file_context *bfc = NULL;
+	struct incfs_df_signature *sig = NULL;
+	size_t to_read = dst.len;
+
+	if (!dst.data || !df)
+		return -EFAULT;
+
+	sig = df->df_signature;
+	bfc = df->df_backing_file_context;
+
+	if (offset > sig->hash_size)
+		return -ERANGE;
+
+	if (offset + to_read > sig->hash_size)
+		to_read = sig->hash_size - offset;
+
+	return incfs_kread(bfc, dst.data, to_read, sig->hash_offset + offset);
+}
+
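+/*
+ * Store a data block delivered by a data loader. The block is first checked
+ * under the segment's read lock; if it is still missing, it is appended to
+ * the backing file under the write lock and bc_mutex, and any pending reads
+ * waiting for it are woken up.
+ */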
+int incfs_process_new_data_block(struct data_file *df,
+				 struct incfs_fill_block *block, u8 *data)
+{
+	struct mount_info *mi = NULL;
+	struct backing_file_context *bfc = NULL;
+	struct data_file_segment *segment = NULL;
+	struct data_file_block existing_block = {};
+	u16 flags = 0;
+	int error = 0;
+
+	if (!df || !block)
+		return -EFAULT;
+
+	bfc = df->df_backing_file_context;
+	mi = df->df_mount_info;
+
+	if (block->block_index >= df->df_data_block_count)
+		return -ERANGE;
+
+	segment = get_file_segment(df, block->block_index);
+	if (!segment)
+		return -EFAULT;
+
+	if (block->compression == COMPRESSION_LZ4)
+		flags |= INCFS_BLOCK_COMPRESSED_LZ4;
+	else if (block->compression == COMPRESSION_ZSTD)
+		flags |= INCFS_BLOCK_COMPRESSED_ZSTD;
+	else if (block->compression)
+		return -EINVAL;
+
+	error = down_read_killable(&segment->rwsem);
+	if (error)
+		return error;
+
+	error = get_data_file_block(df, block->block_index, &existing_block);
+
+	up_read(&segment->rwsem);
+
+	if (error)
+		return error;
+	if (is_data_block_present(&existing_block)) {
+		/* Block is already present, nothing to do here */
+		return 0;
+	}
+
+	error = down_write_killable(&segment->rwsem);
+	if (error)
+		return error;
+
+	error = mutex_lock_interruptible(&bfc->bc_mutex);
+	if (!error) {
+		error = incfs_write_data_block_to_backing_file(
+			bfc, range(data, block->data_len), block->block_index,
+			df->df_blockmap_off, flags);
+		mutex_unlock(&bfc->bc_mutex);
+	}
+	if (!error) {
+		notify_pending_reads(mi, segment, block->block_index);
+		atomic_inc(&df->df_data_blocks_written);
+	}
+
+	up_write(&segment->rwsem);
+
+	if (error)
+		pr_debug("%d error: %d\n", block->block_index, error);
+	return error;
+}
+
+int incfs_read_file_signature(struct data_file *df, struct mem_range dst)
+{
+	struct backing_file_context *bfc = df->df_backing_file_context;
+	struct incfs_df_signature *sig;
+	int read_res = 0;
+
+	if (!dst.data)
+		return -EFAULT;
+
+	sig = df->df_signature;
+	if (!sig)
+		return 0;
+
+	if (dst.len < sig->sig_size)
+		return -E2BIG;
+
+	read_res = incfs_kread(bfc, dst.data, sig->sig_size, sig->sig_offset);
+
+	if (read_res < 0)
+		return read_res;
+
+	if (read_res != sig->sig_size)
+		return -EIO;
+
+	return read_res;
+}
+
+int incfs_process_new_hash_block(struct data_file *df,
+				 struct incfs_fill_block *block, u8 *data)
+{
+	struct backing_file_context *bfc = NULL;
+	struct mount_info *mi = NULL;
+	struct mtree *hash_tree = NULL;
+	struct incfs_df_signature *sig = NULL;
+	loff_t hash_area_base = 0;
+	loff_t hash_area_size = 0;
+	int error = 0;
+
+	if (!df || !block)
+		return -EFAULT;
+
+	if (!(block->flags & INCFS_BLOCK_FLAGS_HASH))
+		return -EINVAL;
+
+	bfc = df->df_backing_file_context;
+	mi = df->df_mount_info;
+
+	hash_tree = df->df_hash_tree;
+	sig = df->df_signature;
+	if (!hash_tree || !sig || sig->hash_offset == 0)
+		return -ENOTSUPP;
+
+	hash_area_base = sig->hash_offset;
+	hash_area_size = sig->hash_size;
+	if (hash_area_size < block->block_index * INCFS_DATA_FILE_BLOCK_SIZE
+				+ block->data_len) {
+		/* Hash block goes beyond dedicated hash area of this file. */
+		return -ERANGE;
+	}
+
+	error = mutex_lock_interruptible(&bfc->bc_mutex);
+	if (!error) {
+		error = incfs_write_hash_block_to_backing_file(
+			bfc, range(data, block->data_len), block->block_index,
+			hash_area_base, df->df_blockmap_off, df->df_size);
+		mutex_unlock(&bfc->bc_mutex);
+	}
+	if (!error)
+		atomic_inc(&df->df_hash_blocks_written);
+
+	return error;
+}
+
+static int process_blockmap_md(struct incfs_blockmap *bm,
+			       struct metadata_handler *handler)
+{
+	struct data_file *df = handler->context;
+	int error = 0;
+	loff_t base_off = le64_to_cpu(bm->m_base_offset);
+	u32 block_count = le32_to_cpu(bm->m_block_count);
+
+	if (!df)
+		return -EFAULT;
+
+	if (df->df_data_block_count > block_count)
+		return -EBADMSG;
+
+	df->df_total_block_count = block_count;
+	df->df_blockmap_off = base_off;
+	return error;
+}
+
+static int process_file_signature_md(struct incfs_file_signature *sg,
+				struct metadata_handler *handler)
+{
+	struct data_file *df = handler->context;
+	struct mtree *hash_tree = NULL;
+	int error = 0;
+	struct incfs_df_signature *signature =
+		kzalloc(sizeof(*signature), GFP_NOFS);
+	void *buf = NULL;
+	ssize_t read;
+
+	if (!signature)
+		return -ENOMEM;
+
+	if (!df || !df->df_backing_file_context ||
+	    !df->df_backing_file_context->bc_file) {
+		error = -ENOENT;
+		goto out;
+	}
+
+	signature->hash_offset = le64_to_cpu(sg->sg_hash_tree_offset);
+	signature->hash_size = le32_to_cpu(sg->sg_hash_tree_size);
+	signature->sig_offset = le64_to_cpu(sg->sg_sig_offset);
+	signature->sig_size = le32_to_cpu(sg->sg_sig_size);
+
+	buf = kzalloc(signature->sig_size, GFP_NOFS);
+	if (!buf) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	read = incfs_kread(df->df_backing_file_context, buf,
+			   signature->sig_size, signature->sig_offset);
+	if (read < 0) {
+		error = read;
+		goto out;
+	}
+
+	if (read != signature->sig_size) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	hash_tree = incfs_alloc_mtree(range(buf, signature->sig_size),
+				      df->df_data_block_count);
+	if (IS_ERR(hash_tree)) {
+		error = PTR_ERR(hash_tree);
+		hash_tree = NULL;
+		goto out;
+	}
+	if (hash_tree->hash_tree_area_size != signature->hash_size) {
+		error = -EINVAL;
+		goto out;
+	}
+	if (signature->hash_size > 0 &&
+	    handler->md_record_offset <= signature->hash_offset) {
+		error = -EINVAL;
+		goto out;
+	}
+	if (handler->md_record_offset <= signature->sig_offset) {
+		error = -EINVAL;
+		goto out;
+	}
+	df->df_hash_tree = hash_tree;
+	hash_tree = NULL;
+	df->df_signature = signature;
+	signature = NULL;
+out:
+	incfs_free_mtree(hash_tree);
+	kfree(signature);
+	kfree(buf);
+
+	return error;
+}
+
+static int process_status_md(struct incfs_status *is,
+			     struct metadata_handler *handler)
+{
+	struct data_file *df = handler->context;
+
+	df->df_initial_data_blocks_written =
+		le32_to_cpu(is->is_data_blocks_written);
+	atomic_set(&df->df_data_blocks_written,
+		   df->df_initial_data_blocks_written);
+
+	df->df_initial_hash_blocks_written =
+		le32_to_cpu(is->is_hash_blocks_written);
+	atomic_set(&df->df_hash_blocks_written,
+		   df->df_initial_hash_blocks_written);
+
+	df->df_status_offset = handler->md_record_offset;
+	return 0;
+}
+
+static int process_file_verity_signature_md(
+		struct incfs_file_verity_signature *vs,
+		struct metadata_handler *handler)
+{
+	struct data_file *df = handler->context;
+	struct incfs_df_verity_signature *verity_signature;
+
+	if (!df)
+		return -EFAULT;
+
+	verity_signature = kzalloc(sizeof(*verity_signature), GFP_NOFS);
+	if (!verity_signature)
+		return -ENOMEM;
+
+	verity_signature->offset = le64_to_cpu(vs->vs_offset);
+	verity_signature->size = le32_to_cpu(vs->vs_size);
+	if (verity_signature->size > FS_VERITY_MAX_SIGNATURE_SIZE) {
+		kfree(verity_signature);
+		return -EFAULT;
+	}
+
+	df->df_verity_signature = verity_signature;
+	return 0;
+}
+
+static int incfs_scan_metadata_chain(struct data_file *df)
+{
+	struct metadata_handler *handler = NULL;
+	int result = 0;
+	int records_count = 0;
+	int error = 0;
+	struct backing_file_context *bfc = NULL;
+	int nondata_block_count;
+
+	if (!df || !df->df_backing_file_context)
+		return -EFAULT;
+
+	bfc = df->df_backing_file_context;
+
+	handler = kzalloc(sizeof(*handler), GFP_NOFS);
+	if (!handler)
+		return -ENOMEM;
+
+	handler->md_record_offset = df->df_metadata_off;
+	handler->context = df;
+	handler->handle_blockmap = process_blockmap_md;
+	handler->handle_signature = process_file_signature_md;
+	handler->handle_status = process_status_md;
+	handler->handle_verity_signature = process_file_verity_signature_md;
+
+	while (handler->md_record_offset > 0) {
+		error = incfs_read_next_metadata_record(bfc, handler);
+		if (error) {
+			pr_warn("incfs: Error reading incfs-metadata record. Offset: %lld Record #%d Error code: %d\n",
+				handler->md_record_offset, records_count + 1,
+				-error);
+			break;
+		}
+		records_count++;
+	}
+	if (error) {
+		pr_warn("incfs: Error %d after reading %d incfs-metadata records.\n",
+			 -error, records_count);
+		result = error;
+	} else
+		result = records_count;
+
+	nondata_block_count = df->df_total_block_count -
+		df->df_data_block_count;
+	if (df->df_hash_tree) {
+		int hash_block_count = get_blocks_count_for_size(
+			df->df_hash_tree->hash_tree_area_size);
+
+		/*
+		 * Files that were created with a hash tree have the hash tree
+		 * included in the block map, i.e. nondata_block_count ==
+		 * hash_block_count.  Files whose hash tree was added by
+		 * FS_IOC_ENABLE_VERITY will still have the original block
+		 * count, i.e. nondata_block_count == 0.
+		 */
+		if (nondata_block_count != hash_block_count &&
+		    nondata_block_count != 0)
+			result = -EINVAL;
+	} else if (nondata_block_count != 0) {
+		result = -EINVAL;
+	}
+
+	kfree(handler);
+	return result;
+}
+
+/*
+ * Quickly checks if there are pending reads with a serial number larger
+ * than a given one.
+ */
+bool incfs_fresh_pending_reads_exist(struct mount_info *mi, int last_number)
+{
+	bool result = false;
+
+	spin_lock(&mi->pending_read_lock);
+	result = (mi->mi_last_pending_read_number > last_number) &&
+		(mi->mi_pending_reads_count > 0);
+	spin_unlock(&mi->pending_read_lock);
+	return result;
+}
+
+int incfs_collect_pending_reads(struct mount_info *mi, int sn_lowerbound,
+				struct incfs_pending_read_info *reads,
+				struct incfs_pending_read_info2 *reads2,
+				int reads_size, int *new_max_sn)
+{
+	int reported_reads = 0;
+	struct pending_read *entry = NULL;
+
+	if (!mi)
+		return -EFAULT;
+
+	if (reads_size <= 0)
+		return 0;
+
+	if (!incfs_fresh_pending_reads_exist(mi, sn_lowerbound))
+		return 0;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(entry, &mi->mi_reads_list_head, mi_reads_list) {
+		if (entry->serial_number <= sn_lowerbound)
+			continue;
+
+		if (reads) {
+			reads[reported_reads].file_id = entry->file_id;
+			reads[reported_reads].block_index = entry->block_index;
+			reads[reported_reads].serial_number =
+				entry->serial_number;
+			reads[reported_reads].timestamp_us =
+				entry->timestamp_us;
+		}
+
+		if (reads2) {
+			reads2[reported_reads].file_id = entry->file_id;
+			reads2[reported_reads].block_index = entry->block_index;
+			reads2[reported_reads].serial_number =
+				entry->serial_number;
+			reads2[reported_reads].timestamp_us =
+				entry->timestamp_us;
+			reads2[reported_reads].uid = entry->uid;
+		}
+
+		if (entry->serial_number > *new_max_sn)
+			*new_max_sn = entry->serial_number;
+
+		reported_reads++;
+		if (reported_reads >= reads_size)
+			break;
+	}
+
+	rcu_read_unlock();
+
+	return reported_reads;
+}
+
+struct read_log_state incfs_get_log_state(struct mount_info *mi)
+{
+	struct read_log *log = &mi->mi_log;
+	struct read_log_state result;
+
+	spin_lock(&log->rl_lock);
+	result = log->rl_head;
+	spin_unlock(&log->rl_lock);
+	return result;
+}
+
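+/*
+ * Returns the number of log records accumulated since *state was captured,
+ * counting from the tail if the log generation has changed since then.
+ */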
+int incfs_get_uncollected_logs_count(struct mount_info *mi,
+				     const struct read_log_state *state)
+{
+	struct read_log *log = &mi->mi_log;
+	u32 generation;
+	u64 head_no, tail_no;
+
+	spin_lock(&log->rl_lock);
+	tail_no = log->rl_tail.current_record_no;
+	head_no = log->rl_head.current_record_no;
+	generation = log->rl_head.generation_id;
+	spin_unlock(&log->rl_lock);
+
+	if (generation != state->generation_id)
+		return head_no - tail_no;
+	else
+		return head_no - max_t(u64, tail_no, state->current_record_no);
+}
+
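+/*
+ * Copy logged reads from *state up to the current head into reads/reads2.
+ * If the log generation changed, or the tail has already overwritten
+ * records behind *state, *state is resynchronized first. Returns the
+ * number of records copied.
+ */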
+int incfs_collect_logged_reads(struct mount_info *mi,
+			       struct read_log_state *state,
+			       struct incfs_pending_read_info *reads,
+			       struct incfs_pending_read_info2 *reads2,
+			       int reads_size)
+{
+	int dst_idx;
+	struct read_log *log = &mi->mi_log;
+	struct read_log_state *head, *tail;
+
+	spin_lock(&log->rl_lock);
+	head = &log->rl_head;
+	tail = &log->rl_tail;
+
+	if (state->generation_id != head->generation_id) {
+		pr_debug("read ptr is wrong generation: %u/%u\n",
+			 state->generation_id, head->generation_id);
+
+		*state = (struct read_log_state){
+			.generation_id = head->generation_id,
+		};
+	}
+
+	if (state->current_record_no < tail->current_record_no) {
+		pr_debug("read ptr is behind, moving: %u/%u -> %u/%u\n",
+			 (u32)state->next_offset,
+			 (u32)state->current_pass_no,
+			 (u32)tail->next_offset, (u32)tail->current_pass_no);
+
+		*state = *tail;
+	}
+
+	for (dst_idx = 0; dst_idx < reads_size; dst_idx++) {
+		if (state->current_record_no == head->current_record_no)
+			break;
+
+		log_read_one_record(log, state);
+
+		if (reads)
+			reads[dst_idx] = (struct incfs_pending_read_info) {
+				.file_id = state->base_record.file_id,
+				.block_index = state->base_record.block_index,
+				.serial_number = state->current_record_no,
+				.timestamp_us =
+					state->base_record.absolute_ts_us,
+			};
+
+		if (reads2)
+			reads2[dst_idx] = (struct incfs_pending_read_info2) {
+				.file_id = state->base_record.file_id,
+				.block_index = state->base_record.block_index,
+				.serial_number = state->current_record_no,
+				.timestamp_us =
+					state->base_record.absolute_ts_us,
+				.uid = state->base_record.uid,
+			};
+	}
+
+	spin_unlock(&log->rl_lock);
+	return dst_idx;
+}
+
diff --git a/fs/incfs/data_mgmt.h b/fs/incfs/data_mgmt.h
new file mode 100644
index 0000000..84db038
--- /dev/null
+++ b/fs/incfs/data_mgmt.h
@@ -0,0 +1,543 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+#ifndef _INCFS_DATA_MGMT_H
+#define _INCFS_DATA_MGMT_H
+
+#include <linux/cred.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/zstd.h>
+#include <crypto/hash.h>
+#include <linux/rwsem.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "internal.h"
+#include "pseudo_files.h"
+
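+/*
+ * Number of lock segments per data file. A block's segment is chosen by its
+ * index to reduce lock contention (see struct data_file).
+ */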
+#define SEGMENTS_PER_FILE 3
+
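+/*
+ * Read log records are variable-sized, delta-encoded against the most recent
+ * full_record (kept in read_log_state.base_record): SAME_FILE* records drop
+ * the file id and store timestamps as deltas, and the *_CLOSE_BLOCK* and
+ * *_NEXT_BLOCK* variants shrink or drop the block index as well.
+ */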
+enum LOG_RECORD_TYPE {
+	FULL,
+	SAME_FILE,
+	SAME_FILE_CLOSE_BLOCK,
+	SAME_FILE_CLOSE_BLOCK_SHORT,
+	SAME_FILE_NEXT_BLOCK,
+	SAME_FILE_NEXT_BLOCK_SHORT,
+};
+
+struct full_record {
+	enum LOG_RECORD_TYPE type : 3; /* FULL */
+	u32 block_index : 29;
+	incfs_uuid_t file_id;
+	u64 absolute_ts_us;
+	uid_t uid;
+} __packed; /* 32 bytes */
+
+struct same_file {
+	enum LOG_RECORD_TYPE type : 3; /* SAME_FILE */
+	u32 block_index : 29;
+	uid_t uid;
+	u16 relative_ts_us; /* max 2^16 us ~= 64 ms */
+} __packed; /* 10 bytes */
+
+struct same_file_close_block {
+	enum LOG_RECORD_TYPE type : 3; /* SAME_FILE_CLOSE_BLOCK */
+	u16 relative_ts_us : 13; /* max 2^13 us ~= 8 ms */
+	s16 block_index_delta;
+} __packed; /* 4 bytes */
+
+struct same_file_close_block_short {
+	enum LOG_RECORD_TYPE type : 3; /* SAME_FILE_CLOSE_BLOCK_SHORT */
+	u8 relative_ts_tens_us : 5; /* max 2^5*10 us ~= 320 us */
+	s8 block_index_delta;
+} __packed; /* 2 bytes */
+
+struct same_file_next_block {
+	enum LOG_RECORD_TYPE type : 3; /* SAME_FILE_NEXT_BLOCK */
+	u16 relative_ts_us : 13; /* max 2^13 us ~= 8 ms */
+} __packed; /* 2 bytes */
+
+struct same_file_next_block_short {
+	enum LOG_RECORD_TYPE type : 3; /* SAME_FILE_NEXT_BLOCK_SHORT */
+	u8 relative_ts_tens_us : 5; /* max 2^5*10 us ~= 320 us */
+} __packed; /* 1 byte */
+
+union log_record {
+	struct full_record full_record;
+	struct same_file same_file;
+	struct same_file_close_block same_file_close_block;
+	struct same_file_close_block_short same_file_close_block_short;
+	struct same_file_next_block same_file_next_block;
+	struct same_file_next_block_short same_file_next_block_short;
+};
+
+struct read_log_state {
+	/* Log buffer generation id, incremented on configuration changes */
+	u32 generation_id;
+
+	/* Offset in rl_ring_buf to write into. */
+	u32 next_offset;
+
+	/* Current number of writer passes over rl_ring_buf */
+	u32 current_pass_no;
+
+	/* Current full_record to diff against */
+	struct full_record base_record;
+
+	/* Current record number counting from configuration change */
+	u64 current_record_no;
+};
+
+/* A ring buffer to save records about data blocks which were recently read. */
+struct read_log {
+	void *rl_ring_buf;
+
+	int rl_size;
+
+	struct read_log_state rl_head;
+
+	struct read_log_state rl_tail;
+
+	/* A lock to protect the above fields */
+	spinlock_t rl_lock;
+
+	/* A queue of waiters who want to be notified about reads */
+	wait_queue_head_t ml_notif_wq;
+
+	/* A work item to wake up those waiters without slowing down readers */
+	struct delayed_work ml_wakeup_work;
+};
+
+struct mount_options {
+	unsigned int read_timeout_ms;
+	unsigned int readahead_pages;
+	unsigned int read_log_pages;
+	unsigned int read_log_wakeup_count;
+	bool report_uid;
+	char *sysfs_name;
+};
+
+struct mount_info {
+	struct super_block *mi_sb;
+
+	struct path mi_backing_dir_path;
+
+	struct dentry *mi_index_dir;
+
+	struct dentry *mi_incomplete_dir;
+
+	const struct cred *mi_owner;
+
+	struct mount_options mi_options;
+
+	/* This mutex is to be taken before create, rename, delete */
+	struct mutex mi_dir_struct_mutex;
+
+	/*
+	 * A queue of waiters who want to be notified about new pending reads.
+	 */
+	wait_queue_head_t mi_pending_reads_notif_wq;
+
+	/*
+	 * Protects the following (RCU-safe):
+	 *  - reads_list_head
+	 *  - mi_pending_reads_count
+	 *  - mi_last_pending_read_number
+	 *  - data_file_segment.reads_list_head
+	 */
+	spinlock_t pending_read_lock;
+
+	/* List of active pending_read objects */
+	struct list_head mi_reads_list_head;
+
+	/* Total number of items in reads_list_head */
+	int mi_pending_reads_count;
+
+	/*
+	 * Last serial number that was assigned to a pending read.
+	 * 0 means no pending reads have been seen yet.
+	 */
+	int mi_last_pending_read_number;
+
+	/* Log of recently read data blocks; see struct read_log. */
+	struct read_log mi_log;
+
+	/* SELinux needs special xattrs on our pseudo files */
+	struct mem_range pseudo_file_xattr[PSEUDO_FILE_COUNT];
+
+	/* A queue of waiters who want to be notified about blocks_written */
+	wait_queue_head_t mi_blocks_written_notif_wq;
+
+	/* Number of blocks written since mount */
+	atomic_t mi_blocks_written;
+
+	/* Per UID read timeouts */
+	spinlock_t mi_per_uid_read_timeouts_lock;
+	struct incfs_per_uid_read_timeouts *mi_per_uid_read_timeouts;
+	int mi_per_uid_read_timeouts_size;
+
+	/* zstd workspace */
+	struct mutex mi_zstd_workspace_mutex;
+	void *mi_zstd_workspace;
+	ZSTD_DStream *mi_zstd_stream;
+	struct delayed_work mi_zstd_cleanup_work;
+
+	/* sysfs node */
+	struct incfs_sysfs_node *mi_sysfs_node;
+
+	/* Last error information */
+	struct mutex	mi_le_mutex;
+	incfs_uuid_t	mi_le_file_id;
+	u64		mi_le_time_us;
+	u32		mi_le_page;
+	u32		mi_le_errno;
+	uid_t		mi_le_uid;
+
+	/* Number of reads timed out */
+	u32 mi_reads_failed_timed_out;
+
+	/* Number of reads failed because hash verification failed */
+	u32 mi_reads_failed_hash_verification;
+
+	/* Number of reads failed for another reason */
+	u32 mi_reads_failed_other;
+
+	/* Number of reads delayed because page had to be fetched */
+	u32 mi_reads_delayed_pending;
+
+	/* Total time waiting for pages to be fetched */
+	u64 mi_reads_delayed_pending_us;
+
+	/*
+	 * Number of reads delayed because of per-uid min_time_us or
+	 * min_pending_time_us settings
+	 */
+	u32 mi_reads_delayed_min;
+
+	/*
+	 * Total time waiting because of per-uid min_time_us or
+	 * min_pending_time_us settings.
+	 *
+	 * Note that if a read is initially delayed because we have to wait for
+	 * the page, then further delayed because of min_pending_time_us
+	 * setting, this counter gets incremented by only the further delay
+	 * time.
+	 */
+	u64 mi_reads_delayed_min_us;
+};
+
+struct data_file_block {
+	loff_t db_backing_file_data_offset;
+
+	size_t db_stored_size;
+
+	enum incfs_compression_alg db_comp_alg;
+};
+
+struct pending_read {
+	incfs_uuid_t file_id;
+
+	s64 timestamp_us;
+
+	atomic_t done;
+
+	int block_index;
+
+	int serial_number;
+
+	uid_t uid;
+
+	struct list_head mi_reads_list;
+
+	struct list_head segment_reads_list;
+
+	struct rcu_head rcu;
+};
+
+struct data_file_segment {
+	wait_queue_head_t new_data_arrival_wq;
+
+	/* Protects reads and writes from the blockmap */
+	struct rw_semaphore rwsem;
+
+	/* List of active pending_read objects belonging to this segment */
+	/* Protected by mount_info.pending_read_lock */
+	struct list_head reads_list_head;
+};
+
+/*
+ * Extra info associated with a file. Just a few bytes set by a user.
+ */
+struct file_attr {
+	loff_t fa_value_offset;
+
+	size_t fa_value_size;
+
+	u32 fa_crc;
+};
+
+
+struct data_file {
+	struct backing_file_context *df_backing_file_context;
+
+	struct mount_info *df_mount_info;
+
+	incfs_uuid_t df_id;
+
+	/*
+	 * Array of segments used to reduce lock contention for the file.
+	 * The segment for a block is chosen based on the block's index.
+	 */
+	struct data_file_segment df_segments[SEGMENTS_PER_FILE];
+
+	/* Base offset of the first metadata record. */
+	loff_t df_metadata_off;
+
+	/* Base offset of the block map. */
+	loff_t df_blockmap_off;
+
+	/* File size in bytes */
+	loff_t df_size;
+
+	/* File header flags */
+	u32 df_header_flags;
+
+	/* File size in INCFS_DATA_FILE_BLOCK_SIZE blocks */
+	int df_data_block_count;
+
+	/* Total number of blocks, data + hash */
+	int df_total_block_count;
+
+	/* For mapped files, the offset into the actual file */
+	loff_t df_mapped_offset;
+
+	/* Number of data blocks written to file */
+	atomic_t df_data_blocks_written;
+
+	/* Number of data blocks in the status block */
+	u32 df_initial_data_blocks_written;
+
+	/* Number of hash blocks written to file */
+	atomic_t df_hash_blocks_written;
+
+	/* Number of hash blocks in the status block */
+	u32 df_initial_hash_blocks_written;
+
+	/* Offset to status metadata header */
+	loff_t df_status_offset;
+
+	/*
+	 * Mutex acquired while enabling verity. Note that df_hash_tree is set
+	 * when verity is enabled.
+	 *
+	 * The backing file mutex bc_mutex may be taken while this mutex is
+	 * held.
+	 */
+	struct mutex df_enable_verity;
+
+	/*
+	 * Set either at construction time or during enabling verity. In the
+	 * latter case, set via smp_store_release, so use smp_load_acquire to
+	 * read it.
+	 */
+	struct mtree *df_hash_tree;
+
+	/* Guaranteed set if df_hash_tree is set. */
+	struct incfs_df_signature *df_signature;
+
+	/*
+	 * The verity file digest, set when verity is enabled and the file has
+	 * been opened
+	 */
+	struct mem_range df_verity_file_digest;
+
+	struct incfs_df_verity_signature *df_verity_signature;
+};
+
+struct dir_file {
+	struct mount_info *mount_info;
+
+	struct file *backing_dir;
+};
+
+struct inode_info {
+	struct mount_info *n_mount_info; /* A mount, this file belongs to */
+
+	struct inode *n_backing_inode;
+
+	struct data_file *n_file;
+
+	struct inode n_vfs_inode;
+};
+
+struct dentry_info {
+	struct path backing_path;
+};
+
+enum FILL_PERMISSION {
+	CANT_FILL = 0,
+	CAN_FILL = 1,
+};
+
+struct incfs_file_data {
+	/* Does this file handle have INCFS_IOC_FILL_BLOCKS permission */
+	enum FILL_PERMISSION fd_fill_permission;
+
+	/* If INCFS_IOC_GET_FILLED_BLOCKS has been called, where are we */
+	int fd_get_block_pos;
+
+	/* And how many filled blocks are there up to that point */
+	int fd_filled_data_blocks;
+	int fd_filled_hash_blocks;
+};
+
+struct mount_info *incfs_alloc_mount_info(struct super_block *sb,
+					  struct mount_options *options,
+					  struct path *backing_dir_path);
+
+int incfs_realloc_mount_info(struct mount_info *mi,
+			     struct mount_options *options);
+
+void incfs_free_mount_info(struct mount_info *mi);
+
+char *file_id_to_str(incfs_uuid_t id);
+struct dentry *incfs_lookup_dentry(struct dentry *parent, const char *name);
+struct data_file *incfs_open_data_file(struct mount_info *mi, struct file *bf);
+void incfs_free_data_file(struct data_file *df);
+
+struct dir_file *incfs_open_dir_file(struct mount_info *mi, struct file *bf);
+void incfs_free_dir_file(struct dir_file *dir);
+
+struct incfs_read_data_file_timeouts {
+	u32 min_time_us;
+	u32 min_pending_time_us;
+	u32 max_pending_time_us;
+};
+
+ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
+			int index, struct mem_range tmp,
+			struct incfs_read_data_file_timeouts *timeouts);
+
+ssize_t incfs_read_merkle_tree_blocks(struct mem_range dst,
+				      struct data_file *df, size_t offset);
+
+int incfs_get_filled_blocks(struct data_file *df,
+			    struct incfs_file_data *fd,
+			    struct incfs_get_filled_blocks_args *arg);
+
+int incfs_read_file_signature(struct data_file *df, struct mem_range dst);
+
+int incfs_process_new_data_block(struct data_file *df,
+				 struct incfs_fill_block *block, u8 *data);
+
+int incfs_process_new_hash_block(struct data_file *df,
+				 struct incfs_fill_block *block, u8 *data);
+
+bool incfs_fresh_pending_reads_exist(struct mount_info *mi, int last_number);
+
+/*
+ * Collects pending reads and saves them into the array (reads/reads_size).
+ * Only reads with serial_number > sn_lowerbound are reported.
+ * Returns how many reads were saved into the array.
+ */
+int incfs_collect_pending_reads(struct mount_info *mi, int sn_lowerbound,
+				struct incfs_pending_read_info *reads,
+				struct incfs_pending_read_info2 *reads2,
+				int reads_size, int *new_max_sn);
+
+int incfs_collect_logged_reads(struct mount_info *mi,
+			       struct read_log_state *start_state,
+			       struct incfs_pending_read_info *reads,
+			       struct incfs_pending_read_info2 *reads2,
+			       int reads_size);
+struct read_log_state incfs_get_log_state(struct mount_info *mi);
+int incfs_get_uncollected_logs_count(struct mount_info *mi,
+				     const struct read_log_state *state);
+
+static inline struct inode_info *get_incfs_node(struct inode *inode)
+{
+	if (!inode)
+		return NULL;
+
+	if (inode->i_sb->s_magic != INCFS_MAGIC_NUMBER) {
+		/* This inode doesn't belong to us. */
+		pr_warn_once("incfs: %s on an alien inode.\n", __func__);
+		return NULL;
+	}
+
+	return container_of(inode, struct inode_info, n_vfs_inode);
+}
+
+static inline struct data_file *get_incfs_data_file(struct file *f)
+{
+	struct inode_info *node = NULL;
+
+	if (!f)
+		return NULL;
+
+	if (!S_ISREG(f->f_inode->i_mode))
+		return NULL;
+
+	node = get_incfs_node(f->f_inode);
+	if (!node)
+		return NULL;
+
+	return node->n_file;
+}
+
+static inline struct dir_file *get_incfs_dir_file(struct file *f)
+{
+	if (!f)
+		return NULL;
+
+	if (!S_ISDIR(f->f_inode->i_mode))
+		return NULL;
+
+	return (struct dir_file *)f->private_data;
+}
+
+/*
+ * Make sure that inode_info.n_file is initialized and inode can be used
+ * for reading and writing data from/to the backing file.
+ */
+int make_inode_ready_for_data_ops(struct mount_info *mi,
+				struct inode *inode,
+				struct file *backing_file);
+
+static inline struct dentry_info *get_incfs_dentry(const struct dentry *d)
+{
+	if (!d)
+		return NULL;
+
+	return (struct dentry_info *)d->d_fsdata;
+}
+
+static inline void get_incfs_backing_path(const struct dentry *d,
+					  struct path *path)
+{
+	struct dentry_info *di = get_incfs_dentry(d);
+
+	if (!di) {
+		*path = (struct path) {};
+		return;
+	}
+
+	*path = di->backing_path;
+	path_get(path);
+}
+
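+/*
+ * Ceiling division: the number of INCFS_DATA_FILE_BLOCK_SIZE blocks needed
+ * to cover size bytes.
+ */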
+static inline int get_blocks_count_for_size(u64 size)
+{
+	if (size == 0)
+		return 0;
+	return 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+}
+
+#endif /* _INCFS_DATA_MGMT_H */
diff --git a/fs/incfs/format.c b/fs/incfs/format.c
new file mode 100644
index 0000000..6ffcf33
--- /dev/null
+++ b/fs/incfs/format.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/falloc.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/kernel.h>
+
+#include "format.h"
+#include "data_mgmt.h"
+
+struct backing_file_context *incfs_alloc_bfc(struct mount_info *mi,
+					     struct file *backing_file)
+{
+	struct backing_file_context *result = NULL;
+
+	result = kzalloc(sizeof(*result), GFP_NOFS);
+	if (!result)
+		return ERR_PTR(-ENOMEM);
+
+	result->bc_file = get_file(backing_file);
+	result->bc_cred = mi->mi_owner;
+	mutex_init(&result->bc_mutex);
+	return result;
+}
+
+void incfs_free_bfc(struct backing_file_context *bfc)
+{
+	if (!bfc)
+		return;
+
+	if (bfc->bc_file)
+		fput(bfc->bc_file);
+
+	mutex_destroy(&bfc->bc_mutex);
+	kfree(bfc);
+}
+
+static loff_t incfs_get_end_offset(struct file *f)
+{
+	/*
+	 * This function assumes that file size and the end-offset
+	 * are the same. This is not always true.
+	 */
+	return i_size_read(file_inode(f));
+}
+
+/*
+ * Truncate the tail of the file to the given length.
 * Used to roll back partially successful multistep writes.
+ */
+static int truncate_backing_file(struct backing_file_context *bfc,
+				loff_t new_end)
+{
+	struct inode *inode = NULL;
+	struct dentry *dentry = NULL;
+	loff_t old_end = 0;
+	struct iattr attr;
+	int result = 0;
+
+	if (!bfc)
+		return -EFAULT;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	if (!bfc->bc_file)
+		return -EFAULT;
+
+	old_end = incfs_get_end_offset(bfc->bc_file);
+	if (old_end == new_end)
+		return 0;
+	if (old_end < new_end)
+		return -EINVAL;
+
+	inode = bfc->bc_file->f_inode;
+	dentry = bfc->bc_file->f_path.dentry;
+
+	attr.ia_size = new_end;
+	attr.ia_valid = ATTR_SIZE;
+
+	inode_lock(inode);
+	result = notify_change(&init_user_ns, dentry, &attr, NULL);
+	inode_unlock(inode);
+
+	return result;
+}
+
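+/* Write exactly count bytes at pos; a short write is reported as -EIO. */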
+static int write_to_bf(struct backing_file_context *bfc, const void *buf,
+			size_t count, loff_t pos)
+{
+	ssize_t res = incfs_kwrite(bfc, buf, count, pos);
+
+	if (res < 0)
+		return res;
+	if (res != count)
+		return -EIO;
+	return 0;
+}
+
+static int append_zeros_no_fallocate(struct backing_file_context *bfc,
+				     size_t file_size, size_t len)
+{
+	u8 buffer[256] = {};
+	size_t i;
+
+	for (i = 0; i < len; i += sizeof(buffer)) {
+		int to_write = len - i > sizeof(buffer)
+			? sizeof(buffer) : len - i;
+		int err = write_to_bf(bfc, buffer, to_write, file_size + i);
+
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/* Append a given number of zero bytes to the end of the backing file. */
+static int append_zeros(struct backing_file_context *bfc, size_t len)
+{
+	loff_t file_size = 0;
+	loff_t new_last_byte_offset = 0;
+	int result;
+
+	if (!bfc)
+		return -EFAULT;
+
+	if (len == 0)
+		return 0;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	/*
+	 * Allocate only one byte at the new desired end of the file.
+	 * It will increase file size and create a zeroed area of
+	 * a given size.
+	 */
+	file_size = incfs_get_end_offset(bfc->bc_file);
+	new_last_byte_offset = file_size + len - 1;
+	result = vfs_fallocate(bfc->bc_file, 0, new_last_byte_offset, 1);
+	if (result != -EOPNOTSUPP)
+		return result;
+
+	return append_zeros_no_fallocate(bfc, file_size, len);
+}
+
+/*
+ * Append a given metadata record to the backing file and update a previous
 * record to add the new record to the metadata list.
+ */
+static int append_md_to_backing_file(struct backing_file_context *bfc,
+			      struct incfs_md_header *record)
+{
+	int result = 0;
+	loff_t record_offset;
+	loff_t file_pos;
+	__le64 new_md_offset;
+	size_t record_size;
+
+	if (!bfc || !record)
+		return -EFAULT;
+
+	if (bfc->bc_last_md_record_offset < 0)
+		return -EINVAL;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	record_size = le16_to_cpu(record->h_record_size);
+	file_pos = incfs_get_end_offset(bfc->bc_file);
+	record->h_next_md_offset = 0;
+
+	/* Write the metadata record to the end of the backing file */
+	record_offset = file_pos;
+	new_md_offset = cpu_to_le64(record_offset);
+	result = write_to_bf(bfc, record, record_size, file_pos);
+	if (result)
+		return result;
+
+	/* Update next metadata offset in a previous record or a superblock. */
+	if (bfc->bc_last_md_record_offset) {
+		/*
+		 * Find a place in the previous md record where new record's
+		 * offset needs to be saved.
+		 */
+		file_pos = bfc->bc_last_md_record_offset +
+			offsetof(struct incfs_md_header, h_next_md_offset);
+	} else {
+		/*
+		 * No metadata yet; find the place to update in the
+		 * file_header.
+		 */
+		file_pos = offsetof(struct incfs_file_header,
+				    fh_first_md_offset);
+	}
+	result = write_to_bf(bfc, &new_md_offset, sizeof(new_md_offset),
+			     file_pos);
+	if (result)
+		return result;
+
+	bfc->bc_last_md_record_offset = record_offset;
+	return result;
+}
+
+/*
 * Reserve 0-filled space for the blockmap body, and append an
+ * incfs_blockmap metadata record pointing to it.
+ */
+int incfs_write_blockmap_to_backing_file(struct backing_file_context *bfc,
+					 u32 block_count)
+{
+	struct incfs_blockmap blockmap = {};
+	int result = 0;
+	loff_t file_end = 0;
+	size_t map_size = block_count * sizeof(struct incfs_blockmap_entry);
+
+	if (!bfc)
+		return -EFAULT;
+
+	blockmap.m_header.h_md_entry_type = INCFS_MD_BLOCK_MAP;
+	blockmap.m_header.h_record_size = cpu_to_le16(sizeof(blockmap));
+	blockmap.m_header.h_next_md_offset = cpu_to_le64(0);
+	blockmap.m_block_count = cpu_to_le32(block_count);
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	/* Reserve 0-filled space for the blockmap body in the backing file. */
+	file_end = incfs_get_end_offset(bfc->bc_file);
+	result = append_zeros(bfc, map_size);
+	if (result)
+		return result;
+
+	/* Write blockmap metadata record pointing to the body written above. */
+	blockmap.m_base_offset = cpu_to_le64(file_end);
+	result = append_md_to_backing_file(bfc, &blockmap.m_header);
+	if (result)
+		/* Error, rollback file changes */
+		truncate_backing_file(bfc, file_end);
+
+	return result;
+}
+
+int incfs_write_signature_to_backing_file(struct backing_file_context *bfc,
+					struct mem_range sig, u32 tree_size,
+					loff_t *tree_offset, loff_t *sig_offset)
+{
+	struct incfs_file_signature sg = {};
+	int result = 0;
+	loff_t rollback_pos = 0;
+	loff_t tree_area_pos = 0;
+	size_t alignment = 0;
+
+	if (!bfc)
+		return -EFAULT;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	rollback_pos = incfs_get_end_offset(bfc->bc_file);
+
+	sg.sg_header.h_md_entry_type = INCFS_MD_SIGNATURE;
+	sg.sg_header.h_record_size = cpu_to_le16(sizeof(sg));
+	sg.sg_header.h_next_md_offset = cpu_to_le64(0);
+	if (sig.data != NULL && sig.len > 0) {
+		sg.sg_sig_size = cpu_to_le32(sig.len);
+		sg.sg_sig_offset = cpu_to_le64(rollback_pos);
+
+		result = write_to_bf(bfc, sig.data, sig.len, rollback_pos);
+		if (result)
+			goto err;
+	}
+
+	tree_area_pos = incfs_get_end_offset(bfc->bc_file);
+	if (tree_size > 0) {
+		if (tree_size > 5 * INCFS_DATA_FILE_BLOCK_SIZE) {
+			/*
+			 * If hash tree is big enough, it makes sense to
+			 * align in the backing file for faster access.
+			 */
+			loff_t offset = round_up(tree_area_pos, PAGE_SIZE);
+
+			alignment = offset - tree_area_pos;
+			tree_area_pos = offset;
+		}
+
+		/*
+		 * If the root hash is not the only hash in the tree,
+		 * reserve 0-filled space for the tree.
+		 */
+		result = append_zeros(bfc, tree_size + alignment);
+		if (result)
+			goto err;
+
+		sg.sg_hash_tree_size = cpu_to_le32(tree_size);
+		sg.sg_hash_tree_offset = cpu_to_le64(tree_area_pos);
+	}
+
+	/* Write a signature metadata record pointing to the data written above. */
+	result = append_md_to_backing_file(bfc, &sg.sg_header);
+err:
+	if (result)
+		/* Error, rollback file changes */
+		truncate_backing_file(bfc, rollback_pos);
+	else {
+		if (tree_offset)
+			*tree_offset = tree_area_pos;
+		if (sig_offset)
+			*sig_offset = rollback_pos;
+	}
+
+	return result;
+}
+
+static int write_new_status_to_backing_file(struct backing_file_context *bfc,
+				       u32 data_blocks_written,
+				       u32 hash_blocks_written)
+{
+	int result;
+	loff_t rollback_pos;
+	struct incfs_status is = {
+		.is_header = {
+			.h_md_entry_type = INCFS_MD_STATUS,
+			.h_record_size = cpu_to_le16(sizeof(is)),
+		},
+		.is_data_blocks_written = cpu_to_le32(data_blocks_written),
+		.is_hash_blocks_written = cpu_to_le32(hash_blocks_written),
+	};
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+	rollback_pos = incfs_get_end_offset(bfc->bc_file);
+	result = append_md_to_backing_file(bfc, &is.is_header);
+	if (result)
+		truncate_backing_file(bfc, rollback_pos);
+
+	return result;
+}
+
+int incfs_write_status_to_backing_file(struct backing_file_context *bfc,
+				       loff_t status_offset,
+				       u32 data_blocks_written,
+				       u32 hash_blocks_written)
+{
+	struct incfs_status is;
+	int result;
+
+	if (!bfc)
+		return -EFAULT;
+
+	if (status_offset == 0)
+		return write_new_status_to_backing_file(bfc,
+				data_blocks_written, hash_blocks_written);
+
+	result = incfs_kread(bfc, &is, sizeof(is), status_offset);
+	if (result != sizeof(is))
+		return -EIO;
+
+	is.is_data_blocks_written = cpu_to_le32(data_blocks_written);
+	is.is_hash_blocks_written = cpu_to_le32(hash_blocks_written);
+	result = incfs_kwrite(bfc, &is, sizeof(is), status_offset);
+	if (result != sizeof(is))
+		return -EIO;
+
+	return 0;
+}
+
+int incfs_write_verity_signature_to_backing_file(
+		struct backing_file_context *bfc, struct mem_range signature,
+		loff_t *offset)
+{
+	struct incfs_file_verity_signature vs = {};
+	int result;
+	loff_t pos;
+
+	/* No verity signature section is equivalent to an empty section */
+	if (signature.data == NULL || signature.len == 0)
+		return 0;
+
+	pos = incfs_get_end_offset(bfc->bc_file);
+
+	vs = (struct incfs_file_verity_signature) {
+		.vs_header = (struct incfs_md_header) {
+			.h_md_entry_type = INCFS_MD_VERITY_SIGNATURE,
+			.h_record_size = cpu_to_le16(sizeof(vs)),
+			.h_next_md_offset = cpu_to_le64(0),
+		},
+		.vs_size = cpu_to_le32(signature.len),
+		.vs_offset = cpu_to_le64(pos),
+	};
+
+	result = write_to_bf(bfc, signature.data, signature.len, pos);
+	if (result)
+		goto err;
+
+	result = append_md_to_backing_file(bfc, &vs.vs_header);
+	if (result)
+		goto err;
+
+	*offset = pos;
+err:
+	if (result)
+		/* Error, rollback file changes */
+		truncate_backing_file(bfc, pos);
+	return result;
+}
+
+/*
 * Write a backing file header.
 * It should only be called on an empty file.
 * fh.fh_first_md_offset is 0 for now, but will be updated
 * once the first metadata record is added.
+ */
+int incfs_write_fh_to_backing_file(struct backing_file_context *bfc,
+				   incfs_uuid_t *uuid, u64 file_size)
+{
+	struct incfs_file_header fh = {};
+	loff_t file_pos = 0;
+
+	if (!bfc)
+		return -EFAULT;
+
+	fh.fh_magic = cpu_to_le64(INCFS_MAGIC_NUMBER);
+	fh.fh_version = cpu_to_le64(INCFS_FORMAT_CURRENT_VER);
+	fh.fh_header_size = cpu_to_le16(sizeof(fh));
+	fh.fh_first_md_offset = cpu_to_le64(0);
+	fh.fh_data_block_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);
+
+	fh.fh_file_size = cpu_to_le64(file_size);
+	fh.fh_uuid = *uuid;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	file_pos = incfs_get_end_offset(bfc->bc_file);
+	if (file_pos != 0)
+		return -EEXIST;
+
+	return write_to_bf(bfc, &fh, sizeof(fh), file_pos);
+}
+
+/*
 * Write a backing file header for a mapping file.
 * It should only be called on an empty file.
+ */
+int incfs_write_mapping_fh_to_backing_file(struct backing_file_context *bfc,
+				incfs_uuid_t *uuid, u64 file_size, u64 offset)
+{
+	struct incfs_file_header fh = {};
+	loff_t file_pos = 0;
+
+	if (!bfc)
+		return -EFAULT;
+
+	fh.fh_magic = cpu_to_le64(INCFS_MAGIC_NUMBER);
+	fh.fh_version = cpu_to_le64(INCFS_FORMAT_CURRENT_VER);
+	fh.fh_header_size = cpu_to_le16(sizeof(fh));
+	fh.fh_original_offset = cpu_to_le64(offset);
+	fh.fh_data_block_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);
+
+	fh.fh_mapped_file_size = cpu_to_le64(file_size);
+	fh.fh_original_uuid = *uuid;
+	fh.fh_flags = cpu_to_le32(INCFS_FILE_MAPPED);
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	file_pos = incfs_get_end_offset(bfc->bc_file);
+	if (file_pos != 0)
+		return -EEXIST;
+
+	return write_to_bf(bfc, &fh, sizeof(fh), file_pos);
+}
+
+/* Write a given data block and update the file's blockmap to point to it. */
+int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc,
+				     struct mem_range block, int block_index,
+				     loff_t bm_base_off, u16 flags)
+{
+	struct incfs_blockmap_entry bm_entry = {};
+	int result = 0;
+	loff_t data_offset = 0;
+	loff_t bm_entry_off =
+		bm_base_off + sizeof(struct incfs_blockmap_entry) * block_index;
+
+	if (!bfc)
+		return -EFAULT;
+
+	if (block.len >= (1 << 16) || block_index < 0)
+		return -EINVAL;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	data_offset = incfs_get_end_offset(bfc->bc_file);
+	if (data_offset <= bm_entry_off) {
+		/* The blockmap entry is beyond the file's end; this is not normal. */
+		return -EINVAL;
+	}
+
+	/* Write the block data at the end of the backing file. */
+	result = write_to_bf(bfc, block.data, block.len, data_offset);
+	if (result)
+		return result;
+
+	/* Update the blockmap to point to the newly written data. */
+	bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
+	bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
+	bm_entry.me_data_size = cpu_to_le16((u16)block.len);
+	bm_entry.me_flags = cpu_to_le16(flags);
+
+	return write_to_bf(bfc, &bm_entry, sizeof(bm_entry),
+				bm_entry_off);
+}
+
+int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc,
+					   struct mem_range block,
+					   int block_index,
+					   loff_t hash_area_off,
+					   loff_t bm_base_off,
+					   loff_t file_size)
+{
+	struct incfs_blockmap_entry bm_entry = {};
+	int result;
+	loff_t data_offset = 0;
+	loff_t file_end = 0;
+	loff_t bm_entry_off =
+		bm_base_off +
+		sizeof(struct incfs_blockmap_entry) *
+			(block_index + get_blocks_count_for_size(file_size));
+
+	if (!bfc)
+		return -EFAULT;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	data_offset = hash_area_off + block_index * INCFS_DATA_FILE_BLOCK_SIZE;
+	file_end = incfs_get_end_offset(bfc->bc_file);
+	if (data_offset + block.len > file_end) {
+		/* The block is located beyond the file's end; this is not normal. */
+		return -EINVAL;
+	}
+
+	result = write_to_bf(bfc, block.data, block.len, data_offset);
+	if (result)
+		return result;
+
+	bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
+	bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
+	bm_entry.me_data_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);
+
+	return write_to_bf(bfc, &bm_entry, sizeof(bm_entry), bm_entry_off);
+}
+
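+/*
+ * Read the single blockmap entry for block_index; reading zero entries is
+ * reported as -EIO.
+ */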
+int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index,
+			loff_t bm_base_off,
+			struct incfs_blockmap_entry *bm_entry)
+{
+	int error = incfs_read_blockmap_entries(bfc, bm_entry, block_index, 1,
+						bm_base_off);
+
+	if (error < 0)
+		return error;
+
+	if (error == 0)
+		return -EIO;
+
+	if (error != 1)
+		return -EFAULT;
+
+	return 0;
+}
+
+int incfs_read_blockmap_entries(struct backing_file_context *bfc,
+		struct incfs_blockmap_entry *entries,
+		int start_index, int blocks_number,
+		loff_t bm_base_off)
+{
+	loff_t bm_entry_off =
+		bm_base_off + sizeof(struct incfs_blockmap_entry) * start_index;
+	const size_t bytes_to_read = sizeof(struct incfs_blockmap_entry)
+					* blocks_number;
+	int result = 0;
+
+	if (!bfc || !entries)
+		return -EFAULT;
+
+	if (start_index < 0 || bm_base_off <= 0)
+		return -ENODATA;
+
+	result = incfs_kread(bfc, entries, bytes_to_read, bm_entry_off);
+	if (result < 0)
+		return result;
+	return result / sizeof(*entries);
+}
+
+int incfs_read_file_header(struct backing_file_context *bfc,
+			   loff_t *first_md_off, incfs_uuid_t *uuid,
+			   u64 *file_size, u32 *flags)
+{
+	ssize_t bytes_read = 0;
+	struct incfs_file_header fh = {};
+
+	if (!bfc || !first_md_off)
+		return -EFAULT;
+
+	bytes_read = incfs_kread(bfc, &fh, sizeof(fh), 0);
+	if (bytes_read < 0)
+		return bytes_read;
+
+	if (bytes_read < sizeof(fh))
+		return -EBADMSG;
+
+	if (le64_to_cpu(fh.fh_magic) != INCFS_MAGIC_NUMBER)
+		return -EILSEQ;
+
+	if (le64_to_cpu(fh.fh_version) > INCFS_FORMAT_CURRENT_VER)
+		return -EILSEQ;
+
+	if (le16_to_cpu(fh.fh_data_block_size) != INCFS_DATA_FILE_BLOCK_SIZE)
+		return -EILSEQ;
+
+	if (le16_to_cpu(fh.fh_header_size) != sizeof(fh))
+		return -EILSEQ;
+
+	if (first_md_off)
+		*first_md_off = le64_to_cpu(fh.fh_first_md_offset);
+	if (uuid)
+		*uuid = fh.fh_uuid;
+	if (file_size)
+		*file_size = le64_to_cpu(fh.fh_file_size);
+	if (flags)
+		*flags = le32_to_cpu(fh.fh_flags);
+	return 0;
+}
+
+/*
+ * Read through metadata records from the backing file one by one
+ * and call provided metadata handlers.
+ */
+int incfs_read_next_metadata_record(struct backing_file_context *bfc,
+			      struct metadata_handler *handler)
+{
+	const ssize_t max_md_size = INCFS_MAX_METADATA_RECORD_SIZE;
+	ssize_t bytes_read = 0;
+	size_t md_record_size = 0;
+	loff_t next_record = 0;
+	int res = 0;
+	struct incfs_md_header *md_hdr = NULL;
+
+	if (!bfc || !handler)
+		return -EFAULT;
+
+	if (handler->md_record_offset == 0)
+		return -EPERM;
+
+	memset(&handler->md_buffer, 0, max_md_size);
+	bytes_read = incfs_kread(bfc, &handler->md_buffer, max_md_size,
+				 handler->md_record_offset);
+	if (bytes_read < 0)
+		return bytes_read;
+	if (bytes_read < sizeof(*md_hdr))
+		return -EBADMSG;
+
+	md_hdr = &handler->md_buffer.md_header;
+	next_record = le64_to_cpu(md_hdr->h_next_md_offset);
+	md_record_size = le16_to_cpu(md_hdr->h_record_size);
+
+	if (md_record_size > max_md_size) {
+		pr_warn("incfs: The record is too large. Size: %zu\n",
+				md_record_size);
+		return -EBADMSG;
+	}
+
+	if (bytes_read < md_record_size) {
+		pr_warn("incfs: The record hasn't been fully read.\n");
+		return -EBADMSG;
+	}
+
+	if (next_record <= handler->md_record_offset && next_record != 0) {
+		pr_warn("incfs: Next record (%lld) points back in file.\n",
+			next_record);
+		return -EBADMSG;
+	}
+
+	switch (md_hdr->h_md_entry_type) {
+	case INCFS_MD_NONE:
+		break;
+	case INCFS_MD_BLOCK_MAP:
+		if (handler->handle_blockmap)
+			res = handler->handle_blockmap(
+				&handler->md_buffer.blockmap, handler);
+		break;
+	case INCFS_MD_FILE_ATTR:
+		/*
+		 * File attrs no longer supported, ignore section for
+		 * compatibility
+		 */
+		break;
+	case INCFS_MD_SIGNATURE:
+		if (handler->handle_signature)
+			res = handler->handle_signature(
+				&handler->md_buffer.signature, handler);
+		break;
+	case INCFS_MD_STATUS:
+		if (handler->handle_status)
+			res = handler->handle_status(
+				&handler->md_buffer.status, handler);
+		break;
+	case INCFS_MD_VERITY_SIGNATURE:
+		if (handler->handle_verity_signature)
+			res = handler->handle_verity_signature(
+				&handler->md_buffer.verity_signature, handler);
+		break;
+	default:
+		res = -ENOTSUPP;
+		break;
+	}
+
+	if (!res) {
+		if (next_record == 0) {
+			/*
+			 * Zero offset for the next record means that the last
+			 * metadata record has just been processed.
+			 */
+			bfc->bc_last_md_record_offset =
+				handler->md_record_offset;
+		}
+		handler->md_prev_record_offset = handler->md_record_offset;
+		handler->md_record_offset = next_record;
+	}
+	return res;
+}
+
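+/*
+ * Read from and write to the backing file with the mount owner's
+ * credentials (bc_cred), independent of the calling process's permissions.
+ */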
+ssize_t incfs_kread(struct backing_file_context *bfc, void *buf, size_t size,
+		    loff_t pos)
+{
+	const struct cred *old_cred = override_creds(bfc->bc_cred);
+	int ret = kernel_read(bfc->bc_file, buf, size, &pos);
+
+	revert_creds(old_cred);
+	return ret;
+}
+
+ssize_t incfs_kwrite(struct backing_file_context *bfc, const void *buf,
+		     size_t size, loff_t pos)
+{
+	const struct cred *old_cred = override_creds(bfc->bc_cred);
+	int ret = kernel_write(bfc->bc_file, buf, size, &pos);
+
+	revert_creds(old_cred);
+	return ret;
+}
diff --git a/fs/incfs/format.h b/fs/incfs/format.h
new file mode 100644
index 0000000..14e475b
--- /dev/null
+++ b/fs/incfs/format.h
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 Google LLC
+ */
+
+/*
+ * Overview
+ * --------
 * The backbone of the incremental-fs ondisk format is an append-only linked
 * list of metadata blocks. Each metadata block contains the offset of the next
+ * one. These blocks describe files and directories on the
+ * file system. They also represent actions of adding and removing file names
+ * (hard links).
+ *
 * Every time an incremental-fs instance is mounted, it reads through this list
 * to recreate the filesystem's state in memory. The offset of the first record
 * in the metadata list is stored in the superblock at the beginning of the
 * backing file.
+ *
+ * Most of the backing file is taken by data areas and blockmaps.
 * Since data blocks can be compressed and have different sizes,
 * a single per-file data area can't be pre-allocated. That's why blockmaps are
 * needed to find the location and size of each data block in
 * the backing file. Each time a file is created, a corresponding blockmap is
 * allocated to store future offsets of data blocks.
+ *
 * Whenever a data block is given to incremental-fs by a data loader:
+ *   - A data area with the given block is appended to the end of
+ *     the backing file.
+ *   - A record in the blockmap for the given block index is updated to reflect
+ *     its location, size, and compression algorithm.
 *
+ * Metadata records
+ * ----------------
+ * incfs_blockmap - metadata record that specifies size and location
+ *                           of a blockmap area for a given file. This area
+ *                           contains an array of incfs_blockmap_entry-s.
+ * incfs_file_signature - metadata record that specifies where file signature
+ *                           and its hash tree can be found in the backing file.
+ *
 * incfs_file_attr - metadata record that specifies where an additional file
 *                   attributes blob can be found (no longer supported; the
 *                   record is ignored for compatibility).
+ *
+ * Metadata header
+ * ---------------
 * incfs_md_header - header of a metadata record. It's always a part
 *                   of other structures and serves the purpose of metadata
 *                   bookkeeping.
+ *
+ *              +-----------------------------------------------+       ^
+ *              |            incfs_md_header                    |       |
 *              | 1. type of body (BLOCKMAP, FILE_ATTR...)      |       |
+ *              | 2. size of the whole record header + body     |       |
 *              | 3. CRC of the whole record header + body      |       |
+ *              | 4. offset of the previous md record           |]------+
+ *              | 5. offset of the next md record (md link)     |]---+
+ *              +-----------------------------------------------+    |
+ *              |  Metadata record body with useful data        |    |
+ *              +-----------------------------------------------+    |
+ *                                                                   +--->
+ *
+ * Other ondisk structures
+ * -----------------------
 * incfs_file_header - backing file header
+ * incfs_blockmap_entry - a record in a blockmap area that describes size
+ *                       and location of a data block.
 * Data blocks don't have any particular structure; they are written to the
 * backing file in raw form as they come from a data loader.
+ *
+ * Backing file layout
+ * -------------------
+ *
+ *
+ *              +-------------------------------------------+
+ *              |            incfs_file_header              |]---+
+ *              +-------------------------------------------+    |
+ *              |                 metadata                  |<---+
+ *              |           incfs_file_signature            |]---+
+ *              +-------------------------------------------+    |
+ *                        .........................              |
+ *              +-------------------------------------------+    |   metadata
+ *     +------->|               blockmap area               |    |  list links
+ *     |        |          [incfs_blockmap_entry]           |    |
+ *     |        |          [incfs_blockmap_entry]           |    |
+ *     |        |          [incfs_blockmap_entry]           |    |
+ *     |    +--[|          [incfs_blockmap_entry]           |    |
+ *     |    |   |          [incfs_blockmap_entry]           |    |
+ *     |    |   |          [incfs_blockmap_entry]           |    |
+ *     |    |   +-------------------------------------------+    |
+ *     |    |             .........................              |
+ *     |    |   +-------------------------------------------+    |
+ *     |    |   |                 metadata                  |<---+
+ *     +----|--[|               incfs_blockmap              |]---+
+ *          |   +-------------------------------------------+    |
+ *          |             .........................              |
+ *          |   +-------------------------------------------+    |
+ *          +-->|                 data block                |    |
+ *              +-------------------------------------------+    |
+ *                        .........................              |
+ *              +-------------------------------------------+    |
+ *              |                 metadata                  |<---+
+ *              |              incfs_file_attr              |
+ *              +-------------------------------------------+
+ */
+#ifndef _INCFS_FORMAT_H
+#define _INCFS_FORMAT_H
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <uapi/linux/incrementalfs.h>
+
+#include "internal.h"
+
+#define INCFS_MAX_NAME_LEN 255
+#define INCFS_FORMAT_V1 1
+#define INCFS_FORMAT_CURRENT_VER INCFS_FORMAT_V1
+
+enum incfs_metadata_type {
+	INCFS_MD_NONE = 0,
+	INCFS_MD_BLOCK_MAP = 1,
+	INCFS_MD_FILE_ATTR = 2,
+	INCFS_MD_SIGNATURE = 3,
+	INCFS_MD_STATUS = 4,
+	INCFS_MD_VERITY_SIGNATURE = 5,
+};
+
+enum incfs_file_header_flags {
+	INCFS_FILE_MAPPED = 1 << 1,
+};
+
+/* Header included at the beginning of all metadata records on the disk. */
+struct incfs_md_header {
+	__u8 h_md_entry_type;
+
+	/*
+	 * Size of the whole metadata record
+	 * (e.g. inode, dir entry, etc.), not just this header.
+	 */
+	__le16 h_record_size;
+
+	/*
+	 * Was: CRC32 of the whole metadata record
+	 * (e.g. inode, dir entry, etc.), not just this header.
+	 */
+	__le32 h_unused1;
+
+	/* Offset of the next metadata entry if any */
+	__le64 h_next_md_offset;
+
+	/* Was: Offset of the previous metadata entry if any */
+	__le64 h_unused2;
+
+} __packed;
+
+/* Backing file header */
+struct incfs_file_header {
+	/* Magic number: INCFS_MAGIC_NUMBER */
+	__le64 fh_magic;
+
+	/* Format version: INCFS_FORMAT_CURRENT_VER */
+	__le64 fh_version;
+
+	/* sizeof(incfs_file_header) */
+	__le16 fh_header_size;
+
+	/* INCFS_DATA_FILE_BLOCK_SIZE */
+	__le16 fh_data_block_size;
+
+	/* File flags, from incfs_file_header_flags */
+	__le32 fh_flags;
+
+	union {
+		/* Standard incfs file */
+		struct {
+			/* Offset of the first metadata record */
+			__le64 fh_first_md_offset;
+
+			/* Full size of the file's content */
+			__le64 fh_file_size;
+
+			/* File uuid */
+			incfs_uuid_t fh_uuid;
+		};
+
+		/* Mapped file - INCFS_FILE_MAPPED set in fh_flags */
+		struct {
+			/* Offset in original file */
+			__le64 fh_original_offset;
+
+			/* Full size of the file's content */
+			__le64 fh_mapped_file_size;
+
+			/* Original file's uuid */
+			incfs_uuid_t fh_original_uuid;
+		};
+	};
+} __packed;
+
+enum incfs_block_map_entry_flags {
+	INCFS_BLOCK_COMPRESSED_LZ4 = 1,
+	INCFS_BLOCK_COMPRESSED_ZSTD = 2,
+
+	/* Reserve 3 bits for compression alg */
+	INCFS_BLOCK_COMPRESSED_MASK = 7,
+};
+
+/* Block map entry pointing to an actual location of the data block. */
+struct incfs_blockmap_entry {
+	/* Offset of the actual data block. Lower 32 bits */
+	__le32 me_data_offset_lo;
+
+	/* Offset of the actual data block. Higher 16 bits */
+	__le16 me_data_offset_hi;
+
+	/* How many bytes the data actually occupies in the backing file */
+	__le16 me_data_size;
+
+	/* Block flags from incfs_block_map_entry_flags */
+	__le16 me_flags;
+} __packed;
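+
+/*
+ * Illustrative helper (a sketch; this name is not used elsewhere in this
+ * patch): how a reader would reassemble the 48-bit backing-file offset
+ * that incfs_blockmap_entry splits across me_data_offset_lo/hi.
+ */
+static inline loff_t
+incfs_bme_data_offset(const struct incfs_blockmap_entry *bme)
+{
+	return ((loff_t)le16_to_cpu(bme->me_data_offset_hi) << 32) |
+		le32_to_cpu(bme->me_data_offset_lo);
+}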
+
+/* Metadata record for locations of file blocks. Type = INCFS_MD_BLOCK_MAP */
+struct incfs_blockmap {
+	struct incfs_md_header m_header;
+
+	/* Base offset of the array of incfs_blockmap_entry */
+	__le64 m_base_offset;
+
+	/* Size of the map entry array in blocks */
+	__le32 m_block_count;
+} __packed;
+
+/*
+ * Metadata record for file signature. Type = INCFS_MD_SIGNATURE
+ *
+ * The signature stored here is the APK V4 signature data blob. See the
+ * definition of incfs_new_file_args::signature_info for an explanation of this
+ * blob. Specifically, it contains the root hash, but it does *not* contain
+ * anything that the kernel treats as a signature.
+ *
+ * When FS_IOC_ENABLE_VERITY is called on a file without this record, an APK V4
+ * signature blob and a hash tree are added to the file, and then this metadata
+ * record is created to record their locations.
+ */
+struct incfs_file_signature {
+	struct incfs_md_header sg_header;
+
+	__le32 sg_sig_size; /* The size of the signature. */
+
+	__le64 sg_sig_offset; /* Signature's offset in the backing file */
+
+	__le32 sg_hash_tree_size; /* The size of the hash tree. */
+
+	__le64 sg_hash_tree_offset; /* Hash tree offset in the backing file */
+} __packed;
+
+/* In memory version of above */
+struct incfs_df_signature {
+	u32 sig_size;
+	u64 sig_offset;
+	u32 hash_size;
+	u64 hash_offset;
+};
+
+struct incfs_status {
+	struct incfs_md_header is_header;
+
+	__le32 is_data_blocks_written; /* Number of data blocks written */
+
+	__le32 is_hash_blocks_written; /* Number of hash blocks written */
+
+	__le32 is_dummy[6]; /* Spare fields */
+} __packed;
+
+/*
+ * Metadata record for verity signature. Type = INCFS_MD_VERITY_SIGNATURE
+ *
+ * This record will only exist for verity-enabled files with signatures. Verity
+ * enabled files without signatures do not have this record. This signature is
+ * checked by fs-verity identically to any other fs-verity signature.
+ */
+struct incfs_file_verity_signature {
+	struct incfs_md_header vs_header;
+
+	 /* The size of the signature */
+	__le32 vs_size;
+
+	 /* Signature's offset in the backing file */
+	__le64 vs_offset;
+} __packed;
+
+/* In memory version of above */
+struct incfs_df_verity_signature {
+	u32 size;
+	u64 offset;
+};
+
+/* State of the backing file. */
+struct backing_file_context {
+	/* Protects writes to bc_file */
+	struct mutex bc_mutex;
+
+	/* File object to read data from */
+	struct file *bc_file;
+
+	/*
+	 * Offset of the last known metadata record in the backing file.
+	 * 0 means there are no metadata records.
+	 */
+	loff_t bc_last_md_record_offset;
+
+	/*
+	 * Credentials to set before reads/writes.
+	 * Note that this is a pointer to the mount_info mi_owner field, so
+	 * there is no need to get/put the creds.
+	 */
+	const struct cred *bc_cred;
+};
+
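+/*
+ * Caller-supplied visitor for walking the metadata list: each record is
+ * read into md_buffer and dispatched to the handle_* callback matching
+ * its type, while md_record_offset tracks the position of the walk.
+ */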
+struct metadata_handler {
+	loff_t md_record_offset;
+	loff_t md_prev_record_offset;
+	void *context;
+
+	union {
+		struct incfs_md_header md_header;
+		struct incfs_blockmap blockmap;
+		struct incfs_file_signature signature;
+		struct incfs_status status;
+		struct incfs_file_verity_signature verity_signature;
+	} md_buffer;
+
+	int (*handle_blockmap)(struct incfs_blockmap *bm,
+			       struct metadata_handler *handler);
+	int (*handle_signature)(struct incfs_file_signature *sig,
+				 struct metadata_handler *handler);
+	int (*handle_status)(struct incfs_status *st,
+				 struct metadata_handler *handler);
+	int (*handle_verity_signature)(struct incfs_file_verity_signature *s,
+					struct metadata_handler *handler);
+};
+#define INCFS_MAX_METADATA_RECORD_SIZE \
+	sizeof_field(struct metadata_handler, md_buffer)
+
+/* Backing file context management */
+struct mount_info;
+struct backing_file_context *incfs_alloc_bfc(struct mount_info *mi,
+					     struct file *backing_file);
+
+void incfs_free_bfc(struct backing_file_context *bfc);
+
+/* Writing stuff */
+int incfs_write_blockmap_to_backing_file(struct backing_file_context *bfc,
+					 u32 block_count);
+
+int incfs_write_fh_to_backing_file(struct backing_file_context *bfc,
+				   incfs_uuid_t *uuid, u64 file_size);
+
+int incfs_write_mapping_fh_to_backing_file(struct backing_file_context *bfc,
+				incfs_uuid_t *uuid, u64 file_size, u64 offset);
+
+int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc,
+					   struct mem_range block,
+					   int block_index, loff_t bm_base_off,
+					   u16 flags);
+
+int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc,
+					   struct mem_range block,
+					   int block_index,
+					   loff_t hash_area_off,
+					   loff_t bm_base_off,
+					   loff_t file_size);
+
+int incfs_write_signature_to_backing_file(struct backing_file_context *bfc,
+				struct mem_range sig, u32 tree_size,
+				loff_t *tree_offset, loff_t *sig_offset);
+
+int incfs_write_status_to_backing_file(struct backing_file_context *bfc,
+				       loff_t status_offset,
+				       u32 data_blocks_written,
+				       u32 hash_blocks_written);
+int incfs_write_verity_signature_to_backing_file(
+		struct backing_file_context *bfc, struct mem_range signature,
+		loff_t *offset);
+
+/* Reading stuff */
+int incfs_read_file_header(struct backing_file_context *bfc,
+			   loff_t *first_md_off, incfs_uuid_t *uuid,
+			   u64 *file_size, u32 *flags);
+
+int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index,
+			      loff_t bm_base_off,
+			      struct incfs_blockmap_entry *bm_entry);
+
+int incfs_read_blockmap_entries(struct backing_file_context *bfc,
+		struct incfs_blockmap_entry *entries,
+		int start_index, int blocks_number,
+		loff_t bm_base_off);
+
+int incfs_read_next_metadata_record(struct backing_file_context *bfc,
+				    struct metadata_handler *handler);
+
+ssize_t incfs_kread(struct backing_file_context *bfc, void *buf, size_t size,
+		    loff_t pos);
+ssize_t incfs_kwrite(struct backing_file_context *bfc, const void *buf,
+		     size_t size, loff_t pos);
+
+#endif /* _INCFS_FORMAT_H */
diff --git a/fs/incfs/integrity.c b/fs/incfs/integrity.c
new file mode 100644
index 0000000..93e7643
--- /dev/null
+++ b/fs/incfs/integrity.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Google LLC
+ */
+#include <crypto/sha2.h>
+#include <crypto/hash.h>
+#include <linux/err.h>
+#include <linux/version.h>
+
+#include "integrity.h"
+
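+/*
+ * Look up the hash algorithm descriptor for @id, allocating the underlying
+ * crypto_shash on first use. The acquire/release pair below makes the lazy
+ * initialization safe against concurrent first callers: whoever loses the
+ * cmpxchg simply frees its extra allocation.
+ */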
+struct incfs_hash_alg *incfs_get_hash_alg(enum incfs_hash_tree_algorithm id)
+{
+	static struct incfs_hash_alg sha256 = {
+		.name = "sha256",
+		.digest_size = SHA256_DIGEST_SIZE,
+		.id = INCFS_HASH_TREE_SHA256
+	};
+	struct incfs_hash_alg *result = NULL;
+	struct crypto_shash *shash;
+
+	if (id == INCFS_HASH_TREE_SHA256) {
+		BUILD_BUG_ON(INCFS_MAX_HASH_SIZE < SHA256_DIGEST_SIZE);
+		result = &sha256;
+	}
+
+	if (!result)
+		return ERR_PTR(-ENOENT);
+
+	/* pairs with cmpxchg_release() below */
+	shash = smp_load_acquire(&result->shash);
+	if (shash)
+		return result;
+
+	shash = crypto_alloc_shash(result->name, 0, 0);
+	if (IS_ERR(shash)) {
+		int err = PTR_ERR(shash);
+
+		pr_err("Can't allocate hash alg %s, error code:%d",
+			result->name, err);
+		return ERR_PTR(err);
+	}
+
+	/* pairs with smp_load_acquire() above */
+	if (cmpxchg_release(&result->shash, NULL, shash) != NULL)
+		crypto_free_shash(shash);
+
+	return result;
+}
+
+struct signature_info {
+	u32 version;
+	enum incfs_hash_tree_algorithm hash_algorithm;
+	u8 log2_blocksize;
+	struct mem_range salt;
+	struct mem_range root_hash;
+};
+
+static bool read_u32(u8 **p, u8 *top, u32 *result)
+{
+	if (*p + sizeof(u32) > top)
+		return false;
+
+	*result = le32_to_cpu(*(__le32 *)*p);
+	*p += sizeof(u32);
+	return true;
+}
+
+static bool read_u8(u8 **p, u8 *top, u8 *result)
+{
+	if (*p + sizeof(u8) > top)
+		return false;
+
+	*result = *(u8 *)*p;
+	*p += sizeof(u8);
+	return true;
+}
+
+static bool read_mem_range(u8 **p, u8 *top, struct mem_range *range)
+{
+	u32 len;
+
+	if (!read_u32(p, top, &len) || *p + len > top)
+		return false;
+
+	range->len = len;
+	range->data = *p;
+	*p += len;
+	return true;
+}
+
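+/*
+ * Layout of the signature blob as parsed below (all integers are
+ * little-endian; every variable-sized field is length-prefixed):
+ *
+ *	u32 version;		must equal INCFS_SIGNATURE_VERSION
+ *	u32 hash_section_size;	size of everything that follows
+ *	u32 hash_algorithm;	must equal INCFS_HASH_TREE_SHA256
+ *	u8  log2_blocksize;	must be 12, i.e. 4096-byte blocks
+ *	u32 salt_size;
+ *	u8  salt[salt_size];
+ *	u32 root_hash_size;
+ *	u8  root_hash[root_hash_size];
+ */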
+static int incfs_parse_signature(struct mem_range signature,
+				 struct signature_info *si)
+{
+	u8 *p = signature.data;
+	u8 *top = signature.data + signature.len;
+	u32 hash_section_size;
+
+	if (signature.len > INCFS_MAX_SIGNATURE_SIZE)
+		return -EINVAL;
+
+	if (!read_u32(&p, top, &si->version) ||
+	    si->version != INCFS_SIGNATURE_VERSION)
+		return -EINVAL;
+
+	if (!read_u32(&p, top, &hash_section_size) ||
+	    p + hash_section_size > top)
+		return -EINVAL;
+	top = p + hash_section_size;
+
+	if (!read_u32(&p, top, &si->hash_algorithm) ||
+	    si->hash_algorithm != INCFS_HASH_TREE_SHA256)
+		return -EINVAL;
+
+	if (!read_u8(&p, top, &si->log2_blocksize) || si->log2_blocksize != 12)
+		return -EINVAL;
+
+	if (!read_mem_range(&p, top, &si->salt))
+		return -EINVAL;
+
+	if (!read_mem_range(&p, top, &si->root_hash))
+		return -EINVAL;
+
+	if (p != top)
+		return -EINVAL;
+
+	return 0;
+}
+
+struct mtree *incfs_alloc_mtree(struct mem_range signature,
+				int data_block_count)
+{
+	int error;
+	struct signature_info si;
+	struct mtree *result = NULL;
+	struct incfs_hash_alg *hash_alg = NULL;
+	int hash_per_block;
+	int lvl;
+	int total_blocks = 0;
+	int blocks_in_level[INCFS_MAX_MTREE_LEVELS];
+	int blocks = data_block_count;
+
+	if (data_block_count <= 0)
+		return ERR_PTR(-EINVAL);
+
+	error = incfs_parse_signature(signature, &si);
+	if (error)
+		return ERR_PTR(error);
+
+	hash_alg = incfs_get_hash_alg(si.hash_algorithm);
+	if (IS_ERR(hash_alg))
+		return ERR_CAST(hash_alg);
+
+	if (si.root_hash.len < hash_alg->digest_size)
+		return ERR_PTR(-EINVAL);
+
+	result = kzalloc(sizeof(*result), GFP_NOFS);
+	if (!result)
+		return ERR_PTR(-ENOMEM);
+
+	result->alg = hash_alg;
+	hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / result->alg->digest_size;
+
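+	/*
+	 * Worked example: with 4096-byte blocks and SHA-256 (32-byte
+	 * digests), hash_per_block is 128. For data_block_count = 1000000
+	 * the first pass below yields levels of 7813, 62 and 1 hash blocks
+	 * (depth 3, 7876 blocks, roughly 31 MiB of hash area).
+	 */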
+	/* Calculating tree geometry. */
+	/* First pass: calculate how many hash blocks are in each level. */
+	for (lvl = 0; blocks > 1; lvl++) {
+		if (lvl >= INCFS_MAX_MTREE_LEVELS) {
+			pr_err("incfs: too much data in mtree");
+			goto err;
+		}
+
+		blocks = (blocks + hash_per_block - 1) / hash_per_block;
+		blocks_in_level[lvl] = blocks;
+		total_blocks += blocks;
+	}
+	result->depth = lvl;
+	result->hash_tree_area_size = total_blocks * INCFS_DATA_FILE_BLOCK_SIZE;
+	if (result->hash_tree_area_size > INCFS_MAX_HASH_AREA_SIZE)
+		goto err;
+
+	blocks = 0;
+	/* Second pass: calculate offset of each level. 0th level goes last. */
+	for (lvl = 0; lvl < result->depth; lvl++) {
+		u32 suboffset;
+
+		blocks += blocks_in_level[lvl];
+		suboffset = (total_blocks - blocks)
+					* INCFS_DATA_FILE_BLOCK_SIZE;
+
+		result->hash_level_suboffset[lvl] = suboffset;
+	}
+
+	/* Root hash is stored separately from the rest of the tree. */
+	memcpy(result->root_hash, si.root_hash.data, hash_alg->digest_size);
+	return result;
+
+err:
+	kfree(result);
+	return ERR_PTR(-E2BIG);
+}
+
+void incfs_free_mtree(struct mtree *tree)
+{
+	kfree(tree);
+}
+
+int incfs_calc_digest(struct incfs_hash_alg *alg, struct mem_range data,
+			struct mem_range digest)
+{
+	SHASH_DESC_ON_STACK(desc, alg->shash);
+
+	if (!alg || !alg->shash || !data.data || !digest.data)
+		return -EFAULT;
+
+	if (alg->digest_size > digest.len)
+		return -EINVAL;
+
+	desc->tfm = alg->shash;
+
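+	/*
+	 * Digests are always computed over a full block: a short (tail)
+	 * block is copied into a zeroed block-sized buffer first, so its
+	 * digest matches a tree built over zero-padded blocks.
+	 */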
+	if (data.len < INCFS_DATA_FILE_BLOCK_SIZE) {
+		int err;
+		void *buf = kzalloc(INCFS_DATA_FILE_BLOCK_SIZE, GFP_NOFS);
+
+		if (!buf)
+			return -ENOMEM;
+
+		memcpy(buf, data.data, data.len);
+		err = crypto_shash_digest(desc, buf, INCFS_DATA_FILE_BLOCK_SIZE,
+					  digest.data);
+		kfree(buf);
+		return err;
+	}
+	return crypto_shash_digest(desc, data.data, data.len, digest.data);
+}
diff --git a/fs/incfs/integrity.h b/fs/incfs/integrity.h
new file mode 100644
index 0000000..cf79b64
--- /dev/null
+++ b/fs/incfs/integrity.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+#ifndef _INCFS_INTEGRITY_H
+#define _INCFS_INTEGRITY_H
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <crypto/hash.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "internal.h"
+
+#define INCFS_MAX_MTREE_LEVELS 8
+#define INCFS_MAX_HASH_AREA_SIZE (1280 * 1024 * 1024)
+
+struct incfs_hash_alg {
+	const char *name;
+	int digest_size;
+	enum incfs_hash_tree_algorithm id;
+
+	struct crypto_shash *shash;
+};
+
+/* Merkle tree structure. */
+struct mtree {
+	struct incfs_hash_alg *alg;
+
+	u8 root_hash[INCFS_MAX_HASH_SIZE];
+
+	/* Offset of each hash level in the hash area. */
+	u32 hash_level_suboffset[INCFS_MAX_MTREE_LEVELS];
+
+	u32 hash_tree_area_size;
+
+	/* Number of levels in hash_level_suboffset */
+	int depth;
+};
+
+struct incfs_hash_alg *incfs_get_hash_alg(enum incfs_hash_tree_algorithm id);
+
+struct mtree *incfs_alloc_mtree(struct mem_range signature,
+				int data_block_count);
+
+void incfs_free_mtree(struct mtree *tree);
+
+size_t incfs_get_mtree_depth(enum incfs_hash_tree_algorithm alg, loff_t size);
+
+size_t incfs_get_mtree_hash_count(enum incfs_hash_tree_algorithm alg,
+					loff_t size);
+
+int incfs_calc_digest(struct incfs_hash_alg *alg, struct mem_range data,
+			struct mem_range digest);
+
+#endif /* _INCFS_INTEGRITY_H */
diff --git a/fs/incfs/internal.h b/fs/incfs/internal.h
new file mode 100644
index 0000000..c2df8bf
--- /dev/null
+++ b/fs/incfs/internal.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 Google LLC
+ */
+#ifndef _INCFS_INTERNAL_H
+#define _INCFS_INTERNAL_H
+#include <linux/types.h>
+
+struct mem_range {
+	u8 *data;
+	size_t len;
+};
+
+static inline struct mem_range range(u8 *data, size_t len)
+{
+	return (struct mem_range){ .data = data, .len = len };
+}
+
+#define LOCK_REQUIRED(lock)  WARN_ON_ONCE(!mutex_is_locked(&lock))
+
+#define EFSCORRUPTED EUCLEAN
+
+#endif /* _INCFS_INTERNAL_H */
diff --git a/fs/incfs/main.c b/fs/incfs/main.c
new file mode 100644
index 0000000..23347ac
--- /dev/null
+++ b/fs/incfs/main.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "sysfs.h"
+#include "vfs.h"
+
+static struct file_system_type incfs_fs_type = {
+	.owner = THIS_MODULE,
+	.name = INCFS_NAME,
+	.mount = incfs_mount_fs,
+	.kill_sb = incfs_kill_sb,
+	.fs_flags = 0
+};
+
+static int __init init_incfs_module(void)
+{
+	int err = 0;
+
+	err = incfs_init_sysfs();
+	if (err)
+		return err;
+
+	err = register_filesystem(&incfs_fs_type);
+	if (err)
+		incfs_cleanup_sysfs();
+
+	return err;
+}
+
+static void __exit cleanup_incfs_module(void)
+{
+	incfs_cleanup_sysfs();
+	unregister_filesystem(&incfs_fs_type);
+}
+
+module_init(init_incfs_module);
+module_exit(cleanup_incfs_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Eugene Zemtsov <ezemtsov@google.com>");
+MODULE_DESCRIPTION("Incremental File System");
diff --git a/fs/incfs/pseudo_files.c b/fs/incfs/pseudo_files.c
new file mode 100644
index 0000000..4b3f377
--- /dev/null
+++ b/fs/incfs/pseudo_files.c
@@ -0,0 +1,1389 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google LLC
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/namei.h>
+#include <linux/poll.h>
+#include <linux/syscalls.h>
+#include <linux/fdtable.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "pseudo_files.h"
+
+#include "data_mgmt.h"
+#include "format.h"
+#include "integrity.h"
+#include "vfs.h"
+
+#define READ_WRITE_FILE_MODE 0666
+
+static bool is_pseudo_filename(struct mem_range name);
+
+/*******************************************************************************
+ * .pending_reads pseudo file definition
+ ******************************************************************************/
+#define INCFS_PENDING_READS_INODE 2
+static const char pending_reads_file_name[] = INCFS_PENDING_READS_FILENAME;
+
+/* State of an open .pending_reads file, unique for each file descriptor. */
+struct pending_reads_state {
+	/* A serial number of the last pending read obtained from this file. */
+	int last_pending_read_sn;
+};
+
+static ssize_t pending_reads_read(struct file *f, char __user *buf, size_t len,
+			    loff_t *ppos)
+{
+	struct pending_reads_state *pr_state = f->private_data;
+	struct mount_info *mi = get_mount_info(file_superblock(f));
+	bool report_uid;
+	unsigned long page = 0;
+	struct incfs_pending_read_info *reads_buf = NULL;
+	struct incfs_pending_read_info2 *reads_buf2 = NULL;
+	size_t record_size;
+	size_t reads_to_collect;
+	int last_known_read_sn = READ_ONCE(pr_state->last_pending_read_sn);
+	int new_max_sn = last_known_read_sn;
+	int reads_collected = 0;
+	ssize_t result = 0;
+
+	if (!mi)
+		return -EFAULT;
+
+	report_uid = mi->mi_options.report_uid;
+	record_size = report_uid ? sizeof(*reads_buf2) : sizeof(*reads_buf);
+	reads_to_collect = len / record_size;
+
+	if (!incfs_fresh_pending_reads_exist(mi, last_known_read_sn))
+		return 0;
+
+	page = get_zeroed_page(GFP_NOFS);
+	if (!page)
+		return -ENOMEM;
+
+	if (report_uid)
+		reads_buf2 = (struct incfs_pending_read_info2 *) page;
+	else
+		reads_buf = (struct incfs_pending_read_info *) page;
+
+	reads_to_collect =
+		min_t(size_t, PAGE_SIZE / record_size, reads_to_collect);
+
+	reads_collected = incfs_collect_pending_reads(mi, last_known_read_sn,
+				reads_buf, reads_buf2, reads_to_collect,
+				&new_max_sn);
+
+	if (reads_collected < 0) {
+		result = reads_collected;
+		goto out;
+	}
+
+	/*
+	 * Just to make sure that we don't accidentally copy more data
+	 * to the reads buffer than userspace can handle.
+	 */
+	reads_collected = min_t(size_t, reads_collected, reads_to_collect);
+	result = reads_collected * record_size;
+
+	/* Copy reads info to the userspace buffer */
+	if (copy_to_user(buf, (void *)page, result)) {
+		result = -EFAULT;
+		goto out;
+	}
+
+	WRITE_ONCE(pr_state->last_pending_read_sn, new_max_sn);
+	*ppos = 0;
+
+out:
+	free_page(page);
+	return result;
+}
+
+static __poll_t pending_reads_poll(struct file *file, poll_table *wait)
+{
+	struct pending_reads_state *state = file->private_data;
+	struct mount_info *mi = get_mount_info(file_superblock(file));
+	__poll_t ret = 0;
+
+	poll_wait(file, &mi->mi_pending_reads_notif_wq, wait);
+	if (incfs_fresh_pending_reads_exist(mi,
+					    state->last_pending_read_sn))
+		ret = EPOLLIN | EPOLLRDNORM;
+
+	return ret;
+}
+
+static int pending_reads_open(struct inode *inode, struct file *file)
+{
+	struct pending_reads_state *state = NULL;
+
+	state = kzalloc(sizeof(*state), GFP_NOFS);
+	if (!state)
+		return -ENOMEM;
+
+	file->private_data = state;
+	return 0;
+}
+
+static int pending_reads_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+static long ioctl_permit_fill(struct file *f, void __user *arg)
+{
+	struct incfs_permit_fill __user *usr_permit_fill = arg;
+	struct incfs_permit_fill permit_fill;
+	long error = 0;
+	struct file *file = NULL;
+	struct incfs_file_data *fd;
+
+	if (copy_from_user(&permit_fill, usr_permit_fill, sizeof(permit_fill)))
+		return -EFAULT;
+
+	/* fget() returns NULL on failure, never an ERR_PTR */
+	file = fget(permit_fill.file_descriptor);
+	if (!file)
+		return -EBADF;
+
+	if (file->f_op != &incfs_file_ops) {
+		error = -EPERM;
+		goto out;
+	}
+
+	if (file->f_inode->i_sb != f->f_inode->i_sb) {
+		error = -EPERM;
+		goto out;
+	}
+
+	fd = file->private_data;
+
+	switch (fd->fd_fill_permission) {
+	case CANT_FILL:
+		fd->fd_fill_permission = CAN_FILL;
+		break;
+
+	case CAN_FILL:
+		pr_debug("CAN_FILL already set");
+		break;
+
+	default:
+		pr_warn("Invalid file private data");
+		error = -EFAULT;
+		goto out;
+	}
+
+out:
+	fput(file);
+	return error;
+}
+
+static int chmod(struct dentry *dentry, umode_t mode)
+{
+	struct inode *inode = dentry->d_inode;
+	struct inode *delegated_inode = NULL;
+	struct iattr newattrs;
+	int error;
+
+retry_deleg:
+	inode_lock(inode);
+	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+	error = notify_change(&init_user_ns, dentry, &newattrs, &delegated_inode);
+	inode_unlock(inode);
+	if (delegated_inode) {
+		error = break_deleg_wait(&delegated_inode);
+		if (!error)
+			goto retry_deleg;
+	}
+	return error;
+}
+
+static bool incfs_equal_ranges(struct mem_range lhs, struct mem_range rhs)
+{
+	if (lhs.len != rhs.len)
+		return false;
+	return memcmp(lhs.data, rhs.data, lhs.len) == 0;
+}
+
+static int validate_name(char *file_name)
+{
+	struct mem_range name = range(file_name, strlen(file_name));
+	int i = 0;
+
+	if (name.len > INCFS_MAX_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	if (is_pseudo_filename(name))
+		return -EINVAL;
+
+	for (i = 0; i < name.len; i++)
+		if (name.data[i] == '/')
+			return -EINVAL;
+
+	return 0;
+}
+
+static int dir_relative_path_resolve(
+			struct mount_info *mi,
+			const char __user *relative_path,
+			struct path *result_path,
+			struct path *base_path)
+{
+	int dir_fd = get_unused_fd_flags(0);
+	struct file *dir_f = NULL;
+	int error = 0;
+
+	if (!base_path)
+		base_path = &mi->mi_backing_dir_path;
+
+	if (dir_fd < 0)
+		return dir_fd;
+
+	dir_f = dentry_open(base_path, O_RDONLY | O_NOATIME, current_cred());
+
+	if (IS_ERR(dir_f)) {
+		error = PTR_ERR(dir_f);
+		/* The fd was never installed, so close_fd() can't release it */
+		put_unused_fd(dir_fd);
+		pr_debug("Error: %d\n", error);
+		return error;
+	}
+	fd_install(dir_fd, dir_f);
+
+	if (!relative_path) {
+		/* No relative path given, just return the base dir. */
+		*result_path = *base_path;
+		path_get(result_path);
+		goto out;
+	}
+
+	error = user_path_at_empty(dir_fd, relative_path,
+		LOOKUP_FOLLOW | LOOKUP_DIRECTORY, result_path, NULL);
+
+out:
+	close_fd(dir_fd);
+	if (error)
+		pr_debug("Error: %d\n", error);
+	return error;
+}
+
+static struct mem_range incfs_copy_signature_info_from_user(u8 __user *original,
+							    u64 size)
+{
+	u8 *result;
+
+	if (!original)
+		return range(NULL, 0);
+
+	if (size > INCFS_MAX_SIGNATURE_SIZE)
+		return range(ERR_PTR(-EFAULT), 0);
+
+	result = kzalloc(size, GFP_NOFS | __GFP_COMP);
+	if (!result)
+		return range(ERR_PTR(-ENOMEM), 0);
+
+	if (copy_from_user(result, original, size)) {
+		kfree(result);
+		return range(ERR_PTR(-EFAULT), 0);
+	}
+
+	return range(result, size);
+}
+
+static int init_new_file(struct mount_info *mi, struct dentry *dentry,
+			 incfs_uuid_t *uuid, u64 size, struct mem_range attr,
+			 u8 __user *user_signature_info, u64 signature_size)
+{
+	struct path path = {};
+	struct file *new_file;
+	int error = 0;
+	struct backing_file_context *bfc = NULL;
+	u32 block_count;
+	struct mem_range raw_signature = { NULL };
+	struct mtree *hash_tree = NULL;
+
+	if (!mi || !dentry || !uuid)
+		return -EFAULT;
+
+	/* Open the newly created backing file in order to initialize it. */
+	path = (struct path) {
+		.mnt = mi->mi_backing_dir_path.mnt,
+		.dentry = dentry
+	};
+
+	new_file = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE,
+			       current_cred());
+
+	if (IS_ERR(new_file)) {
+		error = PTR_ERR(new_file);
+		goto out;
+	}
+
+	bfc = incfs_alloc_bfc(mi, new_file);
+	fput(new_file);
+	if (IS_ERR(bfc)) {
+		error = PTR_ERR(bfc);
+		bfc = NULL;
+		goto out;
+	}
+
+	mutex_lock(&bfc->bc_mutex);
+	error = incfs_write_fh_to_backing_file(bfc, uuid, size);
+	if (error)
+		goto out;
+
+	block_count = (u32)get_blocks_count_for_size(size);
+
+	if (user_signature_info) {
+		raw_signature = incfs_copy_signature_info_from_user(
+			user_signature_info, signature_size);
+
+		if (IS_ERR(raw_signature.data)) {
+			error = PTR_ERR(raw_signature.data);
+			raw_signature.data = NULL;
+			goto out;
+		}
+
+		hash_tree = incfs_alloc_mtree(raw_signature, block_count);
+		if (IS_ERR(hash_tree)) {
+			error = PTR_ERR(hash_tree);
+			hash_tree = NULL;
+			goto out;
+		}
+
+		error = incfs_write_signature_to_backing_file(bfc,
+				raw_signature, hash_tree->hash_tree_area_size,
+				NULL, NULL);
+		if (error)
+			goto out;
+
+		block_count += get_blocks_count_for_size(
+			hash_tree->hash_tree_area_size);
+	}
+
+	if (block_count)
+		error = incfs_write_blockmap_to_backing_file(bfc, block_count);
+
+	if (error)
+		goto out;
+
+out:
+	if (bfc) {
+		mutex_unlock(&bfc->bc_mutex);
+		incfs_free_bfc(bfc);
+	}
+	incfs_free_mtree(hash_tree);
+	kfree(raw_signature.data);
+
+	if (error)
+		pr_debug("incfs: %s error: %d\n", __func__, error);
+	return error;
+}
+
+static void notify_create(struct file *pending_reads_file,
+			  const char  __user *dir_name, const char *file_name,
+			  const char *file_id_str, bool incomplete_file)
+{
+	struct mount_info *mi =
+		get_mount_info(file_superblock(pending_reads_file));
+	struct path base_path = {
+		.mnt = pending_reads_file->f_path.mnt,
+		.dentry = pending_reads_file->f_path.dentry->d_parent,
+	};
+	struct path dir_path = {};
+	struct dentry *file = NULL;
+	struct dentry *dir = NULL;
+	int error;
+
+	error = dir_relative_path_resolve(mi, dir_name, &dir_path, &base_path);
+	if (error)
+		goto out;
+
+	file = incfs_lookup_dentry(dir_path.dentry, file_name);
+	if (IS_ERR(file)) {
+		error = PTR_ERR(file);
+		file = NULL;
+		goto out;
+	}
+
+	fsnotify_create(d_inode(dir_path.dentry), file);
+
+	if (file_id_str) {
+		dir = incfs_lookup_dentry(base_path.dentry, INCFS_INDEX_NAME);
+		if (IS_ERR(dir)) {
+			error = PTR_ERR(dir);
+			dir = NULL;
+			goto out;
+		}
+
+		dput(file);
+		file = incfs_lookup_dentry(dir, file_id_str);
+		if (IS_ERR(file)) {
+			error = PTR_ERR(file);
+			file = NULL;
+			goto out;
+		}
+
+		fsnotify_create(d_inode(dir), file);
+
+		if (incomplete_file) {
+			dput(dir);
+			dir = incfs_lookup_dentry(base_path.dentry,
+						  INCFS_INCOMPLETE_NAME);
+			if (IS_ERR(dir)) {
+				error = PTR_ERR(dir);
+				dir = NULL;
+				goto out;
+			}
+
+			dput(file);
+			file = incfs_lookup_dentry(dir, file_id_str);
+			if (IS_ERR(file)) {
+				error = PTR_ERR(file);
+				file = NULL;
+				goto out;
+			}
+
+			fsnotify_create(d_inode(dir), file);
+		}
+	}
+out:
+	if (error)
+		pr_warn("%s failed with error %d\n", __func__, error);
+
+	dput(dir);
+	dput(file);
+	path_put(&dir_path);
+}
+
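+/*
+ * Create a new incfs file in three steps: create the backing file in
+ * .index under its file-id name, hard-link it under the requested name in
+ * the requested directory, and, if it has data blocks to fill, link it
+ * into .incomplete as well. On failure, every link made so far is undone.
+ */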
+static long ioctl_create_file(struct file *file,
+			struct incfs_new_file_args __user *usr_args)
+{
+	struct mount_info *mi = get_mount_info(file_superblock(file));
+	struct incfs_new_file_args args;
+	char *file_id_str = NULL;
+	struct dentry *index_file_dentry = NULL;
+	struct dentry *named_file_dentry = NULL;
+	struct dentry *incomplete_file_dentry = NULL;
+	struct path parent_dir_path = {};
+	struct inode *index_dir_inode = NULL;
+	__le64 size_attr_value = 0;
+	char *file_name = NULL;
+	char *attr_value = NULL;
+	int error = 0;
+	bool locked = false;
+	bool index_linked = false;
+	bool name_linked = false;
+	bool incomplete_linked = false;
+
+	if (!mi || !mi->mi_index_dir || !mi->mi_incomplete_dir) {
+		error = -EFAULT;
+		goto out;
+	}
+
+	if (copy_from_user(&args, usr_args, sizeof(args)) > 0) {
+		error = -EFAULT;
+		goto out;
+	}
+
+	file_name = strndup_user(u64_to_user_ptr(args.file_name), PATH_MAX);
+	if (IS_ERR(file_name)) {
+		error = PTR_ERR(file_name);
+		file_name = NULL;
+		goto out;
+	}
+
+	error = validate_name(file_name);
+	if (error)
+		goto out;
+
+	file_id_str = file_id_to_str(args.file_id);
+	if (!file_id_str) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (error)
+		goto out;
+	locked = true;
+
+	/* Find a directory to put the file into. */
+	error = dir_relative_path_resolve(mi,
+			u64_to_user_ptr(args.directory_path),
+			&parent_dir_path, NULL);
+	if (error)
+		goto out;
+
+	if (parent_dir_path.dentry == mi->mi_index_dir) {
+		/* Can't create a file directly inside .index */
+		error = -EBUSY;
+		goto out;
+	}
+
+	if (parent_dir_path.dentry == mi->mi_incomplete_dir) {
+		/* Can't create a file directly inside .incomplete */
+		error = -EBUSY;
+		goto out;
+	}
+
+	/* Look up a dentry in the parent dir. It should be negative. */
+	named_file_dentry = incfs_lookup_dentry(parent_dir_path.dentry,
+					file_name);
+	if (!named_file_dentry) {
+		error = -EFAULT;
+		goto out;
+	}
+	if (IS_ERR(named_file_dentry)) {
+		error = PTR_ERR(named_file_dentry);
+		named_file_dentry = NULL;
+		goto out;
+	}
+	if (d_really_is_positive(named_file_dentry)) {
+		/* File with this path already exists. */
+		error = -EEXIST;
+		goto out;
+	}
+
+	/* Look up a dentry in the incomplete dir. It should be negative. */
+	incomplete_file_dentry = incfs_lookup_dentry(mi->mi_incomplete_dir,
+					file_id_str);
+	if (!incomplete_file_dentry) {
+		error = -EFAULT;
+		goto out;
+	}
+	if (IS_ERR(incomplete_file_dentry)) {
+		error = PTR_ERR(incomplete_file_dentry);
+		incomplete_file_dentry = NULL;
+		goto out;
+	}
+	if (d_really_is_positive(incomplete_file_dentry)) {
+		/* File with this ID already exists in .incomplete. */
+		error = -EEXIST;
+		goto out;
+	}
+
+	/* Look up a dentry in the .index dir. It should be negative. */
+	index_file_dentry = incfs_lookup_dentry(mi->mi_index_dir, file_id_str);
+	if (!index_file_dentry) {
+		error = -EFAULT;
+		goto out;
+	}
+	if (IS_ERR(index_file_dentry)) {
+		error = PTR_ERR(index_file_dentry);
+		index_file_dentry = NULL;
+		goto out;
+	}
+	if (d_really_is_positive(index_file_dentry)) {
+		/* File with this ID already exists in index. */
+		error = -EEXIST;
+		goto out;
+	}
+
+	/* Creating a file in the .index dir. */
+	index_dir_inode = d_inode(mi->mi_index_dir);
+	inode_lock_nested(index_dir_inode, I_MUTEX_PARENT);
+	error = vfs_create(&init_user_ns, index_dir_inode, index_file_dentry,
+			   args.mode | 0222, true);
+	inode_unlock(index_dir_inode);
+
+	if (error)
+		goto out;
+	if (!d_really_is_positive(index_file_dentry)) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	error = chmod(index_file_dentry, args.mode | 0222);
+	if (error) {
+		pr_debug("incfs: chmod err: %d\n", error);
+		goto out;
+	}
+
+	/* Save the file's ID as an xattr for easy fetching in future. */
+	error = vfs_setxattr(&init_user_ns, index_file_dentry, INCFS_XATTR_ID_NAME,
+		file_id_str, strlen(file_id_str), XATTR_CREATE);
+	if (error) {
+		pr_debug("incfs: vfs_setxattr err:%d\n", error);
+		goto out;
+	}
+
+	/* Save the file's size as an xattr for easy fetching in future. */
+	size_attr_value = cpu_to_le64(args.size);
+	error = vfs_setxattr(&init_user_ns, index_file_dentry, INCFS_XATTR_SIZE_NAME,
+		(char *)&size_attr_value, sizeof(size_attr_value),
+		XATTR_CREATE);
+	if (error) {
+		pr_debug("incfs: vfs_setxattr err:%d\n", error);
+		goto out;
+	}
+
+	/* Save the file's attribute as an xattr */
+	if (args.file_attr_len && args.file_attr) {
+		if (args.file_attr_len > INCFS_MAX_FILE_ATTR_SIZE) {
+			error = -E2BIG;
+			goto out;
+		}
+
+		attr_value = kmalloc(args.file_attr_len, GFP_NOFS);
+		if (!attr_value) {
+			error = -ENOMEM;
+			goto out;
+		}
+
+		if (copy_from_user(attr_value,
+				u64_to_user_ptr(args.file_attr),
+				args.file_attr_len) > 0) {
+			error = -EFAULT;
+			goto out;
+		}
+
+		error = vfs_setxattr(&init_user_ns, index_file_dentry,
+				INCFS_XATTR_METADATA_NAME,
+				attr_value, args.file_attr_len,
+				XATTR_CREATE);
+
+		if (error)
+			goto out;
+	}
+
+	/* Initializing a newly created file. */
+	error = init_new_file(mi, index_file_dentry, &args.file_id, args.size,
+			      range(attr_value, args.file_attr_len),
+			      u64_to_user_ptr(args.signature_info),
+			      args.signature_size);
+	if (error)
+		goto out;
+	index_linked = true;
+
+	/* Linking a file with its real name from the requested dir. */
+	error = incfs_link(index_file_dentry, named_file_dentry);
+	if (error)
+		goto out;
+	name_linked = true;
+
+	if (args.size) {
+		/* Linking a file with its incomplete entry */
+		error = incfs_link(index_file_dentry, incomplete_file_dentry);
+		if (error)
+			goto out;
+		incomplete_linked = true;
+	}
+
+	notify_create(file, u64_to_user_ptr(args.directory_path), file_name,
+		      file_id_str, args.size != 0);
+
+out:
+	if (error) {
+		pr_debug("incfs: %s err:%d\n", __func__, error);
+		if (index_linked)
+			incfs_unlink(index_file_dentry);
+		if (name_linked)
+			incfs_unlink(named_file_dentry);
+		if (incomplete_linked)
+			incfs_unlink(incomplete_file_dentry);
+	}
+
+	kfree(file_id_str);
+	kfree(file_name);
+	kfree(attr_value);
+	dput(named_file_dentry);
+	dput(index_file_dentry);
+	dput(incomplete_file_dentry);
+	path_put(&parent_dir_path);
+	if (locked)
+		mutex_unlock(&mi->mi_dir_struct_mutex);
+
+	return error;
+}
+
+static int init_new_mapped_file(struct mount_info *mi, struct dentry *dentry,
+			 incfs_uuid_t *uuid, u64 size, u64 offset)
+{
+	struct path path = {};
+	struct file *new_file;
+	int error = 0;
+	struct backing_file_context *bfc = NULL;
+
+	if (!mi || !dentry || !uuid)
+		return -EFAULT;
+
+	/* Open the newly created backing file in order to initialize it. */
+	path = (struct path) {
+		.mnt = mi->mi_backing_dir_path.mnt,
+		.dentry = dentry
+	};
+	new_file = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE,
+			       current_cred());
+
+	if (IS_ERR(new_file)) {
+		error = PTR_ERR(new_file);
+		goto out;
+	}
+
+	bfc = incfs_alloc_bfc(mi, new_file);
+	fput(new_file);
+	if (IS_ERR(bfc)) {
+		error = PTR_ERR(bfc);
+		bfc = NULL;
+		goto out;
+	}
+
+	mutex_lock(&bfc->bc_mutex);
+	error = incfs_write_mapping_fh_to_backing_file(bfc, uuid, size, offset);
+	if (error)
+		goto out;
+
+out:
+	if (bfc) {
+		mutex_unlock(&bfc->bc_mutex);
+		incfs_free_bfc(bfc);
+	}
+
+	if (error)
+		pr_debug("incfs: %s error: %d\n", __func__, error);
+	return error;
+}
+
+static long ioctl_create_mapped_file(struct file *file, void __user *arg)
+{
+	struct mount_info *mi = get_mount_info(file_superblock(file));
+	struct incfs_create_mapped_file_args __user *args_usr_ptr = arg;
+	struct incfs_create_mapped_file_args args = {};
+	char *file_name;
+	int error = 0;
+	struct path parent_dir_path = {};
+	char *source_file_name = NULL;
+	struct dentry *source_file_dentry = NULL;
+	u64 source_file_size;
+	struct dentry *file_dentry = NULL;
+	struct inode *parent_inode;
+	__le64 size_attr_value;
+
+	if (copy_from_user(&args, args_usr_ptr, sizeof(args)) > 0)
+		return -EINVAL;
+
+	file_name = strndup_user(u64_to_user_ptr(args.file_name), PATH_MAX);
+	if (IS_ERR(file_name)) {
+		error = PTR_ERR(file_name);
+		file_name = NULL;
+		goto out;
+	}
+
+	error = validate_name(file_name);
+	if (error)
+		goto out;
+
+	if (args.source_offset % INCFS_DATA_FILE_BLOCK_SIZE) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	/* Validate file mapping is in range */
+	source_file_name = file_id_to_str(args.source_file_id);
+	if (!source_file_name) {
+		pr_warn("Failed to alloc source_file_name\n");
+		error = -ENOMEM;
+		goto out;
+	}
+
+	source_file_dentry = incfs_lookup_dentry(mi->mi_index_dir,
+						       source_file_name);
+	if (!source_file_dentry) {
+		pr_warn("Source file does not exist\n");
+		error = -EINVAL;
+		goto out;
+	}
+	if (IS_ERR(source_file_dentry)) {
+		pr_warn("Error opening source file\n");
+		error = PTR_ERR(source_file_dentry);
+		source_file_dentry = NULL;
+		goto out;
+	}
+	if (!d_really_is_positive(source_file_dentry)) {
+		pr_warn("Source file dentry negative\n");
+		error = -EINVAL;
+		goto out;
+	}
+
+	error = vfs_getxattr(&init_user_ns, source_file_dentry, INCFS_XATTR_SIZE_NAME,
+			     (char *)&size_attr_value, sizeof(size_attr_value));
+	if (error < 0)
+		goto out;
+
+	if (error != sizeof(size_attr_value)) {
+		pr_warn("Mapped file has no size attr\n");
+		error = -EINVAL;
+		goto out;
+	}
+
+	source_file_size = le64_to_cpu(size_attr_value);
+	if (args.source_offset + args.size > source_file_size) {
+		pr_warn("Mapped file out of range\n");
+		error = -EINVAL;
+		goto out;
+	}
+
+	/* Find a directory to put the file into. */
+	error = dir_relative_path_resolve(mi,
+			u64_to_user_ptr(args.directory_path),
+			&parent_dir_path, NULL);
+	if (error)
+		goto out;
+
+	if (parent_dir_path.dentry == mi->mi_index_dir) {
+		/* Can't create a file directly inside .index */
+		error = -EBUSY;
+		goto out;
+	}
+
+	/* Look up a dentry in the parent dir. It should be negative. */
+	file_dentry = incfs_lookup_dentry(parent_dir_path.dentry,
+					file_name);
+	if (!file_dentry) {
+		error = -EFAULT;
+		goto out;
+	}
+	if (IS_ERR(file_dentry)) {
+		error = PTR_ERR(file_dentry);
+		file_dentry = NULL;
+		goto out;
+	}
+	if (d_really_is_positive(file_dentry)) {
+		error = -EEXIST;
+		goto out;
+	}
+
+	parent_inode = d_inode(parent_dir_path.dentry);
+	inode_lock_nested(parent_inode, I_MUTEX_PARENT);
+	error = vfs_create(&init_user_ns, parent_inode, file_dentry,
+			   args.mode | 0222, true);
+	inode_unlock(parent_inode);
+	if (error)
+		goto out;
+
+	error = chmod(file_dentry, args.mode | 0222);
+	if (error) {
+		pr_debug("incfs: chmod err: %d\n", error);
+		goto delete_file;
+	}
+
+	/* Save the file's size as an xattr for easy fetching in future. */
+	size_attr_value = cpu_to_le64(args.size);
+	error = vfs_setxattr(&init_user_ns, file_dentry, INCFS_XATTR_SIZE_NAME,
+		(char *)&size_attr_value, sizeof(size_attr_value),
+		XATTR_CREATE);
+	if (error) {
+		pr_debug("incfs: vfs_setxattr err:%d\n", error);
+		goto delete_file;
+	}
+
+	error = init_new_mapped_file(mi, file_dentry, &args.source_file_id,
+			args.size, args.source_offset);
+	if (error)
+		goto delete_file;
+
+	notify_create(file, u64_to_user_ptr(args.directory_path), file_name,
+		      NULL, false);
+
+	goto out;
+
+delete_file:
+	incfs_unlink(file_dentry);
+
+out:
+	dput(file_dentry);
+	dput(source_file_dentry);
+	path_put(&parent_dir_path);
+	kfree(file_name);
+	kfree(source_file_name);
+	return error;
+}
+
+static long ioctl_get_read_timeouts(struct mount_info *mi, void __user *arg)
+{
+	struct incfs_get_read_timeouts_args __user *args_usr_ptr = arg;
+	struct incfs_get_read_timeouts_args args = {};
+	int error = 0;
+	struct incfs_per_uid_read_timeouts *buffer;
+	int size;
+
+	if (copy_from_user(&args, args_usr_ptr, sizeof(args)))
+		return -EINVAL;
+
+	if (args.timeouts_array_size_out > INCFS_DATA_FILE_BLOCK_SIZE)
+		return -EINVAL;
+
+	buffer = kzalloc(args.timeouts_array_size_out, GFP_NOFS);
+	if (!buffer)
+		return -ENOMEM;
+
+	spin_lock(&mi->mi_per_uid_read_timeouts_lock);
+	size = mi->mi_per_uid_read_timeouts_size;
+	if (args.timeouts_array_size < size)
+		error = -E2BIG;
+	else if (size)
+		memcpy(buffer, mi->mi_per_uid_read_timeouts, size);
+	spin_unlock(&mi->mi_per_uid_read_timeouts_lock);
+
+	args.timeouts_array_size_out = size;
+	if (!error && size)
+		if (copy_to_user(u64_to_user_ptr(args.timeouts_array), buffer,
+				 size))
+			error = -EFAULT;
+
+	if (!error || error == -E2BIG)
+		if (copy_to_user(args_usr_ptr, &args, sizeof(args)) > 0)
+			error = -EFAULT;
+
+	kfree(buffer);
+	return error;
+}
+
+static long ioctl_set_read_timeouts(struct mount_info *mi, void __user *arg)
+{
+	struct incfs_set_read_timeouts_args __user *args_usr_ptr = arg;
+	struct incfs_set_read_timeouts_args args = {};
+	int error = 0;
+	int size;
+	struct incfs_per_uid_read_timeouts *buffer = NULL, *tmp;
+	int i;
+
+	if (copy_from_user(&args, args_usr_ptr, sizeof(args)))
+		return -EINVAL;
+
+	size = args.timeouts_array_size;
+	if (size) {
+		if (size > INCFS_DATA_FILE_BLOCK_SIZE ||
+		    size % sizeof(*buffer) != 0)
+			return -EINVAL;
+
+		buffer = kzalloc(size, GFP_NOFS);
+		if (!buffer)
+			return -ENOMEM;
+
+		if (copy_from_user(buffer, u64_to_user_ptr(args.timeouts_array),
+				   size)) {
+			error = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0; i < size / sizeof(*buffer); ++i) {
+			struct incfs_per_uid_read_timeouts *t = &buffer[i];
+
+			if (t->min_pending_time_us > t->max_pending_time_us) {
+				error = -EINVAL;
+				goto out;
+			}
+		}
+	}
+
+	spin_lock(&mi->mi_per_uid_read_timeouts_lock);
+	mi->mi_per_uid_read_timeouts_size = size;
+	tmp = mi->mi_per_uid_read_timeouts;
+	mi->mi_per_uid_read_timeouts = buffer;
+	buffer = tmp;
+	spin_unlock(&mi->mi_per_uid_read_timeouts_lock);
+
+out:
+	kfree(buffer);
+	return error;
+}
+
+static long ioctl_get_last_read_error(struct mount_info *mi, void __user *arg)
+{
+	struct incfs_get_last_read_error_args __user *args_usr_ptr = arg;
+	struct incfs_get_last_read_error_args args = {};
+	int error;
+
+	error = mutex_lock_interruptible(&mi->mi_le_mutex);
+	if (error)
+		return error;
+
+	args.file_id_out = mi->mi_le_file_id;
+	args.time_us_out = mi->mi_le_time_us;
+	args.page_out = mi->mi_le_page;
+	args.errno_out = mi->mi_le_errno;
+	args.uid_out = mi->mi_le_uid;
+
+	mutex_unlock(&mi->mi_le_mutex);
+	if (copy_to_user(args_usr_ptr, &args, sizeof(args)) > 0)
+		error = -EFAULT;
+
+	return error;
+}
+
+static long pending_reads_dispatch_ioctl(struct file *f, unsigned int req,
+					unsigned long arg)
+{
+	struct mount_info *mi = get_mount_info(file_superblock(f));
+
+	switch (req) {
+	case INCFS_IOC_CREATE_FILE:
+		return ioctl_create_file(f, (void __user *)arg);
+	case INCFS_IOC_PERMIT_FILL:
+		return ioctl_permit_fill(f, (void __user *)arg);
+	case INCFS_IOC_CREATE_MAPPED_FILE:
+		return ioctl_create_mapped_file(f, (void __user *)arg);
+	case INCFS_IOC_GET_READ_TIMEOUTS:
+		return ioctl_get_read_timeouts(mi, (void __user *)arg);
+	case INCFS_IOC_SET_READ_TIMEOUTS:
+		return ioctl_set_read_timeouts(mi, (void __user *)arg);
+	case INCFS_IOC_GET_LAST_READ_ERROR:
+		return ioctl_get_last_read_error(mi, (void __user *)arg);
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct file_operations incfs_pending_reads_file_ops = {
+	.read = pending_reads_read,
+	.poll = pending_reads_poll,
+	.open = pending_reads_open,
+	.release = pending_reads_release,
+	.llseek = noop_llseek,
+	.unlocked_ioctl = pending_reads_dispatch_ioctl,
+	.compat_ioctl = pending_reads_dispatch_ioctl
+};
+
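+/*
+ * Hypothetical userspace sketch (not part of this patch) of how a data
+ * loader might consume .pending_reads: poll for readiness, then read an
+ * array of incfs_pending_read_info records. fetch_block() stands in for
+ * whatever the loader does to obtain and install the missing block.
+ *
+ *	struct incfs_pending_read_info reads[128];
+ *	struct pollfd pfd = { .fd = pr_fd, .events = POLLIN };
+ *
+ *	while (poll(&pfd, 1, -1) > 0) {
+ *		ssize_t n = read(pr_fd, reads, sizeof(reads));
+ *
+ *		for (int i = 0; i < n / sizeof(reads[0]); i++)
+ *			fetch_block(reads[i].file_id, reads[i].block_index);
+ *	}
+ */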
+/*******************************************************************************
+ * .log pseudo file definition
+ ******************************************************************************/
+#define INCFS_LOG_INODE 3
+static const char log_file_name[] = INCFS_LOG_FILENAME;
+
+/* State of an open .log file, unique for each file descriptor. */
+struct log_file_state {
+	struct read_log_state state;
+};
+
+static ssize_t log_read(struct file *f, char __user *buf, size_t len,
+			loff_t *ppos)
+{
+	struct log_file_state *log_state = f->private_data;
+	struct mount_info *mi = get_mount_info(file_superblock(f));
+	int total_reads_collected = 0;
+	int rl_size;
+	ssize_t result = 0;
+	bool report_uid;
+	unsigned long page = 0;
+	struct incfs_pending_read_info *reads_buf = NULL;
+	struct incfs_pending_read_info2 *reads_buf2 = NULL;
+	size_t record_size;
+	ssize_t reads_to_collect;
+	ssize_t reads_per_page;
+
+	if (!mi)
+		return -EFAULT;
+
+	report_uid = mi->mi_options.report_uid;
+	record_size = report_uid ? sizeof(*reads_buf2) : sizeof(*reads_buf);
+	reads_to_collect = len / record_size;
+	reads_per_page = PAGE_SIZE / record_size;
+
+	rl_size = READ_ONCE(mi->mi_log.rl_size);
+	if (rl_size == 0)
+		return 0;
+
+	page = __get_free_page(GFP_NOFS);
+	if (!page)
+		return -ENOMEM;
+
+	if (report_uid)
+		reads_buf2 = (struct incfs_pending_read_info2 *)page;
+	else
+		reads_buf = (struct incfs_pending_read_info *)page;
+
+	reads_to_collect = min_t(ssize_t, rl_size, reads_to_collect);
+	while (reads_to_collect > 0) {
+		struct read_log_state next_state;
+		int reads_collected;
+
+		memcpy(&next_state, &log_state->state, sizeof(next_state));
+		reads_collected = incfs_collect_logged_reads(
+			mi, &next_state, reads_buf, reads_buf2,
+			min_t(ssize_t, reads_to_collect, reads_per_page));
+		if (reads_collected <= 0) {
+			result = total_reads_collected ?
+				       total_reads_collected * record_size :
+				       reads_collected;
+			goto out;
+		}
+		if (copy_to_user(buf, (void *)page,
+				 reads_collected * record_size)) {
+			result = total_reads_collected ?
+				       total_reads_collected * record_size :
+				       -EFAULT;
+			goto out;
+		}
+
+		memcpy(&log_state->state, &next_state, sizeof(next_state));
+		total_reads_collected += reads_collected;
+		buf += reads_collected * record_size;
+		reads_to_collect -= reads_collected;
+	}
+
+	result = total_reads_collected * record_size;
+	*ppos = 0;
+out:
+	free_page(page);
+	return result;
+}
+
+static __poll_t log_poll(struct file *file, poll_table *wait)
+{
+	struct log_file_state *log_state = file->private_data;
+	struct mount_info *mi = get_mount_info(file_superblock(file));
+	int count;
+	__poll_t ret = 0;
+
+	poll_wait(file, &mi->mi_log.ml_notif_wq, wait);
+	count = incfs_get_uncollected_logs_count(mi, &log_state->state);
+	if (count >= mi->mi_options.read_log_wakeup_count)
+		ret = EPOLLIN | EPOLLRDNORM;
+
+	return ret;
+}
+
+static int log_open(struct inode *inode, struct file *file)
+{
+	struct log_file_state *log_state = NULL;
+	struct mount_info *mi = get_mount_info(file_superblock(file));
+
+	log_state = kzalloc(sizeof(*log_state), GFP_NOFS);
+	if (!log_state)
+		return -ENOMEM;
+
+	log_state->state = incfs_get_log_state(mi);
+	file->private_data = log_state;
+	return 0;
+}
+
+static int log_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+static const struct file_operations incfs_log_file_ops = {
+	.read = log_read,
+	.poll = log_poll,
+	.open = log_open,
+	.release = log_release,
+	.llseek = noop_llseek,
+};
+
+/*******************************************************************************
+ * .blocks_written pseudo file definition
+ ******************************************************************************/
+#define INCFS_BLOCKS_WRITTEN_INODE 4
+static const char blocks_written_file_name[] = INCFS_BLOCKS_WRITTEN_FILENAME;
+
+/* State of an open .blocks_written file, unique for each file descriptor. */
+struct blocks_written_file_state {
+	unsigned long blocks_written;
+};
+
+static ssize_t blocks_written_read(struct file *f, char __user *buf, size_t len,
+			loff_t *ppos)
+{
+	struct mount_info *mi = get_mount_info(file_superblock(f));
+	struct blocks_written_file_state *state = f->private_data;
+	unsigned long blocks_written;
+	char string[21];
+	int result = 0;
+
+	if (!mi)
+		return -EFAULT;
+
+	blocks_written = atomic_read(&mi->mi_blocks_written);
+	if (state->blocks_written == blocks_written)
+		return 0;
+
+	result = snprintf(string, sizeof(string), "%lu", blocks_written);
+	if (result > len)
+		result = len;
+	if (copy_to_user(buf, string, result))
+		return -EFAULT;
+
+	state->blocks_written = blocks_written;
+	return result;
+}
+
+static __poll_t blocks_written_poll(struct file *f, poll_table *wait)
+{
+	struct mount_info *mi = get_mount_info(file_superblock(f));
+	struct blocks_written_file_state *state = f->private_data;
+	unsigned long blocks_written;
+
+	if (!mi)
+		return 0;
+
+	poll_wait(f, &mi->mi_blocks_written_notif_wq, wait);
+	blocks_written = atomic_read(&mi->mi_blocks_written);
+	if (state->blocks_written == blocks_written)
+		return 0;
+
+	return EPOLLIN | EPOLLRDNORM;
+}
+
+static int blocks_written_open(struct inode *inode, struct file *file)
+{
+	struct blocks_written_file_state *state =
+		kzalloc(sizeof(*state), GFP_NOFS);
+
+	if (!state)
+		return -ENOMEM;
+
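+	/* -1 guarantees that the very first read reports the current count */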
+	state->blocks_written = -1;
+	file->private_data = state;
+	return 0;
+}
+
+static int blocks_written_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+static const struct file_operations incfs_blocks_written_file_ops = {
+	.read = blocks_written_read,
+	.poll = blocks_written_poll,
+	.open = blocks_written_open,
+	.release = blocks_written_release,
+	.llseek = noop_llseek,
+};
+
+/*******************************************************************************
+ * Generic inode lookup functionality
+ ******************************************************************************/
+
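+/*
+ * The three tables below are parallel arrays: entry i of each one
+ * describes the same pseudo file (name, inode number, file operations).
+ * They must all stay PSEUDO_FILE_COUNT entries long.
+ */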
+const struct mem_range incfs_pseudo_file_names[] = {
+	{ .data = (u8 *)pending_reads_file_name,
+	  .len = ARRAY_SIZE(pending_reads_file_name) - 1 },
+	{ .data = (u8 *)log_file_name, .len = ARRAY_SIZE(log_file_name) - 1 },
+	{ .data = (u8 *)blocks_written_file_name,
+	  .len = ARRAY_SIZE(blocks_written_file_name) - 1 }
+};
+
+const unsigned long incfs_pseudo_file_inodes[] = { INCFS_PENDING_READS_INODE,
+						   INCFS_LOG_INODE,
+						   INCFS_BLOCKS_WRITTEN_INODE };
+
+static const struct file_operations *const pseudo_file_operations[] = {
+	&incfs_pending_reads_file_ops, &incfs_log_file_ops,
+	&incfs_blocks_written_file_ops
+};
+
+static bool is_pseudo_filename(struct mem_range name)
+{
+	int i = 0;
+
+	for (; i < ARRAY_SIZE(incfs_pseudo_file_names); ++i)
+		if (incfs_equal_ranges(incfs_pseudo_file_names[i], name))
+			return true;
+	return false;
+}
+
+static bool get_pseudo_inode(int ino, struct inode *inode)
+{
+	int i = 0;
+
+	for (; i < ARRAY_SIZE(incfs_pseudo_file_inodes); ++i)
+		if (ino == incfs_pseudo_file_inodes[i])
+			break;
+	if (i == ARRAY_SIZE(incfs_pseudo_file_inodes))
+		return false;
+
+	inode->i_ctime = (struct timespec64){};
+	inode->i_mtime = inode->i_ctime;
+	inode->i_atime = inode->i_ctime;
+	inode->i_size = 0;
+	inode->i_ino = ino;
+	inode->i_private = NULL;
+	inode_init_owner(&init_user_ns, inode, NULL, S_IFREG | READ_WRITE_FILE_MODE);
+	inode->i_op = &incfs_file_inode_ops;
+	inode->i_fop = pseudo_file_operations[i];
+	return true;
+}
+
+struct inode_search {
+	unsigned long ino;
+};
+
+static int inode_test(struct inode *inode, void *opaque)
+{
+	struct inode_search *search = opaque;
+
+	return inode->i_ino == search->ino;
+}
+
+static int inode_set(struct inode *inode, void *opaque)
+{
+	struct inode_search *search = opaque;
+
+	if (get_pseudo_inode(search->ino, inode))
+		return 0;
+
+	/* Unknown inode requested. */
+	return -EINVAL;
+}
+
+static struct inode *fetch_inode(struct super_block *sb, unsigned long ino)
+{
+	struct inode_search search = {
+		.ino = ino
+	};
+	struct inode *inode = iget5_locked(sb, search.ino, inode_test,
+				inode_set, &search);
+
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	if (inode->i_state & I_NEW)
+		unlock_new_inode(inode);
+
+	return inode;
+}
+
+int dir_lookup_pseudo_files(struct super_block *sb, struct dentry *dentry)
+{
+	struct mem_range name_range =
+			range((u8 *)dentry->d_name.name, dentry->d_name.len);
+	unsigned long ino;
+	struct inode *inode;
+	int i = 0;
+
+	for (; i < ARRAY_SIZE(incfs_pseudo_file_names); ++i)
+		if (incfs_equal_ranges(incfs_pseudo_file_names[i], name_range))
+			break;
+	if (i == ARRAY_SIZE(incfs_pseudo_file_names))
+		return -ENOENT;
+
+	ino = incfs_pseudo_file_inodes[i];
+
+	inode = fetch_inode(sb, ino);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
+
+	d_add(dentry, inode);
+	return 0;
+}
+
+int emit_pseudo_files(struct dir_context *ctx)
+{
+	loff_t i = ctx->pos;
+
+	for (; i < ARRAY_SIZE(incfs_pseudo_file_names); ++i) {
+		if (!dir_emit(ctx, incfs_pseudo_file_names[i].data,
+			      incfs_pseudo_file_names[i].len,
+			      incfs_pseudo_file_inodes[i], DT_REG))
+			return -EINVAL;
+
+		ctx->pos++;
+	}
+	return 0;
+}
diff --git a/fs/incfs/pseudo_files.h b/fs/incfs/pseudo_files.h
new file mode 100644
index 0000000..1887218
--- /dev/null
+++ b/fs/incfs/pseudo_files.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Google LLC
+ */
+
+#ifndef _INCFS_PSEUDO_FILES_H
+#define _INCFS_PSEUDO_FILES_H
+
+#include "internal.h"
+
+#define PSEUDO_FILE_COUNT 3
+#define INCFS_START_INO_RANGE 10
+
+extern const struct mem_range incfs_pseudo_file_names[PSEUDO_FILE_COUNT];
+extern const unsigned long incfs_pseudo_file_inodes[PSEUDO_FILE_COUNT];
+
+int dir_lookup_pseudo_files(struct super_block *sb, struct dentry *dentry);
+int emit_pseudo_files(struct dir_context *ctx);
+
+#endif
diff --git a/fs/incfs/sysfs.c b/fs/incfs/sysfs.c
new file mode 100644
index 0000000..360f03c
--- /dev/null
+++ b/fs/incfs/sysfs.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2021 Google LLC
+ */
+#include <linux/fs.h>
+#include <linux/kobject.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "sysfs.h"
+#include "data_mgmt.h"
+#include "vfs.h"
+
+/******************************************************************************
+ * Define sys/fs/incrementalfs & sys/fs/incrementalfs/features
+ *****************************************************************************/
+#define INCFS_NODE_FEATURES "features"
+#define INCFS_NODE_INSTANCES "instances"
+
+static struct kobject *sysfs_root;
+static struct kobject *features_node;
+static struct kobject *instances_node;
+
+#define DECLARE_FEATURE_FLAG(name)					\
+	static ssize_t name##_show(struct kobject *kobj,		\
+			 struct kobj_attribute *attr, char *buff)	\
+{									\
+	return sysfs_emit(buff, "supported\n");				\
+}									\
+									\
+static struct kobj_attribute name##_attr = __ATTR_RO(name)
+
+DECLARE_FEATURE_FLAG(corefs);
+DECLARE_FEATURE_FLAG(zstd);
+DECLARE_FEATURE_FLAG(v2);
+
+static struct attribute *attributes[] = {
+	&corefs_attr.attr,
+	&zstd_attr.attr,
+	&v2_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group attr_group = {
+	.attrs = attributes,
+};
+
+int __init incfs_init_sysfs(void)
+{
+	int res = -ENOMEM;
+
+	sysfs_root = kobject_create_and_add(INCFS_NAME, fs_kobj);
+	if (!sysfs_root)
+		return -ENOMEM;
+
+	instances_node = kobject_create_and_add(INCFS_NODE_INSTANCES,
+						sysfs_root);
+	if (!instances_node)
+		goto err_put_root;
+
+	features_node = kobject_create_and_add(INCFS_NODE_FEATURES,
+						sysfs_root);
+	if (!features_node)
+		goto err_put_instances;
+
+	res = sysfs_create_group(features_node, &attr_group);
+	if (res)
+		goto err_put_features;
+
+	return 0;
+
+err_put_features:
+	kobject_put(features_node);
+err_put_instances:
+	kobject_put(instances_node);
+err_put_root:
+	kobject_put(sysfs_root);
+
+	return res;
+}
+
+void incfs_cleanup_sysfs(void)
+{
+	if (features_node) {
+		sysfs_remove_group(features_node, &attr_group);
+		kobject_put(features_node);
+	}
+
+	kobject_put(instances_node);
+	kobject_put(sysfs_root);
+}
+
+/******************************************************************************
+ * Define sys/fs/incrementalfs/instances/<name>/
+ *****************************************************************************/
+#define __DECLARE_STATUS_FLAG(name)					\
+static ssize_t name##_show(struct kobject *kobj,			\
+			 struct kobj_attribute *attr, char *buff)	\
+{									\
+	struct incfs_sysfs_node *node = container_of(kobj,		\
+			struct incfs_sysfs_node, isn_sysfs_node);	\
+									\
+	return sysfs_emit(buff, "%d\n", node->isn_mi->mi_##name);	\
+}									\
+									\
+static struct kobj_attribute name##_attr = __ATTR_RO(name)
+
+#define __DECLARE_STATUS_FLAG64(name)					\
+static ssize_t name##_show(struct kobject *kobj,			\
+			 struct kobj_attribute *attr, char *buff)	\
+{									\
+	struct incfs_sysfs_node *node = container_of(kobj,		\
+			struct incfs_sysfs_node, isn_sysfs_node);	\
+									\
+	return sysfs_emit(buff, "%lld\n", node->isn_mi->mi_##name);	\
+}									\
+									\
+static struct kobj_attribute name##_attr = __ATTR_RO(name)
+
+__DECLARE_STATUS_FLAG(reads_failed_timed_out);
+__DECLARE_STATUS_FLAG(reads_failed_hash_verification);
+__DECLARE_STATUS_FLAG(reads_failed_other);
+__DECLARE_STATUS_FLAG(reads_delayed_pending);
+__DECLARE_STATUS_FLAG64(reads_delayed_pending_us);
+__DECLARE_STATUS_FLAG(reads_delayed_min);
+__DECLARE_STATUS_FLAG64(reads_delayed_min_us);
+
+static struct attribute *mount_attributes[] = {
+	&reads_failed_timed_out_attr.attr,
+	&reads_failed_hash_verification_attr.attr,
+	&reads_failed_other_attr.attr,
+	&reads_delayed_pending_attr.attr,
+	&reads_delayed_pending_us_attr.attr,
+	&reads_delayed_min_attr.attr,
+	&reads_delayed_min_us_attr.attr,
+	NULL,
+};
+
+static void incfs_sysfs_release(struct kobject *kobj)
+{
+	struct incfs_sysfs_node *node = container_of(kobj,
+				struct incfs_sysfs_node, isn_sysfs_node);
+
+	complete(&node->isn_completion);
+}
+
+static const struct attribute_group mount_attr_group = {
+	.attrs = mount_attributes,
+};
+
+static struct kobj_type incfs_kobj_node_ktype = {
+	.sysfs_ops	= &kobj_sysfs_ops,
+	.release	= &incfs_sysfs_release,
+};
+
+struct incfs_sysfs_node *incfs_add_sysfs_node(const char *name,
+					      struct mount_info *mi)
+{
+	struct incfs_sysfs_node *node = NULL;
+	int error;
+
+	if (!name)
+		return NULL;
+
+	node = kzalloc(sizeof(*node), GFP_NOFS);
+	if (!node)
+		return ERR_PTR(-ENOMEM);
+
+	node->isn_mi = mi;
+
+	init_completion(&node->isn_completion);
+	kobject_init(&node->isn_sysfs_node, &incfs_kobj_node_ktype);
+	error = kobject_add(&node->isn_sysfs_node, instances_node, "%s", name);
+	if (error)
+		goto err;
+
+	error = sysfs_create_group(&node->isn_sysfs_node, &mount_attr_group);
+	if (error)
+		goto err;
+
+	return node;
+
+err:
+	/*
+	 * kobject_put() drops the last reference and ends up in
+	 * incfs_sysfs_release(), which completes isn_completion (possibly
+	 * asynchronously); the node itself still has to be freed here.
+	 */
+	kobject_put(&node->isn_sysfs_node);
+	wait_for_completion(&node->isn_completion);
+	kfree(node);
+	return ERR_PTR(error);
+}
+
+void incfs_free_sysfs_node(struct incfs_sysfs_node *node)
+{
+	if (!node)
+		return;
+
+	sysfs_remove_group(&node->isn_sysfs_node, &mount_attr_group);
+	kobject_put(&node->isn_sysfs_node);
+	wait_for_completion(&node->isn_completion);
+	kfree(node);
+}
diff --git a/fs/incfs/sysfs.h b/fs/incfs/sysfs.h
new file mode 100644
index 0000000..65bf554
--- /dev/null
+++ b/fs/incfs/sysfs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2021 Google LLC
+ */
+#ifndef _INCFS_SYSFS_H
+#define _INCFS_SYSFS_H
+
+#include <linux/completion.h>
+#include <linux/kobject.h>
+
+struct incfs_sysfs_node {
+	struct kobject isn_sysfs_node;
+
+	struct completion isn_completion;
+
+	struct mount_info *isn_mi;
+};
+
+int incfs_init_sysfs(void);
+void incfs_cleanup_sysfs(void);
+struct incfs_sysfs_node *incfs_add_sysfs_node(const char *name,
+					      struct mount_info *mi);
+void incfs_free_sysfs_node(struct incfs_sysfs_node *node);
+
+#endif
diff --git a/fs/incfs/verity.c b/fs/incfs/verity.c
new file mode 100644
index 0000000..b69656b
--- /dev/null
+++ b/fs/incfs/verity.c
@@ -0,0 +1,851 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google LLC
+ */
+
+/*
+ * fs-verity integration into incfs
+ *
+ * Since incfs has its own merkle tree implementation, most of the fs-verity
+ * code is not needed. The key part that is needed is the signature check,
+ * since that is based on the private /proc/sys/fs/verity/require_signatures
+ * value and a private keyring. Thus the first change is to modify the verity
+ * code to export a version of fsverity_verify_signature.
+ *
+ * fs-verity integration then consists of the following modifications:
+ *
+ * 1. Add the (optional) verity signature to the incfs file format
+ * 2. Add a pointer to the digest of the fs-verity descriptor struct to the
+ *    data_file struct that incfs attaches to each file inode.
+ * 3. Add the following ioctls:
+ *  - FS_IOC_ENABLE_VERITY
+ *  - FS_IOC_GETFLAGS
+ *  - FS_IOC_MEASURE_VERITY
+ * 4. When FS_IOC_ENABLE_VERITY is called on a non-verity file, the
+ *    fs-verity descriptor struct is populated and digested. If the digest
+ *    passes the signature check, or the signature is NULL and
+ *    fs.verity.require_signatures=0, then the S_VERITY flag is set and the
+ *    xattr incfs.verity is set. If the signature is non-NULL, an
+ *    INCFS_MD_VERITY_SIGNATURE record containing the signature is added to
+ *    the backing file.
+ * 5. When the inode of a file with an incfs.verity xattr is initialized, the
+ *    inode's S_VERITY flag is set.
+ * 6. When a file with the S_VERITY flag set on its inode is opened, the
+ *    data_file is checked for its verity digest. If the file doesn't have a
+ *    digest, the file's digest is calculated as above, checked, and set, or
+ *    the open is denied if it is not valid.
+ * 7. FS_IOC_GETFLAGS simply returns the value of the S_VERITY flag
+ * 8. FS_IOC_MEASURE_VERITY simply returns the cached digest
+ * 9. The final complication is that if FS_IOC_ENABLE_VERITY is called on a
+ *    file which doesn't have a merkle tree, the merkle tree is calculated
+ *    before the rest of the process is completed.
+ */
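+
+/*
+ * A minimal userspace sketch of step 4 (hypothetical fd; error handling
+ * elided).  With sig_size == 0 this only succeeds when
+ * fs.verity.require_signatures is 0:
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/fsverity.h>
+ *
+ *	int enable(int fd)
+ *	{
+ *		struct fsverity_enable_arg arg = {
+ *			.version = 1,
+ *			.hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
+ *			.block_size = 4096,	// must equal PAGE_SIZE
+ *			.salt_size = 0,
+ *			.sig_size = 0,		// or the size of a PKCS#7 blob
+ *			.sig_ptr = 0,
+ *		};
+ *
+ *		return ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
+ *	}
+ */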
+
+#include <crypto/hash.h>
+#include <crypto/sha2.h>
+#include <linux/fsverity.h>
+#include <linux/mount.h>
+
+#include "verity.h"
+
+#include "data_mgmt.h"
+#include "format.h"
+#include "integrity.h"
+#include "vfs.h"
+
+
+static int incfs_get_root_hash(struct file *filp, u8 *root_hash)
+{
+	struct data_file *df = get_incfs_data_file(filp);
+
+	if (!df)
+		return -EINVAL;
+
+	memcpy(root_hash, df->df_hash_tree->root_hash,
+	       df->df_hash_tree->alg->digest_size);
+
+	return 0;
+}
+
+static int incfs_end_enable_verity(struct file *filp, u8 *sig, size_t sig_size)
+{
+	struct inode *inode = file_inode(filp);
+	struct mem_range signature = {
+		.data = sig,
+		.len = sig_size,
+	};
+	struct data_file *df = get_incfs_data_file(filp);
+	struct backing_file_context *bfc;
+	int error;
+	struct incfs_df_verity_signature *vs = NULL;
+	loff_t offset;
+
+	if (!df || !df->df_backing_file_context)
+		return -EFSCORRUPTED;
+
+	if (sig) {
+		vs = kzalloc(sizeof(*vs), GFP_NOFS);
+		if (!vs)
+			return -ENOMEM;
+	}
+
+	bfc = df->df_backing_file_context;
+	error = mutex_lock_interruptible(&bfc->bc_mutex);
+	if (error)
+		goto out;
+
+	error = incfs_write_verity_signature_to_backing_file(bfc, signature,
+							     &offset);
+	mutex_unlock(&bfc->bc_mutex);
+	if (error)
+		goto out;
+
+	/*
+	 * Set verity xattr so we can set S_VERITY without opening backing file
+	 */
+	error = vfs_setxattr(&init_user_ns, bfc->bc_file->f_path.dentry,
+			     INCFS_XATTR_VERITY_NAME, NULL, 0, XATTR_CREATE);
+	if (error) {
+		pr_warn("incfs: error setting verity xattr: %d\n", error);
+		goto out;
+	}
+
+	if (sig) {
+		*vs = (struct incfs_df_verity_signature) {
+			.size = signature.len,
+			.offset = offset,
+		};
+
+		df->df_verity_signature = vs;
+		vs = NULL;
+	}
+
+	inode_set_flags(inode, S_VERITY, S_VERITY);
+
+out:
+	kfree(vs);
+	return error;
+}
+
+static int incfs_compute_file_digest(struct incfs_hash_alg *alg,
+				struct fsverity_descriptor *desc,
+				u8 *digest)
+{
+	SHASH_DESC_ON_STACK(d, alg->shash);
+
+	d->tfm = alg->shash;
+	return crypto_shash_digest(d, (u8 *)desc, sizeof(*desc), digest);
+}
+
+static enum incfs_hash_tree_algorithm incfs_convert_fsverity_hash_alg(
+								int hash_alg)
+{
+	switch (hash_alg) {
+	case FS_VERITY_HASH_ALG_SHA256:
+		return INCFS_HASH_TREE_SHA256;
+	default:
+		return -EINVAL;
+	}
+}
+
+static struct mem_range incfs_get_verity_digest(struct inode *inode)
+{
+	struct inode_info *node = get_incfs_node(inode);
+	struct data_file *df;
+	struct mem_range verity_file_digest;
+
+	if (!node) {
+		pr_warn("Invalid inode\n");
+		return range(NULL, 0);
+	}
+
+	df = node->n_file;
+
+	/*
+	 * Pairs with the cmpxchg_release() in incfs_set_verity_digest().
+	 * I.e., another task may publish ->df_verity_file_digest concurrently,
+	 * executing a RELEASE barrier.  We need to use smp_load_acquire() here
+	 * to safely ACQUIRE the memory the other task published.
+	 */
+	verity_file_digest.data = smp_load_acquire(
+					&df->df_verity_file_digest.data);
+	verity_file_digest.len = df->df_verity_file_digest.len;
+	return verity_file_digest;
+}
+
+static void incfs_set_verity_digest(struct inode *inode,
+				     struct mem_range verity_file_digest)
+{
+	struct inode_info *node = get_incfs_node(inode);
+	struct data_file *df;
+
+	if (!node) {
+		pr_warn("Invalid inode\n");
+		kfree(verity_file_digest.data);
+		return;
+	}
+
+	df = node->n_file;
+	df->df_verity_file_digest.len = verity_file_digest.len;
+
+	/*
+	 * Multiple tasks may race to set ->df_verity_file_digest.data, so use
+	 * cmpxchg_release().  This pairs with the smp_load_acquire() in
+	 * incfs_get_verity_digest().  I.e., here we publish
+	 * ->df_verity_file_digest.data, with a RELEASE barrier so that other
+	 * tasks can ACQUIRE it.
+	 */
+	if (cmpxchg_release(&df->df_verity_file_digest.data, NULL,
+			    verity_file_digest.data) != NULL)
+		/* Lost the race, so free the file_digest we allocated. */
+		kfree(verity_file_digest.data);
+}
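+
+/*
+ * Taken together, the two helpers above implement a publish-once pattern
+ * (sketch):
+ *
+ *	writer:	d = compute_digest();
+ *		if (cmpxchg_release(&p, NULL, d) != NULL)
+ *			kfree(d);	// lost the race
+ *	reader:	d = smp_load_acquire(&p);
+ *		if (d)
+ *			use(d);		// guaranteed fully initialized
+ */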
+
+/*
+ * Calculate the digest of the fsverity_descriptor. The signature (if present)
+ * is also checked.
+ */
+static struct mem_range incfs_calc_verity_digest_from_desc(
+					const struct inode *inode,
+					struct fsverity_descriptor *desc,
+					u8 *signature, size_t sig_size)
+{
+	enum incfs_hash_tree_algorithm incfs_hash_alg;
+	struct mem_range verity_file_digest;
+	int err;
+	struct incfs_hash_alg *hash_alg;
+
+	incfs_hash_alg = incfs_convert_fsverity_hash_alg(desc->hash_algorithm);
+	if (incfs_hash_alg < 0)
+		return range(ERR_PTR(incfs_hash_alg), 0);
+
+	hash_alg = incfs_get_hash_alg(incfs_hash_alg);
+	if (IS_ERR(hash_alg))
+		return range((u8 *)hash_alg, 0);
+
+	verity_file_digest = range(kzalloc(hash_alg->digest_size, GFP_KERNEL),
+				   hash_alg->digest_size);
+	if (!verity_file_digest.data)
+		return range(ERR_PTR(-ENOMEM), 0);
+
+	err = incfs_compute_file_digest(hash_alg, desc,
+					verity_file_digest.data);
+	if (err) {
+		pr_err("Error %d computing file digest", err);
+		goto out;
+	}
+	pr_debug("Computed file digest: %s:%*phN\n",
+		 hash_alg->name, (int) verity_file_digest.len,
+		 verity_file_digest.data);
+
+	err = __fsverity_verify_signature(inode, signature, sig_size,
+					  verity_file_digest.data,
+					  desc->hash_algorithm);
+out:
+	if (err) {
+		kfree(verity_file_digest.data);
+		verity_file_digest = range(ERR_PTR(err), 0);
+	}
+	return verity_file_digest;
+}
+
+static struct fsverity_descriptor *incfs_get_fsverity_descriptor(
+					struct file *filp, int hash_algorithm)
+{
+	struct inode *inode = file_inode(filp);
+	struct fsverity_descriptor *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	int err;
+
+	if (!desc)
+		return ERR_PTR(-ENOMEM);
+
+	*desc = (struct fsverity_descriptor) {
+		.version = 1,
+		.hash_algorithm = hash_algorithm,
+		.log_blocksize = ilog2(INCFS_DATA_FILE_BLOCK_SIZE),
+		.data_size = cpu_to_le64(inode->i_size),
+	};
+
+	err = incfs_get_root_hash(filp, desc->root_hash);
+	if (err) {
+		kfree(desc);
+		return ERR_PTR(err);
+	}
+
+	return desc;
+}
+
+static struct mem_range incfs_calc_verity_digest(
+					struct inode *inode, struct file *filp,
+					u8 *signature, size_t signature_size,
+					int hash_algorithm)
+{
+	struct fsverity_descriptor *desc = incfs_get_fsverity_descriptor(filp,
+							hash_algorithm);
+	struct mem_range verity_file_digest;
+
+	if (IS_ERR(desc))
+		return range((u8 *)desc, 0);
+	verity_file_digest = incfs_calc_verity_digest_from_desc(inode, desc,
+						signature, signature_size);
+	kfree(desc);
+	return verity_file_digest;
+}
+
+static int incfs_build_merkle_tree(struct file *f, struct data_file *df,
+			     struct backing_file_context *bfc,
+			     struct mtree *hash_tree, loff_t hash_offset,
+			     struct incfs_hash_alg *alg, struct mem_range hash)
+{
+	int error = 0;
+	int limit, lvl, i, result;
+	struct mem_range buf = {.len = INCFS_DATA_FILE_BLOCK_SIZE};
+	struct mem_range tmp = {.len = 2 * INCFS_DATA_FILE_BLOCK_SIZE};
+
+	buf.data = (u8 *)__get_free_pages(GFP_NOFS, get_order(buf.len));
+	tmp.data = (u8 *)__get_free_pages(GFP_NOFS, get_order(tmp.len));
+	if (!buf.data || !tmp.data) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * On each pass, lvl - 1 is the level being read and lvl the level
+	 * being written; level -1 denotes the actual data blocks and level
+	 * hash_tree->depth the root hash.
+	 */
+	limit = df->df_data_block_count;
+	for (lvl = 0; lvl <= hash_tree->depth; lvl++) {
+		for (i = 0; i < limit; ++i) {
+			loff_t hash_level_offset;
+			struct mem_range partial_buf = buf;
+
+			if (lvl == 0)
+				result = incfs_read_data_file_block(partial_buf,
+						f, i, tmp, NULL);
+			else {
+				hash_level_offset = hash_offset +
+				       hash_tree->hash_level_suboffset[lvl - 1];
+
+				result = incfs_kread(bfc, partial_buf.data,
+						partial_buf.len,
+						hash_level_offset + i *
+						INCFS_DATA_FILE_BLOCK_SIZE);
+			}
+
+			if (result < 0) {
+				error = result;
+				goto out;
+			}
+
+			partial_buf.len = result;
+			error = incfs_calc_digest(alg, partial_buf, hash);
+			if (error)
+				goto out;
+
+			/*
+			 * last level - only one hash to take and it is stored
+			 * in the incfs signature record
+			 */
+			if (lvl == hash_tree->depth)
+				break;
+
+			hash_level_offset = hash_offset +
+				hash_tree->hash_level_suboffset[lvl];
+
+			result = incfs_kwrite(bfc, hash.data, hash.len,
+					hash_level_offset + hash.len * i);
+
+			if (result < 0) {
+				error = result;
+				goto out;
+			}
+
+			if (result != hash.len) {
+				error = -EIO;
+				goto out;
+			}
+		}
+		limit = DIV_ROUND_UP(limit,
+				     INCFS_DATA_FILE_BLOCK_SIZE / hash.len);
+	}
+
+out:
+	free_pages((unsigned long)tmp.data, get_order(tmp.len));
+	free_pages((unsigned long)buf.data, get_order(buf.len));
+	return error;
+}
+
+/*
+ * incfs files have a signature record that is separate from the
+ * verity_signature record. The signature record does not actually contain a
+ * signature; rather, it contains the size/offset of the hash tree and a
+ * binary blob which contains the root hash and potentially a signature.
+ *
+ * If the file was created with a signature record, then this function simply
+ * returns.
+ *
+ * Otherwise it will create a signature record with a minimal binary blob as
+ * defined by the structure below, create space for the hash tree, and then
+ * populate it using incfs_build_merkle_tree().
+ */
+static int incfs_add_signature_record(struct file *f)
+{
+	/* See incfs_parse_signature */
+	struct {
+		__le32 version;
+		__le32 size_of_hash_info_section;
+		struct {
+			__le32 hash_algorithm;
+			u8 log2_blocksize;
+			__le32 salt_size;
+			u8 salt[0];
+			__le32 hash_size;
+			u8 root_hash[32];
+		} __packed hash_section;
+		__le32 size_of_signing_info_section;
+		u8 signing_info_section[0];
+	} __packed sig = {
+		.version = cpu_to_le32(INCFS_SIGNATURE_VERSION),
+		.size_of_hash_info_section =
+			cpu_to_le32(sizeof(sig.hash_section)),
+		.hash_section = {
+			.hash_algorithm = cpu_to_le32(INCFS_HASH_TREE_SHA256),
+			.log2_blocksize = ilog2(INCFS_DATA_FILE_BLOCK_SIZE),
+			.hash_size = cpu_to_le32(SHA256_DIGEST_SIZE),
+		},
+	};
+
+	struct data_file *df = get_incfs_data_file(f);
+	struct mtree *hash_tree = NULL;
+	struct backing_file_context *bfc;
+	int error;
+	loff_t hash_offset, sig_offset;
+	struct incfs_hash_alg *alg = incfs_get_hash_alg(INCFS_HASH_TREE_SHA256);
+	u8 hash_buf[INCFS_MAX_HASH_SIZE];
+	int hash_size = alg->digest_size;
+	struct mem_range hash = range(hash_buf, hash_size);
+	int result;
+	struct incfs_df_signature *signature = NULL;
+
+	if (!df)
+		return -EINVAL;
+
+	if (df->df_header_flags & INCFS_FILE_MAPPED)
+		return -EINVAL;
+
+	/* Already signed? */
+	if (df->df_signature && df->df_hash_tree)
+		return 0;
+
+	if (df->df_signature || df->df_hash_tree)
+		return -EFSCORRUPTED;
+
+	/* Add signature metadata record to file */
+	hash_tree = incfs_alloc_mtree(range((u8 *)&sig, sizeof(sig)),
+				      df->df_data_block_count);
+	if (IS_ERR(hash_tree))
+		return PTR_ERR(hash_tree);
+
+	bfc = df->df_backing_file_context;
+	if (!bfc) {
+		error = -EFSCORRUPTED;
+		goto out;
+	}
+
+	error = mutex_lock_interruptible(&bfc->bc_mutex);
+	if (error)
+		goto out;
+
+	error = incfs_write_signature_to_backing_file(bfc,
+				range((u8 *)&sig, sizeof(sig)),
+				hash_tree->hash_tree_area_size,
+				&hash_offset, &sig_offset);
+	mutex_unlock(&bfc->bc_mutex);
+	if (error)
+		goto out;
+
+	/* Populate merkle tree */
+	error = incfs_build_merkle_tree(f, df, bfc, hash_tree, hash_offset, alg,
+				  hash);
+	if (error)
+		goto out;
+
+	/* Update signature metadata record */
+	memcpy(sig.hash_section.root_hash, hash.data, alg->digest_size);
+	result = incfs_kwrite(bfc, &sig, sizeof(sig), sig_offset);
+	if (result < 0) {
+		error = result;
+		goto out;
+	}
+
+	if (result != sizeof(sig)) {
+		error = -EIO;
+		goto out;
+	}
+
+	/* Update in-memory records */
+	memcpy(hash_tree->root_hash, hash.data, alg->digest_size);
+	signature = kzalloc(sizeof(*signature), GFP_NOFS);
+	if (!signature) {
+		error = -ENOMEM;
+		goto out;
+	}
+	*signature = (struct incfs_df_signature) {
+		.hash_offset = hash_offset,
+		.hash_size = hash_tree->hash_tree_area_size,
+		.sig_offset = sig_offset,
+		.sig_size = sizeof(sig),
+	};
+	df->df_signature = signature;
+	signature = NULL;
+
+	/*
+	 * Use memory barrier to prevent readpage seeing the hash tree until
+	 * it's fully there
+	 */
+	smp_store_release(&df->df_hash_tree, hash_tree);
+	hash_tree = NULL;
+
+out:
+	kfree(signature);
+	kfree(hash_tree);
+	return error;
+}
+
+static int incfs_enable_verity(struct file *filp,
+			 const struct fsverity_enable_arg *arg)
+{
+	struct inode *inode = file_inode(filp);
+	struct data_file *df = get_incfs_data_file(filp);
+	u8 *signature = NULL;
+	struct mem_range verity_file_digest = range(NULL, 0);
+	int err;
+
+	if (!df)
+		return -EFSCORRUPTED;
+
+	err = mutex_lock_interruptible(&df->df_enable_verity);
+	if (err)
+		return err;
+
+	if (IS_VERITY(inode)) {
+		err = -EEXIST;
+		goto out;
+	}
+
+	err = incfs_add_signature_record(filp);
+	if (err)
+		goto out;
+
+	/* Get the signature if the user provided one */
+	if (arg->sig_size) {
+		signature = memdup_user(u64_to_user_ptr(arg->sig_ptr),
+					arg->sig_size);
+		if (IS_ERR(signature)) {
+			err = PTR_ERR(signature);
+			signature = NULL;
+			goto out;
+		}
+	}
+
+	verity_file_digest = incfs_calc_verity_digest(inode, filp, signature,
+					arg->sig_size, arg->hash_algorithm);
+	if (IS_ERR(verity_file_digest.data)) {
+		err = PTR_ERR(verity_file_digest.data);
+		verity_file_digest.data = NULL;
+		goto out;
+	}
+
+	err = incfs_end_enable_verity(filp, signature, arg->sig_size);
+	if (err)
+		goto out;
+
+	/* Successfully enabled verity */
+	incfs_set_verity_digest(inode, verity_file_digest);
+	verity_file_digest.data = NULL;
+out:
+	mutex_unlock(&df->df_enable_verity);
+	kfree(signature);
+	kfree(verity_file_digest.data);
+	if (err)
+		pr_err("%s failed with err %d\n", __func__, err);
+	return err;
+}
+
+int incfs_ioctl_enable_verity(struct file *filp, const void __user *uarg)
+{
+	struct inode *inode = file_inode(filp);
+	struct fsverity_enable_arg arg;
+
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+
+	if (arg.version != 1)
+		return -EINVAL;
+
+	if (arg.__reserved1 ||
+	    memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2)))
+		return -EINVAL;
+
+	if (arg.hash_algorithm != FS_VERITY_HASH_ALG_SHA256)
+		return -EINVAL;
+
+	if (arg.block_size != PAGE_SIZE)
+		return -EINVAL;
+
+	if (arg.salt_size)
+		return -EINVAL;
+
+	if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
+		return -EMSGSIZE;
+
+	if (S_ISDIR(inode->i_mode))
+		return -EISDIR;
+
+	if (!S_ISREG(inode->i_mode))
+		return -EINVAL;
+
+	return incfs_enable_verity(filp, &arg);
+}
+
+static u8 *incfs_get_verity_signature(struct file *filp, size_t *sig_size)
+{
+	struct data_file *df = get_incfs_data_file(filp);
+	struct incfs_df_verity_signature *vs;
+	u8 *signature;
+	int res;
+
+	if (!df || !df->df_backing_file_context)
+		return ERR_PTR(-EFSCORRUPTED);
+
+	vs = df->df_verity_signature;
+	if (!vs) {
+		*sig_size = 0;
+		return NULL;
+	}
+
+	if (!vs->size) {
+		*sig_size = 0;
+		return ERR_PTR(-EFSCORRUPTED);
+	}
+
+	signature = kzalloc(vs->size, GFP_KERNEL);
+	if (!signature)
+		return ERR_PTR(-ENOMEM);
+
+	res = incfs_kread(df->df_backing_file_context,
+			  signature, vs->size, vs->offset);
+
+	if (res < 0)
+		goto err_out;
+
+	if (res != vs->size) {
+		res = -EINVAL;
+		goto err_out;
+	}
+
+	*sig_size = vs->size;
+	return signature;
+
+err_out:
+	kfree(signature);
+	return ERR_PTR(res);
+}
+
+/* Ensure data_file->df_verity_file_digest is populated */
+static int ensure_verity_info(struct inode *inode, struct file *filp)
+{
+	struct mem_range verity_file_digest;
+	u8 *signature = NULL;
+	size_t sig_size;
+	int err = 0;
+
+	/* See if this file's verity file digest is already cached */
+	verity_file_digest = incfs_get_verity_digest(inode);
+	if (verity_file_digest.data)
+		return 0;
+
+	signature = incfs_get_verity_signature(filp, &sig_size);
+	if (IS_ERR(signature))
+		return PTR_ERR(signature);
+
+	verity_file_digest = incfs_calc_verity_digest(inode, filp, signature,
+						     sig_size,
+						     FS_VERITY_HASH_ALG_SHA256);
+	if (IS_ERR(verity_file_digest.data)) {
+		err = PTR_ERR(verity_file_digest.data);
+		goto out;
+	}
+
+	incfs_set_verity_digest(inode, verity_file_digest);
+
+out:
+	kfree(signature);
+	return err;
+}
+
+/**
+ * incfs_fsverity_file_open() - prepare to open a file that may be
+ * verity-enabled
+ * @inode: the inode being opened
+ * @filp: the struct file being set up
+ *
+ * When opening a verity file, set up data_file->df_verity_file_digest if not
+ * already done. Note that incfs does not allow opening for writing, so there is
+ * no need for that check.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int incfs_fsverity_file_open(struct inode *inode, struct file *filp)
+{
+	if (IS_VERITY(inode))
+		return ensure_verity_info(inode, filp);
+
+	return 0;
+}
+
+int incfs_ioctl_measure_verity(struct file *filp, void __user *_uarg)
+{
+	struct inode *inode = file_inode(filp);
+	struct mem_range verity_file_digest = incfs_get_verity_digest(inode);
+	struct fsverity_digest __user *uarg = _uarg;
+	struct fsverity_digest arg;
+
+	if (!verity_file_digest.data || !verity_file_digest.len)
+		return -ENODATA; /* not a verity file */
+
+	/*
+	 * The user specifies the digest_size their buffer has space for; we can
+	 * return the digest if it fits in the available space.  We write back
+	 * the actual size, which may be shorter than the user-specified size.
+	 */
+
+	if (get_user(arg.digest_size, &uarg->digest_size))
+		return -EFAULT;
+	if (arg.digest_size < verity_file_digest.len)
+		return -EOVERFLOW;
+
+	memset(&arg, 0, sizeof(arg));
+	arg.digest_algorithm = FS_VERITY_HASH_ALG_SHA256;
+	arg.digest_size = verity_file_digest.len;
+
+	if (copy_to_user(uarg, &arg, sizeof(arg)))
+		return -EFAULT;
+
+	if (copy_to_user(uarg->digest, verity_file_digest.data,
+			 verity_file_digest.len))
+		return -EFAULT;
+
+	return 0;
+}
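+
+/*
+ * Userspace sketch for the measure ioctl (hypothetical fd; the caller
+ * reserves room for the digest right after the header):
+ *
+ *	struct {
+ *		struct fsverity_digest d;
+ *		__u8 buf[32];
+ *	} m = { .d.digest_size = sizeof(m.buf) };
+ *
+ *	if (ioctl(fd, FS_IOC_MEASURE_VERITY, &m.d) == 0)
+ *		printf("%u-byte SHA-256 file digest\n", m.d.digest_size);
+ */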
+
+static int incfs_read_merkle_tree(struct file *filp, void __user *buf,
+				  u64 start_offset, int length)
+{
+	struct mem_range tmp_buf;
+	size_t offset;
+	int retval = 0;
+	int err = 0;
+	struct data_file *df = get_incfs_data_file(filp);
+
+	if (!df)
+		return -EINVAL;
+
+	tmp_buf = (struct mem_range) {
+		.data = kzalloc(INCFS_DATA_FILE_BLOCK_SIZE, GFP_NOFS),
+		.len = INCFS_DATA_FILE_BLOCK_SIZE,
+	};
+	if (!tmp_buf.data)
+		return -ENOMEM;
+
+	for (offset = start_offset; offset < start_offset + length;
+	     offset += tmp_buf.len) {
+		err = incfs_read_merkle_tree_blocks(tmp_buf, df, offset);
+
+		if (err < 0)
+			break;
+
+		if (err != tmp_buf.len)
+			break;
+
+		if (copy_to_user(buf, tmp_buf.data, tmp_buf.len)) {
+			err = -EFAULT;
+			break;
+		}
+
+		buf += tmp_buf.len;
+		retval += tmp_buf.len;
+	}
+
+	kfree(tmp_buf.data);
+	return retval ? retval : err;
+}
+
+static int incfs_read_descriptor(struct file *filp,
+				 void __user *buf, u64 offset, int length)
+{
+	int err;
+	struct fsverity_descriptor *desc = incfs_get_fsverity_descriptor(filp,
+						FS_VERITY_HASH_ALG_SHA256);
+
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+	length = min_t(u64, length, sizeof(*desc));
+	err = copy_to_user(buf, desc, length);
+	kfree(desc);
+	return err ? -EFAULT : length;
+}
+
+static int incfs_read_signature(struct file *filp,
+				void __user *buf, u64 offset, int length)
+{
+	size_t sig_size;
+	u8 *signature;
+	int err;
+
+	signature = incfs_get_verity_signature(filp, &sig_size);
+	if (IS_ERR(signature))
+		return PTR_ERR(signature);
+
+	if (!signature)
+		return -ENODATA;
+
+	length = min_t(u64, length, sig_size);
+	err = copy_to_user(buf, signature, length);
+	kfree(signature);
+	return err ? -EFAULT : length;
+}
+
+int incfs_ioctl_read_verity_metadata(struct file *filp,
+				     const void __user *uarg)
+{
+	struct fsverity_read_metadata_arg arg;
+	int length;
+	void __user *buf;
+
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+
+	if (arg.__reserved)
+		return -EINVAL;
+
+	/* offset + length must not overflow. */
+	if (arg.offset + arg.length < arg.offset)
+		return -EINVAL;
+
+	/* Ensure that the return value will fit in INT_MAX. */
+	length = min_t(u64, arg.length, INT_MAX);
+
+	buf = u64_to_user_ptr(arg.buf_ptr);
+
+	switch (arg.metadata_type) {
+	case FS_VERITY_METADATA_TYPE_MERKLE_TREE:
+		return incfs_read_merkle_tree(filp, buf, arg.offset, length);
+	case FS_VERITY_METADATA_TYPE_DESCRIPTOR:
+		return incfs_read_descriptor(filp, buf, arg.offset, length);
+	case FS_VERITY_METADATA_TYPE_SIGNATURE:
+		return incfs_read_signature(filp, buf, arg.offset, length);
+	default:
+		return -EINVAL;
+	}
+}
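+
+/*
+ * Reading back the Merkle tree from userspace (sketch; fd, buf and the
+ * 4096-byte window are illustrative):
+ *
+ *	struct fsverity_read_metadata_arg arg = {
+ *		.metadata_type = FS_VERITY_METADATA_TYPE_MERKLE_TREE,
+ *		.offset = 0,
+ *		.length = 4096,
+ *		.buf_ptr = (__u64)(uintptr_t)buf,
+ *	};
+ *	int n = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &arg);
+ *	// n is the byte count actually read, or -1 with errno set
+ */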
diff --git a/fs/incfs/verity.h b/fs/incfs/verity.h
new file mode 100644
index 0000000..8fcdbc8
--- /dev/null
+++ b/fs/incfs/verity.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Google LLC
+ */
+
+#ifndef _INCFS_VERITY_H
+#define _INCFS_VERITY_H
+
+/* Arbitrary limit to bound the kmalloc() size.  Can be changed. */
+#define FS_VERITY_MAX_SIGNATURE_SIZE	16128
+
+#ifdef CONFIG_FS_VERITY
+
+int incfs_ioctl_enable_verity(struct file *filp, const void __user *uarg);
+int incfs_ioctl_measure_verity(struct file *filp, void __user *_uarg);
+
+int incfs_fsverity_file_open(struct inode *inode, struct file *filp);
+int incfs_ioctl_read_verity_metadata(struct file *filp,
+				     const void __user *uarg);
+
+#else /* !CONFIG_FS_VERITY */
+
+static inline int incfs_ioctl_enable_verity(struct file *filp,
+					    const void __user *uarg)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int incfs_ioctl_measure_verity(struct file *filp,
+					     void __user *_uarg)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int incfs_fsverity_file_open(struct inode *inode,
+					   struct file *filp)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int incfs_ioctl_read_verity_metadata(struct file *filp,
+						const void __user *uarg)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif /* !CONFIG_FS_VERITY */
+
+#endif
diff --git a/fs/incfs/vfs.c b/fs/incfs/vfs.c
new file mode 100644
index 0000000..282f457
--- /dev/null
+++ b/fs/incfs/vfs.c
@@ -0,0 +1,1916 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+
+#include <linux/blkdev.h>
+#include <linux/compat.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fs_stack.h>
+#include <linux/fsnotify.h>
+#include <linux/fsverity.h>
+#include <linux/mmap_lock.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include <linux/backing-dev-defs.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "vfs.h"
+
+#include "data_mgmt.h"
+#include "format.h"
+#include "internal.h"
+#include "pseudo_files.h"
+#include "sysfs.h"
+#include "verity.h"
+
+static int incfs_remount_fs(struct super_block *sb, int *flags, char *data);
+
+static int dentry_revalidate(struct dentry *dentry, unsigned int flags);
+static void dentry_release(struct dentry *d);
+
+static int iterate_incfs_dir(struct file *file, struct dir_context *ctx);
+static struct dentry *dir_lookup(struct inode *dir_inode,
+		struct dentry *dentry, unsigned int flags);
+static int dir_mkdir(struct user_namespace *ns, struct inode *dir,
+		     struct dentry *dentry, umode_t mode);
+static int dir_unlink(struct inode *dir, struct dentry *dentry);
+static int dir_link(struct dentry *old_dentry, struct inode *dir,
+			 struct dentry *new_dentry);
+static int dir_rmdir(struct inode *dir, struct dentry *dentry);
+static int dir_rename(struct inode *old_dir, struct dentry *old_dentry,
+		struct inode *new_dir, struct dentry *new_dentry);
+
+static int file_open(struct inode *inode, struct file *file);
+static int file_release(struct inode *inode, struct file *file);
+static int read_single_page(struct file *f, struct page *page);
+static long dispatch_ioctl(struct file *f, unsigned int req, unsigned long arg);
+
+#ifdef CONFIG_COMPAT
+static long incfs_compat_ioctl(struct file *file, unsigned int cmd,
+			 unsigned long arg);
+#endif
+
+static struct inode *alloc_inode(struct super_block *sb);
+static void free_inode(struct inode *inode);
+static void evict_inode(struct inode *inode);
+
+static int incfs_setattr(struct user_namespace *ns, struct dentry *dentry,
+			 struct iattr *ia);
+static int incfs_getattr(struct user_namespace *ns, const struct path *path,
+			 struct kstat *stat, u32 request_mask,
+			 unsigned int query_flags);
+static ssize_t incfs_getxattr(struct dentry *d, const char *name,
+			void *value, size_t size);
+static ssize_t incfs_setxattr(struct user_namespace *ns, struct dentry *d,
+			      const char *name, const void *value, size_t size,
+			      int flags);
+static ssize_t incfs_listxattr(struct dentry *d, char *list, size_t size);
+
+static int show_options(struct seq_file *, struct dentry *);
+
+static const struct super_operations incfs_super_ops = {
+	.statfs = simple_statfs,
+	.remount_fs = incfs_remount_fs,
+	.alloc_inode	= alloc_inode,
+	.destroy_inode	= free_inode,
+	.evict_inode = evict_inode,
+	.show_options = show_options
+};
+
+static int dir_rename_wrap(struct user_namespace *ns, struct inode *old_dir,
+			   struct dentry *old_dentry, struct inode *new_dir,
+			   struct dentry *new_dentry, unsigned int flags)
+{
+	return dir_rename(old_dir, old_dentry, new_dir, new_dentry);
+}
+
+static const struct inode_operations incfs_dir_inode_ops = {
+	.lookup = dir_lookup,
+	.mkdir = dir_mkdir,
+	.rename = dir_rename_wrap,
+	.unlink = dir_unlink,
+	.link = dir_link,
+	.rmdir = dir_rmdir,
+	.setattr = incfs_setattr,
+};
+
+static const struct file_operations incfs_dir_fops = {
+	.llseek = generic_file_llseek,
+	.read = generic_read_dir,
+	.iterate = iterate_incfs_dir,
+	.open = file_open,
+	.release = file_release,
+};
+
+static const struct dentry_operations incfs_dentry_ops = {
+	.d_revalidate = dentry_revalidate,
+	.d_release = dentry_release
+};
+
+static const struct address_space_operations incfs_address_space_ops = {
+	.readpage = read_single_page,
+	/* .readpages = readpages */
+};
+
+static vm_fault_t incfs_fault(struct vm_fault *vmf)
+{
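+	/*
+	 * With FAULT_FLAG_ALLOW_RETRY set, filemap_fault() may drop mmap_lock
+	 * and return VM_FAULT_RETRY while waiting for the page; clearing the
+	 * flag makes the (potentially long, userspace-backed) read complete
+	 * in a single pass.
+	 */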
+	vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
+	return filemap_fault(vmf);
+}
+
+static const struct vm_operations_struct incfs_file_vm_ops = {
+	.fault		= incfs_fault,
+	.map_pages	= filemap_map_pages,
+	.page_mkwrite	= filemap_page_mkwrite,
+};
+
+/* This is used for a general mmap of a disk file */
+
+static int incfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct address_space *mapping = file->f_mapping;
+
+	if (!mapping->a_ops->readpage)
+		return -ENOEXEC;
+	file_accessed(file);
+	vma->vm_ops = &incfs_file_vm_ops;
+	return 0;
+}
+
+const struct file_operations incfs_file_ops = {
+	.open = file_open,
+	.release = file_release,
+	.read_iter = generic_file_read_iter,
+	.mmap = incfs_file_mmap,
+	.splice_read = generic_file_splice_read,
+	.llseek = generic_file_llseek,
+	.unlocked_ioctl = dispatch_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = incfs_compat_ioctl,
+#endif
+};
+
+const struct inode_operations incfs_file_inode_ops = {
+	.setattr = incfs_setattr,
+	.getattr = incfs_getattr,
+	.listxattr = incfs_listxattr
+};
+
+static int incfs_handler_getxattr(const struct xattr_handler *xh,
+				  struct dentry *d, struct inode *inode,
+				  const char *name, void *buffer, size_t size)
+{
+	return incfs_getxattr(d, name, buffer, size);
+}
+
+static int incfs_handler_setxattr(const struct xattr_handler *xh,
+				  struct user_namespace *ns,
+				  struct dentry *d, struct inode *inode,
+				  const char *name, const void *buffer,
+				  size_t size, int flags)
+{
+	return incfs_setxattr(ns, d, name, buffer, size, flags);
+}
+
+static const struct xattr_handler incfs_xattr_handler = {
+	.prefix = "",	/* AKA all attributes */
+	.get = incfs_handler_getxattr,
+	.set = incfs_handler_setxattr,
+};
+
+static const struct xattr_handler *incfs_xattr_ops[] = {
+	&incfs_xattr_handler,
+	NULL,
+};
+
+struct inode_search {
+	unsigned long ino;
+
+	struct dentry *backing_dentry;
+
+	size_t size;
+
+	bool verity;
+};
+
+enum parse_parameter {
+	Opt_read_timeout,
+	Opt_readahead_pages,
+	Opt_rlog_pages,
+	Opt_rlog_wakeup_cnt,
+	Opt_report_uid,
+	Opt_sysfs_name,
+	Opt_err
+};
+
+static const match_table_t option_tokens = {
+	{ Opt_read_timeout, "read_timeout_ms=%u" },
+	{ Opt_readahead_pages, "readahead=%u" },
+	{ Opt_rlog_pages, "rlog_pages=%u" },
+	{ Opt_rlog_wakeup_cnt, "rlog_wakeup_cnt=%u" },
+	{ Opt_report_uid, "report_uid" },
+	{ Opt_sysfs_name, "sysfs_name=%s" },
+	{ Opt_err, NULL }
+};
+
+static void free_options(struct mount_options *opts)
+{
+	kfree(opts->sysfs_name);
+	opts->sysfs_name = NULL;
+}
+
+static int parse_options(struct mount_options *opts, char *str)
+{
+	substring_t args[MAX_OPT_ARGS];
+	int value;
+	char *position;
+
+	if (opts == NULL)
+		return -EFAULT;
+
+	*opts = (struct mount_options) {
+		.read_timeout_ms = 1000, /* Default: 1s */
+		.readahead_pages = 10,
+		.read_log_pages = 2,
+		.read_log_wakeup_count = 10,
+	};
+
+	if (str == NULL || *str == 0)
+		return 0;
+
+	while ((position = strsep(&str, ",")) != NULL) {
+		int token;
+
+		if (!*position)
+			continue;
+
+		token = match_token(position, option_tokens, args);
+
+		switch (token) {
+		case Opt_read_timeout:
+			if (match_int(&args[0], &value))
+				return -EINVAL;
+			if (value > 3600000)
+				return -EINVAL;
+			opts->read_timeout_ms = value;
+			break;
+		case Opt_readahead_pages:
+			if (match_int(&args[0], &value))
+				return -EINVAL;
+			opts->readahead_pages = value;
+			break;
+		case Opt_rlog_pages:
+			if (match_int(&args[0], &value))
+				return -EINVAL;
+			opts->read_log_pages = value;
+			break;
+		case Opt_rlog_wakeup_cnt:
+			if (match_int(&args[0], &value))
+				return -EINVAL;
+			opts->read_log_wakeup_count = value;
+			break;
+		case Opt_report_uid:
+			opts->report_uid = true;
+			break;
+		case Opt_sysfs_name:
+			opts->sysfs_name = match_strdup(&args[0]);
+			break;
+		default:
+			free_options(opts);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
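+
+/*
+ * Example (hypothetical paths): mounting with
+ *
+ *	mount -t incremental-fs \
+ *		-o read_timeout_ms=5000,report_uid,sysfs_name=data0 \
+ *		/data/backing /data/mnt
+ *
+ * yields read_timeout_ms=5000, report_uid=true, sysfs_name="data0", and the
+ * defaults set above for the remaining fields.
+ */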
+
+/* Read file size from the attribute. Quicker than reading the header */
+static u64 read_size_attr(struct dentry *backing_dentry)
+{
+	__le64 attr_value;
+	ssize_t bytes_read;
+
+	bytes_read = vfs_getxattr(&init_user_ns, backing_dentry, INCFS_XATTR_SIZE_NAME,
+			(char *)&attr_value, sizeof(attr_value));
+
+	if (bytes_read != sizeof(attr_value))
+		return 0;
+
+	return le64_to_cpu(attr_value);
+}
+
+/* Read verity flag from the attribute. Quicker than reading the header */
+static bool read_verity_attr(struct dentry *backing_dentry)
+{
+	return vfs_getxattr(&init_user_ns, backing_dentry, INCFS_XATTR_VERITY_NAME, NULL, 0)
+		>= 0;
+}
+
+static int inode_test(struct inode *inode, void *opaque)
+{
+	struct inode_search *search = opaque;
+	struct inode_info *node = get_incfs_node(inode);
+	struct inode *backing_inode = d_inode(search->backing_dentry);
+
+	if (!node)
+		return 0;
+
+	return node->n_backing_inode == backing_inode &&
+		inode->i_ino == search->ino;
+}
+
+static int inode_set(struct inode *inode, void *opaque)
+{
+	struct inode_search *search = opaque;
+	struct inode_info *node = get_incfs_node(inode);
+	struct dentry *backing_dentry = search->backing_dentry;
+	struct inode *backing_inode = d_inode(backing_dentry);
+
+	fsstack_copy_attr_all(inode, backing_inode);
+	if (S_ISREG(inode->i_mode)) {
+		u64 size = search->size;
+
+		inode->i_size = size;
+		inode->i_blocks = get_blocks_count_for_size(size);
+		inode->i_mapping->a_ops = &incfs_address_space_ops;
+		inode->i_op = &incfs_file_inode_ops;
+		inode->i_fop = &incfs_file_ops;
+		inode->i_mode &= ~0222;
+		if (search->verity)
+			inode_set_flags(inode, S_VERITY, S_VERITY);
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_size = 0;
+		inode->i_blocks = 1;
+		inode->i_mapping->a_ops = &incfs_address_space_ops;
+		inode->i_op = &incfs_dir_inode_ops;
+		inode->i_fop = &incfs_dir_fops;
+	} else {
+		pr_warn_once("incfs: Unexpected inode type\n");
+		return -EBADF;
+	}
+
+	ihold(backing_inode);
+	node->n_backing_inode = backing_inode;
+	node->n_mount_info = get_mount_info(inode->i_sb);
+	inode->i_ctime = backing_inode->i_ctime;
+	inode->i_mtime = backing_inode->i_mtime;
+	inode->i_atime = backing_inode->i_atime;
+	inode->i_ino = backing_inode->i_ino;
+	if (backing_inode->i_ino < INCFS_START_INO_RANGE) {
+		pr_warn("incfs: ino conflict with backing FS %ld\n",
+			backing_inode->i_ino);
+	}
+
+	return 0;
+}
+
+static struct inode *fetch_regular_inode(struct super_block *sb,
+					struct dentry *backing_dentry)
+{
+	struct inode *backing_inode = d_inode(backing_dentry);
+	struct inode_search search = {
+		.ino = backing_inode->i_ino,
+		.backing_dentry = backing_dentry,
+		.size = read_size_attr(backing_dentry),
+		.verity = read_verity_attr(backing_dentry),
+	};
+	struct inode *inode = iget5_locked(sb, search.ino, inode_test,
+				inode_set, &search);
+
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	if (inode->i_state & I_NEW)
+		unlock_new_inode(inode);
+
+	return inode;
+}
+
+static int iterate_incfs_dir(struct file *file, struct dir_context *ctx)
+{
+	struct dir_file *dir = get_incfs_dir_file(file);
+	int error = 0;
+	struct mount_info *mi = get_mount_info(file_superblock(file));
+	bool root;
+
+	if (!dir) {
+		error = -EBADF;
+		goto out;
+	}
+
+	root = dir->backing_dir->f_inode
+			== d_inode(mi->mi_backing_dir_path.dentry);
+
+	if (root) {
+		error = emit_pseudo_files(ctx);
+		if (error)
+			goto out;
+	}
+
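+	/*
+	 * Pseudo files occupy the first PSEUDO_FILE_COUNT directory
+	 * positions; shift ctx->pos so the backing directory iterates from
+	 * its own offset zero, then shift it back for the next caller.
+	 */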
+	ctx->pos -= PSEUDO_FILE_COUNT;
+	error = iterate_dir(dir->backing_dir, ctx);
+	ctx->pos += PSEUDO_FILE_COUNT;
+	file->f_pos = dir->backing_dir->f_pos;
+out:
+	if (error)
+		pr_warn("incfs: %s %s %d\n", __func__,
+			file->f_path.dentry->d_name.name, error);
+	return error;
+}
+
+static int incfs_init_dentry(struct dentry *dentry, struct path *path)
+{
+	struct dentry_info *d_info = NULL;
+
+	if (!dentry || !path)
+		return -EFAULT;
+
+	d_info = kzalloc(sizeof(*d_info), GFP_NOFS);
+	if (!d_info)
+		return -ENOMEM;
+
+	d_info->backing_path = *path;
+	path_get(path);
+
+	dentry->d_fsdata = d_info;
+	return 0;
+}
+
+static struct dentry *open_or_create_special_dir(struct dentry *backing_dir,
+						 const char *name)
+{
+	struct dentry *index_dentry;
+	struct inode *backing_inode = d_inode(backing_dir);
+	int err = 0;
+
+	index_dentry = incfs_lookup_dentry(backing_dir, name);
+	if (!index_dentry) {
+		return ERR_PTR(-EINVAL);
+	} else if (IS_ERR(index_dentry)) {
+		return index_dentry;
+	} else if (d_really_is_positive(index_dentry)) {
+		/* The special directory already exists. */
+		return index_dentry;
+	}
+
+	/* The special directory needs to be created. */
+	inode_lock_nested(backing_inode, I_MUTEX_PARENT);
+	err = vfs_mkdir(&init_user_ns, backing_inode, index_dentry, 0777);
+	inode_unlock(backing_inode);
+
+	if (err) {
+		dput(index_dentry);
+		return ERR_PTR(err);
+	}
+
+	if (!d_really_is_positive(index_dentry) ||
+		unlikely(d_unhashed(index_dentry))) {
+		dput(index_dentry);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return index_dentry;
+}
+
+static int read_single_page_timeouts(struct data_file *df, struct file *f,
+				     int block_index, struct mem_range range,
+				     struct mem_range tmp)
+{
+	struct mount_info *mi = df->df_mount_info;
+	struct incfs_read_data_file_timeouts timeouts = {
+		.max_pending_time_us = U32_MAX,
+	};
+	int uid = current_uid().val;
+	int i;
+
+	spin_lock(&mi->mi_per_uid_read_timeouts_lock);
+	for (i = 0; i < mi->mi_per_uid_read_timeouts_size /
+		sizeof(*mi->mi_per_uid_read_timeouts); ++i) {
+		struct incfs_per_uid_read_timeouts *t =
+			&mi->mi_per_uid_read_timeouts[i];
+
+		if(t->uid == uid) {
+			timeouts.min_time_us = t->min_time_us;
+			timeouts.min_pending_time_us = t->min_pending_time_us;
+			timeouts.max_pending_time_us = t->max_pending_time_us;
+			break;
+		}
+	}
+	spin_unlock(&mi->mi_per_uid_read_timeouts_lock);
+	if (timeouts.max_pending_time_us == U32_MAX) {
+		u64 read_timeout_us = (u64)mi->mi_options.read_timeout_ms *
+					1000;
+
+		timeouts.max_pending_time_us = read_timeout_us <= U32_MAX ?
+					       read_timeout_us : U32_MAX;
+	}
+
+	return incfs_read_data_file_block(range, f, block_index, tmp,
+					  &timeouts);
+}
+
+static int read_single_page(struct file *f, struct page *page)
+{
+	loff_t offset = 0;
+	loff_t size = 0;
+	ssize_t bytes_to_read = 0;
+	ssize_t read_result = 0;
+	struct data_file *df = get_incfs_data_file(f);
+	int result = 0;
+	void *page_start;
+	int block_index;
+
+	if (!df) {
+		SetPageError(page);
+		unlock_page(page);
+		return -EBADF;
+	}
+
+	page_start = kmap(page);
+	offset = page_offset(page);
+	block_index = (offset + df->df_mapped_offset) /
+		INCFS_DATA_FILE_BLOCK_SIZE;
+	size = df->df_size;
+
+	if (offset < size) {
+		struct mem_range tmp = {
+			.len = 2 * INCFS_DATA_FILE_BLOCK_SIZE
+		};
+		tmp.data = (u8 *)__get_free_pages(GFP_NOFS, get_order(tmp.len));
+		if (!tmp.data) {
+			read_result = -ENOMEM;
+			goto err;
+		}
+		bytes_to_read = min_t(loff_t, size - offset, PAGE_SIZE);
+
+		read_result = read_single_page_timeouts(df, f, block_index,
+					range(page_start, bytes_to_read), tmp);
+
+		free_pages((unsigned long)tmp.data, get_order(tmp.len));
+	} else {
+		bytes_to_read = 0;
+		read_result = 0;
+	}
+
+err:
+	if (read_result < 0)
+		result = read_result;
+	else if (read_result < PAGE_SIZE)
+		zero_user(page, read_result, PAGE_SIZE - read_result);
+
+	if (result == 0)
+		SetPageUptodate(page);
+	else
+		SetPageError(page);
+
+	flush_dcache_page(page);
+	kunmap(page);
+	unlock_page(page);
+	return result;
+}
+
+int incfs_link(struct dentry *what, struct dentry *where)
+{
+	struct dentry *parent_dentry = dget_parent(where);
+	struct inode *pinode = d_inode(parent_dentry);
+	int error = 0;
+
+	inode_lock_nested(pinode, I_MUTEX_PARENT);
+	error = vfs_link(what, &init_user_ns, pinode, where, NULL);
+	inode_unlock(pinode);
+
+	dput(parent_dentry);
+	return error;
+}
+
+int incfs_unlink(struct dentry *dentry)
+{
+	struct dentry *parent_dentry = dget_parent(dentry);
+	struct inode *pinode = d_inode(parent_dentry);
+	int error = 0;
+
+	inode_lock_nested(pinode, I_MUTEX_PARENT);
+	error = vfs_unlink(&init_user_ns, pinode, dentry, NULL);
+	inode_unlock(pinode);
+
+	dput(parent_dentry);
+	return error;
+}
+
+static int incfs_rmdir(struct dentry *dentry)
+{
+	struct dentry *parent_dentry = dget_parent(dentry);
+	struct inode *pinode = d_inode(parent_dentry);
+	int error = 0;
+
+	inode_lock_nested(pinode, I_MUTEX_PARENT);
+	error = vfs_rmdir(&init_user_ns, pinode, dentry);
+	inode_unlock(pinode);
+
+	dput(parent_dentry);
+	return error;
+}
+
+static void notify_unlink(struct dentry *dentry, const char *file_id_str,
+			  const char *special_directory)
+{
+	struct dentry *root = dentry;
+	struct dentry *file = NULL;
+	struct dentry *dir = NULL;
+	int error = 0;
+	bool take_lock = root->d_parent != root->d_parent->d_parent;
+
+	while (root != root->d_parent)
+		root = root->d_parent;
+
+	if (take_lock)
+		dir = incfs_lookup_dentry(root, special_directory);
+	else
+		dir = lookup_one_len(special_directory, root,
+				     strlen(special_directory));
+
+	if (IS_ERR(dir)) {
+		error = PTR_ERR(dir);
+		goto out;
+	}
+	if (d_is_negative(dir)) {
+		error = -ENOENT;
+		goto out;
+	}
+
+	file = incfs_lookup_dentry(dir, file_id_str);
+	if (IS_ERR(file)) {
+		error = PTR_ERR(file);
+		goto out;
+	}
+	if (d_is_negative(file)) {
+		error = -ENOENT;
+		goto out;
+	}
+
+	fsnotify_unlink(d_inode(dir), file);
+	d_delete(file);
+
+out:
+	if (error)
+		pr_warn("%s failed with error %d\n", __func__, error);
+
+	dput(dir);
+	dput(file);
+}
+
+static void maybe_delete_incomplete_file(struct file *f,
+					 struct data_file *df)
+{
+	struct backing_file_context *bfc;
+	struct mount_info *mi = df->df_mount_info;
+	char *file_id_str = NULL;
+	struct dentry *incomplete_file_dentry = NULL;
+	const struct cred *old_cred = override_creds(mi->mi_owner);
+	int error;
+
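+	/* Nothing to do until every data block has been written */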
+	if (atomic_read(&df->df_data_blocks_written) < df->df_data_block_count)
+		goto out;
+
+	/* Truncate file to remove any preallocated space */
+	bfc = df->df_backing_file_context;
+	if (bfc) {
+		struct file *f = bfc->bc_file;
+
+		if (f) {
+			loff_t size = i_size_read(file_inode(f));
+
+			error = vfs_truncate(&f->f_path, size);
+			if (error)
+				/* No useful action on failure */
+				pr_warn("incfs: Failed to truncate complete file: %d\n",
+					error);
+		}
+	}
+
+	/* This is best effort - there is no useful action to take on failure */
+	file_id_str = file_id_to_str(df->df_id);
+	if (!file_id_str)
+		goto out;
+
+	incomplete_file_dentry = incfs_lookup_dentry(
+					df->df_mount_info->mi_incomplete_dir,
+					file_id_str);
+	if (!incomplete_file_dentry || IS_ERR(incomplete_file_dentry)) {
+		incomplete_file_dentry = NULL;
+		goto out;
+	}
+
+	if (!d_really_is_positive(incomplete_file_dentry))
+		goto out;
+
+	vfs_fsync(df->df_backing_file_context->bc_file, 0);
+	error = incfs_unlink(incomplete_file_dentry);
+	if (error) {
+		pr_warn("incfs: Deleting incomplete file failed: %d\n", error);
+		goto out;
+	}
+
+	notify_unlink(f->f_path.dentry, file_id_str, INCFS_INCOMPLETE_NAME);
+
+out:
+	dput(incomplete_file_dentry);
+	kfree(file_id_str);
+	revert_creds(old_cred);
+}
+
+static long ioctl_fill_blocks(struct file *f, void __user *arg)
+{
+	struct incfs_fill_blocks __user *usr_fill_blocks = arg;
+	struct incfs_fill_blocks fill_blocks;
+	struct incfs_fill_block __user *usr_fill_block_array;
+	struct data_file *df = get_incfs_data_file(f);
+	struct incfs_file_data *fd = f->private_data;
+	const ssize_t data_buf_size = 2 * INCFS_DATA_FILE_BLOCK_SIZE;
+	u8 *data_buf = NULL;
+	ssize_t error = 0;
+	int i = 0;
+
+	if (!df)
+		return -EBADF;
+
+	if (!fd || fd->fd_fill_permission != CAN_FILL)
+		return -EPERM;
+
+	if (copy_from_user(&fill_blocks, usr_fill_blocks, sizeof(fill_blocks)))
+		return -EFAULT;
+
+	usr_fill_block_array = u64_to_user_ptr(fill_blocks.fill_blocks);
+	data_buf = (u8 *)__get_free_pages(GFP_NOFS | __GFP_COMP,
+					  get_order(data_buf_size));
+	if (!data_buf)
+		return -ENOMEM;
+
+	for (i = 0; i < fill_blocks.count; i++) {
+		struct incfs_fill_block fill_block = {};
+
+		if (copy_from_user(&fill_block, &usr_fill_block_array[i],
+				   sizeof(fill_block)) > 0) {
+			error = -EFAULT;
+			break;
+		}
+
+		if (fill_block.data_len > data_buf_size) {
+			error = -E2BIG;
+			break;
+		}
+
+		if (copy_from_user(data_buf, u64_to_user_ptr(fill_block.data),
+				   fill_block.data_len) > 0) {
+			error = -EFAULT;
+			break;
+		}
+		fill_block.data = 0; /* To make sure nobody uses it. */
+		if (fill_block.flags & INCFS_BLOCK_FLAGS_HASH) {
+			error = incfs_process_new_hash_block(df, &fill_block,
+							     data_buf);
+		} else {
+			error = incfs_process_new_data_block(df, &fill_block,
+							     data_buf);
+		}
+		if (error)
+			break;
+	}
+
+	if (data_buf)
+		free_pages((unsigned long)data_buf, get_order(data_buf_size));
+
+	maybe_delete_incomplete_file(f, df);
+
+	/*
+	 * Only report the error if no records were processed, otherwise
+	 * just return how many were processed successfully.
+	 */
+	if (i == 0)
+		return error;
+
+	return i;
+}
+
+static long ioctl_read_file_signature(struct file *f, void __user *arg)
+{
+	struct incfs_get_file_sig_args __user *args_usr_ptr = arg;
+	struct incfs_get_file_sig_args args = {};
+	u8 *sig_buffer = NULL;
+	size_t sig_buf_size = 0;
+	int error = 0;
+	int read_result = 0;
+	struct data_file *df = get_incfs_data_file(f);
+
+	if (!df)
+		return -EINVAL;
+
+	if (copy_from_user(&args, args_usr_ptr, sizeof(args)) > 0)
+		return -EINVAL;
+
+	sig_buf_size = args.file_signature_buf_size;
+	if (sig_buf_size > INCFS_MAX_SIGNATURE_SIZE)
+		return -E2BIG;
+
+	sig_buffer = kzalloc(sig_buf_size, GFP_NOFS | __GFP_COMP);
+	if (!sig_buffer)
+		return -ENOMEM;
+
+	read_result = incfs_read_file_signature(df,
+			range(sig_buffer, sig_buf_size));
+
+	if (read_result < 0) {
+		error = read_result;
+		goto out;
+	}
+
+	if (copy_to_user(u64_to_user_ptr(args.file_signature), sig_buffer,
+			read_result)) {
+		error = -EFAULT;
+		goto out;
+	}
+
+	args.file_signature_len_out = read_result;
+	if (copy_to_user(args_usr_ptr, &args, sizeof(args)))
+		error = -EFAULT;
+
+out:
+	kfree(sig_buffer);
+
+	return error;
+}
+
+static long ioctl_get_filled_blocks(struct file *f, void __user *arg)
+{
+	struct incfs_get_filled_blocks_args __user *args_usr_ptr = arg;
+	struct incfs_get_filled_blocks_args args = {};
+	struct data_file *df = get_incfs_data_file(f);
+	struct incfs_file_data *fd = f->private_data;
+	int error;
+
+	if (!df || !fd)
+		return -EINVAL;
+
+	if (fd->fd_fill_permission != CAN_FILL)
+		return -EPERM;
+
+	if (copy_from_user(&args, args_usr_ptr, sizeof(args)) > 0)
+		return -EINVAL;
+
+	error = incfs_get_filled_blocks(df, fd, &args);
+
+	if (copy_to_user(args_usr_ptr, &args, sizeof(args)))
+		return -EFAULT;
+
+	return error;
+}
+
+static long ioctl_get_block_count(struct file *f, void __user *arg)
+{
+	struct incfs_get_block_count_args __user *args_usr_ptr = arg;
+	struct incfs_get_block_count_args args = {};
+	struct data_file *df = get_incfs_data_file(f);
+
+	if (!df)
+		return -EINVAL;
+
+	args.total_data_blocks_out = df->df_data_block_count;
+	args.filled_data_blocks_out = atomic_read(&df->df_data_blocks_written);
+	args.total_hash_blocks_out = df->df_total_block_count -
+		df->df_data_block_count;
+	args.filled_hash_blocks_out = atomic_read(&df->df_hash_blocks_written);
+
+	if (copy_to_user(args_usr_ptr, &args, sizeof(args)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int incfs_ioctl_get_flags(struct file *f, void __user *arg)
+{
+	u32 flags = IS_VERITY(file_inode(f)) ? FS_VERITY_FL : 0;
+
+	return put_user(flags, (int __user *) arg);
+}
+
+static long dispatch_ioctl(struct file *f, unsigned int req, unsigned long arg)
+{
+	switch (req) {
+	case INCFS_IOC_FILL_BLOCKS:
+		return ioctl_fill_blocks(f, (void __user *)arg);
+	case INCFS_IOC_READ_FILE_SIGNATURE:
+		return ioctl_read_file_signature(f, (void __user *)arg);
+	case INCFS_IOC_GET_FILLED_BLOCKS:
+		return ioctl_get_filled_blocks(f, (void __user *)arg);
+	case INCFS_IOC_GET_BLOCK_COUNT:
+		return ioctl_get_block_count(f, (void __user *)arg);
+	case FS_IOC_ENABLE_VERITY:
+		return incfs_ioctl_enable_verity(f, (const void __user *)arg);
+	case FS_IOC_GETFLAGS:
+		return incfs_ioctl_get_flags(f, (void __user *) arg);
+	case FS_IOC_MEASURE_VERITY:
+		return incfs_ioctl_measure_verity(f, (void __user *)arg);
+	case FS_IOC_READ_VERITY_METADATA:
+		return incfs_ioctl_read_verity_metadata(f, (void __user *)arg);
+	default:
+		return -EINVAL;
+	}
+}
+
+#ifdef CONFIG_COMPAT
+static long incfs_compat_ioctl(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	switch (cmd) {
+	case FS_IOC32_GETFLAGS:
+		cmd = FS_IOC_GETFLAGS;
+		break;
+	case INCFS_IOC_FILL_BLOCKS:
+	case INCFS_IOC_READ_FILE_SIGNATURE:
+	case INCFS_IOC_GET_FILLED_BLOCKS:
+	case INCFS_IOC_GET_BLOCK_COUNT:
+	case FS_IOC_ENABLE_VERITY:
+	case FS_IOC_MEASURE_VERITY:
+	case FS_IOC_READ_VERITY_METADATA:
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return dispatch_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static struct dentry *dir_lookup(struct inode *dir_inode, struct dentry *dentry,
+				 unsigned int flags)
+{
+	struct mount_info *mi = get_mount_info(dir_inode->i_sb);
+	struct dentry *dir_dentry = NULL;
+	struct dentry *backing_dentry = NULL;
+	struct path dir_backing_path = {};
+	struct inode_info *dir_info = get_incfs_node(dir_inode);
+	int err = 0;
+
+	if (!mi || !dir_info || !dir_info->n_backing_inode)
+		return ERR_PTR(-EBADF);
+
+	if (d_inode(mi->mi_backing_dir_path.dentry) ==
+		dir_info->n_backing_inode) {
+		/* We do lookup in the FS root. Show pseudo files. */
+		err = dir_lookup_pseudo_files(dir_inode->i_sb, dentry);
+		if (err != -ENOENT)
+			goto out;
+		err = 0;
+	}
+
+	dir_dentry = dget_parent(dentry);
+	get_incfs_backing_path(dir_dentry, &dir_backing_path);
+	backing_dentry = incfs_lookup_dentry(dir_backing_path.dentry,
+						dentry->d_name.name);
+
+	if (!backing_dentry || IS_ERR(backing_dentry)) {
+		err = IS_ERR(backing_dentry)
+			? PTR_ERR(backing_dentry)
+			: -EFAULT;
+		backing_dentry = NULL;
+		goto out;
+	} else {
+		struct inode *inode = NULL;
+		struct path backing_path = {
+			.mnt = dir_backing_path.mnt,
+			.dentry = backing_dentry
+		};
+
+		err = incfs_init_dentry(dentry, &backing_path);
+		if (err)
+			goto out;
+
+		if (!d_really_is_positive(backing_dentry)) {
+			/*
+			 * No such entry found in the backing dir.
+			 * Create a negative entry.
+			 */
+			d_add(dentry, NULL);
+			err = 0;
+			goto out;
+		}
+
+		if (d_inode(backing_dentry)->i_sb !=
+				dir_info->n_backing_inode->i_sb) {
+			/*
+			 * Somehow after the path lookup we ended up in a
+			 * different fs mount. If we keep going it's going
+			 * to end badly.
+			 */
+			err = -EXDEV;
+			goto out;
+		}
+
+		inode = fetch_regular_inode(dir_inode->i_sb, backing_dentry);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto out;
+		}
+
+		d_add(dentry, inode);
+	}
+
+out:
+	dput(dir_dentry);
+	dput(backing_dentry);
+	path_put(&dir_backing_path);
+	if (err)
+		pr_debug("incfs: %s %s %d\n", __func__,
+			 dentry->d_name.name, err);
+	return ERR_PTR(err);
+}
+
+static int dir_mkdir(struct user_namespace *ns, struct inode *dir,
+		     struct dentry *dentry, umode_t mode)
+{
+	struct mount_info *mi = get_mount_info(dir->i_sb);
+	struct inode_info *dir_node = get_incfs_node(dir);
+	struct dentry *backing_dentry = NULL;
+	struct path backing_path = {};
+	int err = 0;
+
+	if (!mi || !dir_node || !dir_node->n_backing_inode)
+		return -EBADF;
+
+	err = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (err)
+		return err;
+
+	get_incfs_backing_path(dentry, &backing_path);
+	backing_dentry = backing_path.dentry;
+
+	if (!backing_dentry) {
+		err = -EBADF;
+		goto path_err;
+	}
+
+	if (backing_dentry->d_parent == mi->mi_index_dir) {
+		/* Can't create a subdir inside .index */
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (backing_dentry->d_parent == mi->mi_incomplete_dir) {
+		/* Can't create a subdir inside .incomplete */
+		err = -EBUSY;
+		goto out;
+	}
+	inode_lock_nested(dir_node->n_backing_inode, I_MUTEX_PARENT);
+	err = vfs_mkdir(ns, dir_node->n_backing_inode, backing_dentry, mode | 0222);
+	inode_unlock(dir_node->n_backing_inode);
+	if (!err) {
+		struct inode *inode = NULL;
+
+		if (d_really_is_negative(backing_dentry) ||
+			unlikely(d_unhashed(backing_dentry))) {
+			err = -EINVAL;
+			goto out;
+		}
+
+		inode = fetch_regular_inode(dir->i_sb, backing_dentry);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto out;
+		}
+		d_instantiate(dentry, inode);
+	}
+
+out:
+	if (d_really_is_negative(dentry))
+		d_drop(dentry);
+	path_put(&backing_path);
+
+path_err:
+	mutex_unlock(&mi->mi_dir_struct_mutex);
+	if (err)
+		pr_debug("incfs: %s err:%d\n", __func__, err);
+	return err;
+}
+
+/*
+ * Delete file referenced by backing_dentry and if appropriate its hardlink
+ * from .index and .incomplete
+ */
+static int file_delete(struct mount_info *mi, struct dentry *dentry,
+			struct dentry *backing_dentry, int nlink)
+{
+	struct dentry *index_file_dentry = NULL;
+	struct dentry *incomplete_file_dentry = NULL;
+	/* 2 chars per byte of file ID + 1 char for \0 */
+	char file_id_str[2 * sizeof(incfs_uuid_t) + 1] = {0};
+	ssize_t uuid_size = 0;
+	int error = 0;
+
+	WARN_ON(!mutex_is_locked(&mi->mi_dir_struct_mutex));
+
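+	/*
+	 * Besides the link being deleted, the file may have hidden hardlinks
+	 * in .index and .incomplete; once those are accounted for, any link
+	 * count beyond that means user-visible hardlinks remain, so only
+	 * unlink this one.
+	 */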
+	if (nlink > 3)
+		goto just_unlink;
+
+	uuid_size = vfs_getxattr(&init_user_ns, backing_dentry, INCFS_XATTR_ID_NAME,
+			file_id_str, 2 * sizeof(incfs_uuid_t));
+	if (uuid_size < 0) {
+		error = uuid_size;
+		goto out;
+	}
+
+	if (uuid_size != 2 * sizeof(incfs_uuid_t)) {
+		error = -EBADMSG;
+		goto out;
+	}
+
+	index_file_dentry = incfs_lookup_dentry(mi->mi_index_dir, file_id_str);
+	if (IS_ERR(index_file_dentry)) {
+		error = PTR_ERR(index_file_dentry);
+		index_file_dentry = NULL;
+		goto out;
+	}
+
+	if (d_really_is_positive(index_file_dentry) && nlink > 0)
+		nlink--;
+
+	if (nlink > 2)
+		goto just_unlink;
+
+	incomplete_file_dentry = incfs_lookup_dentry(mi->mi_incomplete_dir,
+						     file_id_str);
+	if (IS_ERR(incomplete_file_dentry)) {
+		error = PTR_ERR(incomplete_file_dentry);
+		incomplete_file_dentry = NULL;
+		goto out;
+	}
+
+	if (d_really_is_positive(incomplete_file_dentry) && nlink > 0)
+		nlink--;
+
+	if (nlink > 1)
+		goto just_unlink;
+
+	if (d_really_is_positive(index_file_dentry)) {
+		error = incfs_unlink(index_file_dentry);
+		if (error)
+			goto out;
+		notify_unlink(dentry, file_id_str, INCFS_INDEX_NAME);
+	}
+
+	if (d_really_is_positive(incomplete_file_dentry)) {
+		error = incfs_unlink(incomplete_file_dentry);
+		if (error)
+			goto out;
+		notify_unlink(dentry, file_id_str, INCFS_INCOMPLETE_NAME);
+	}
+
+just_unlink:
+	error = incfs_unlink(backing_dentry);
+
+out:
+	dput(index_file_dentry);
+	dput(incomplete_file_dentry);
+	if (error)
+		pr_debug("incfs: delete_file_from_index err:%d\n", error);
+	return error;
+}
+
+static int dir_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct mount_info *mi = get_mount_info(dir->i_sb);
+	struct path backing_path = {};
+	struct kstat stat;
+	int err = 0;
+
+	if (!mi)
+		return -EBADF;
+
+	err = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (err)
+		return err;
+
+	get_incfs_backing_path(dentry, &backing_path);
+	if (!backing_path.dentry) {
+		err = -EBADF;
+		goto path_err;
+	}
+
+	if (backing_path.dentry->d_parent == mi->mi_index_dir) {
+		/* Direct unlinks from .index are not allowed. */
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (backing_path.dentry->d_parent == mi->mi_incomplete_dir) {
+		/* Direct unlinks from .incomplete are not allowed. */
+		err = -EBUSY;
+		goto out;
+	}
+
+	err = vfs_getattr(&backing_path, &stat, STATX_NLINK,
+			  AT_STATX_SYNC_AS_STAT);
+	if (err)
+		goto out;
+
+	err = file_delete(mi, dentry, backing_path.dentry, stat.nlink);
+
+	d_drop(dentry);
+out:
+	path_put(&backing_path);
+path_err:
+	if (err)
+		pr_debug("incfs: %s err:%d\n", __func__, err);
+	mutex_unlock(&mi->mi_dir_struct_mutex);
+	return err;
+}
+
+static int dir_link(struct dentry *old_dentry, struct inode *dir,
+			 struct dentry *new_dentry)
+{
+	struct mount_info *mi = get_mount_info(dir->i_sb);
+	struct path backing_old_path = {};
+	struct path backing_new_path = {};
+	int error = 0;
+
+	if (!mi)
+		return -EBADF;
+
+	error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (error)
+		return error;
+
+	get_incfs_backing_path(old_dentry, &backing_old_path);
+	get_incfs_backing_path(new_dentry, &backing_new_path);
+
+	if (backing_new_path.dentry->d_parent == mi->mi_index_dir) {
+		/* Can't link to .index */
+		error = -EBUSY;
+		goto out;
+	}
+
+	if (backing_new_path.dentry->d_parent == mi->mi_incomplete_dir) {
+		/* Can't link to .incomplete */
+		error = -EBUSY;
+		goto out;
+	}
+
+	error = incfs_link(backing_old_path.dentry, backing_new_path.dentry);
+	if (!error) {
+		struct inode *inode = NULL;
+		struct dentry *bdentry = backing_new_path.dentry;
+
+		if (d_really_is_negative(bdentry)) {
+			error = -EINVAL;
+			goto out;
+		}
+
+		inode = fetch_regular_inode(dir->i_sb, bdentry);
+		if (IS_ERR(inode)) {
+			error = PTR_ERR(inode);
+			goto out;
+		}
+		d_instantiate(new_dentry, inode);
+	}
+
+out:
+	path_put(&backing_old_path);
+	path_put(&backing_new_path);
+	if (error)
+		pr_debug("incfs: %s err:%d\n", __func__, error);
+	mutex_unlock(&mi->mi_dir_struct_mutex);
+	return error;
+}
+
+static int dir_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct mount_info *mi = get_mount_info(dir->i_sb);
+	struct path backing_path = {};
+	int err = 0;
+
+	if (!mi)
+		return -EBADF;
+
+	err = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (err)
+		return err;
+
+	get_incfs_backing_path(dentry, &backing_path);
+	if (!backing_path.dentry) {
+		err = -EBADF;
+		goto path_err;
+	}
+
+	if (backing_path.dentry == mi->mi_index_dir) {
+		/* Can't delete .index */
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (backing_path.dentry == mi->mi_incomplete_dir) {
+		/* Can't delete .incomplete */
+		err = -EBUSY;
+		goto out;
+	}
+
+	err = incfs_rmdir(backing_path.dentry);
+	if (!err)
+		d_drop(dentry);
+out:
+	path_put(&backing_path);
+
+path_err:
+	if (err)
+		pr_debug("incfs: %s err:%d\n", __func__, err);
+	mutex_unlock(&mi->mi_dir_struct_mutex);
+	return err;
+}
+
+static int dir_rename(struct inode *old_dir, struct dentry *old_dentry,
+		struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct mount_info *mi = get_mount_info(old_dir->i_sb);
+	struct dentry *backing_old_dentry;
+	struct dentry *backing_new_dentry;
+	struct dentry *backing_old_dir_dentry;
+	struct dentry *backing_new_dir_dentry;
+	struct inode *target_inode;
+	struct dentry *trap;
+	struct renamedata rd = {};
+	int error = 0;
+
+	error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (error)
+		return error;
+
+	backing_old_dentry = get_incfs_dentry(old_dentry)->backing_path.dentry;
+
+	if (!backing_old_dentry || backing_old_dentry == mi->mi_index_dir ||
+	    backing_old_dentry == mi->mi_incomplete_dir) {
+		/* Renaming .index or .incomplete is not allowed */
+		error = -EBUSY;
+		goto exit;
+	}
+
+	backing_new_dentry = get_incfs_dentry(new_dentry)->backing_path.dentry;
+	dget(backing_old_dentry);
+	dget(backing_new_dentry);
+
+	backing_old_dir_dentry = dget_parent(backing_old_dentry);
+	backing_new_dir_dentry = dget_parent(backing_new_dentry);
+	target_inode = d_inode(new_dentry);
+
+	if (backing_old_dir_dentry == mi->mi_index_dir ||
+	    backing_old_dir_dentry == mi->mi_incomplete_dir) {
+		/* Direct moves from .index or .incomplete are not allowed. */
+		error = -EBUSY;
+		goto out;
+	}
+
+	trap = lock_rename(backing_old_dir_dentry, backing_new_dir_dentry);
+
+	if (trap == backing_old_dentry) {
+		error = -EINVAL;
+		goto unlock_out;
+	}
+	if (trap == backing_new_dentry) {
+		error = -ENOTEMPTY;
+		goto unlock_out;
+	}
+
+	rd.old_dir	= d_inode(backing_old_dir_dentry);
+	rd.old_dentry	= backing_old_dentry;
+	rd.new_dir	= d_inode(backing_new_dir_dentry);
+	rd.new_dentry	= backing_new_dentry;
+	error = vfs_rename(&rd);
+	if (error)
+		goto unlock_out;
+	if (target_inode)
+		fsstack_copy_attr_all(target_inode,
+			get_incfs_node(target_inode)->n_backing_inode);
+	fsstack_copy_attr_all(new_dir, d_inode(backing_new_dir_dentry));
+	if (new_dir != old_dir)
+		fsstack_copy_attr_all(old_dir, d_inode(backing_old_dir_dentry));
+
+unlock_out:
+	unlock_rename(backing_old_dir_dentry, backing_new_dir_dentry);
+
+out:
+	dput(backing_new_dir_dentry);
+	dput(backing_old_dir_dentry);
+	dput(backing_new_dentry);
+	dput(backing_old_dentry);
+
+exit:
+	mutex_unlock(&mi->mi_dir_struct_mutex);
+	if (error)
+		pr_debug("incfs: %s err:%d\n", __func__, error);
+	return error;
+}
+
+static int file_open(struct inode *inode, struct file *file)
+{
+	struct mount_info *mi = get_mount_info(inode->i_sb);
+	struct file *backing_file = NULL;
+	struct path backing_path = {};
+	int err = 0;
+	int flags = O_NOATIME | O_LARGEFILE |
+		(S_ISDIR(inode->i_mode) ? O_RDONLY : O_RDWR);
+	const struct cred *old_cred;
+
+	WARN_ON(file->private_data);
+
+	if (!mi)
+		return -EBADF;
+
+	get_incfs_backing_path(file->f_path.dentry, &backing_path);
+	if (!backing_path.dentry)
+		return -EBADF;
+
+	old_cred = override_creds(mi->mi_owner);
+	backing_file = dentry_open(&backing_path, flags, current_cred());
+	revert_creds(old_cred);
+	path_put(&backing_path);
+
+	if (IS_ERR(backing_file)) {
+		err = PTR_ERR(backing_file);
+		backing_file = NULL;
+		goto out;
+	}
+
+	if (S_ISREG(inode->i_mode)) {
+		struct incfs_file_data *fd = kzalloc(sizeof(*fd), GFP_NOFS);
+
+		if (!fd) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		*fd = (struct incfs_file_data) {
+			.fd_fill_permission = CANT_FILL,
+		};
+		file->private_data = fd;
+
+		err = make_inode_ready_for_data_ops(mi, inode, backing_file);
+		if (err)
+			goto out;
+
+		err = incfs_fsverity_file_open(inode, file);
+		if (err)
+			goto out;
+	} else if (S_ISDIR(inode->i_mode)) {
+		struct dir_file *dir = NULL;
+
+		dir = incfs_open_dir_file(mi, backing_file);
+		if (IS_ERR(dir))
+			err = PTR_ERR(dir);
+		else
+			file->private_data = dir;
+	} else
+		err = -EBADF;
+
+out:
+	if (err) {
+		pr_debug("name:%s err: %d\n",
+			 file->f_path.dentry->d_name.name, err);
+		if (S_ISREG(inode->i_mode))
+			kfree(file->private_data);
+		else if (S_ISDIR(inode->i_mode))
+			incfs_free_dir_file(file->private_data);
+
+		file->private_data = NULL;
+	}
+
+	if (backing_file)
+		fput(backing_file);
+	return err;
+}
+
+static int file_release(struct inode *inode, struct file *file)
+{
+	if (S_ISREG(inode->i_mode)) {
+		kfree(file->private_data);
+		file->private_data = NULL;
+	} else if (S_ISDIR(inode->i_mode)) {
+		struct dir_file *dir = get_incfs_dir_file(file);
+
+		incfs_free_dir_file(dir);
+	}
+
+	return 0;
+}
+
+static int dentry_revalidate(struct dentry *d, unsigned int flags)
+{
+	struct path backing_path = {};
+	struct inode_info *info = get_incfs_node(d_inode(d));
+	struct inode *binode = (info == NULL) ? NULL : info->n_backing_inode;
+	struct dentry *backing_dentry = NULL;
+	int result = 0;
+
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	get_incfs_backing_path(d, &backing_path);
+	backing_dentry = backing_path.dentry;
+	if (!backing_dentry)
+		goto out;
+
+	if (d_inode(backing_dentry) != binode) {
+		/*
+		 * The backing inodes obtained via the dentry and the inode
+		 * don't match. Most likely the backing dir was changed
+		 * directly, bypassing the Incremental FS interface.
+		 */
+		goto out;
+	}
+
+	if (backing_dentry->d_flags & DCACHE_OP_REVALIDATE) {
+		result = backing_dentry->d_op->d_revalidate(backing_dentry,
+				flags);
+	} else
+		result = 1;
+
+out:
+	path_put(&backing_path);
+	return result;
+}
+
+static void dentry_release(struct dentry *d)
+{
+	struct dentry_info *di = get_incfs_dentry(d);
+
+	if (di)
+		path_put(&di->backing_path);
+	kfree(d->d_fsdata);
+	d->d_fsdata = NULL;
+}
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+	struct inode_info *node = kzalloc(sizeof(*node), GFP_NOFS);
+
+	/* TODO: add a slab-based cache here. */
+	if (!node)
+		return NULL;
+	inode_init_once(&node->n_vfs_inode);
+	return &node->n_vfs_inode;
+}
+
+static void free_inode(struct inode *inode)
+{
+	struct inode_info *node = get_incfs_node(inode);
+
+	kfree(node);
+}
+
+static void evict_inode(struct inode *inode)
+{
+	struct inode_info *node = get_incfs_node(inode);
+
+	if (node) {
+		if (node->n_backing_inode) {
+			iput(node->n_backing_inode);
+			node->n_backing_inode = NULL;
+		}
+		if (node->n_file) {
+			incfs_free_data_file(node->n_file);
+			node->n_file = NULL;
+		}
+	}
+
+	truncate_inode_pages(&inode->i_data, 0);
+	clear_inode(inode);
+}
+
+static int incfs_setattr(struct user_namespace *ns, struct dentry *dentry,
+			 struct iattr *ia)
+{
+	struct dentry_info *di = get_incfs_dentry(dentry);
+	struct dentry *backing_dentry;
+	struct inode *backing_inode;
+	int error;
+
+	if (ia->ia_valid & ATTR_SIZE)
+		return -EINVAL;
+
+	if (!di)
+		return -EINVAL;
+	backing_dentry = di->backing_path.dentry;
+	if (!backing_dentry)
+		return -EINVAL;
+
+	backing_inode = d_inode(backing_dentry);
+
+	/* incfs files are read-only, but the backing files must be writable */
+	if (S_ISREG(backing_inode->i_mode)) {
+		if ((ia->ia_valid & ATTR_MODE) && (ia->ia_mode & 0222))
+			return -EINVAL;
+
+		ia->ia_mode |= 0222;
+	}
+
+	inode_lock(d_inode(backing_dentry));
+	error = notify_change(ns, backing_dentry, ia, NULL);
+	inode_unlock(d_inode(backing_dentry));
+
+	if (error)
+		return error;
+
+	if (S_ISREG(backing_inode->i_mode))
+		ia->ia_mode &= ~0222;
+
+	return simple_setattr(ns, dentry, ia);
+}
+
+static int incfs_getattr(struct user_namespace *ns, const struct path *path,
+			 struct kstat *stat, u32 request_mask,
+			 unsigned int query_flags)
+{
+	struct inode *inode = d_inode(path->dentry);
+
+	generic_fillattr(ns, inode, stat);
+
+	if (inode->i_ino < INCFS_START_INO_RANGE)
+		return 0;
+
+	stat->attributes &= ~STATX_ATTR_VERITY;
+	if (IS_VERITY(inode))
+		stat->attributes |= STATX_ATTR_VERITY;
+	stat->attributes_mask |= STATX_ATTR_VERITY;
+
+	if (request_mask & STATX_BLOCKS) {
+		struct kstat backing_kstat;
+		struct dentry_info *di = get_incfs_dentry(path->dentry);
+		int error = 0;
+		struct path *backing_path;
+
+		if (!di)
+			return -EFSCORRUPTED;
+		backing_path = &di->backing_path;
+		error = vfs_getattr(backing_path, &backing_kstat, STATX_BLOCKS,
+				    AT_STATX_SYNC_AS_STAT);
+		if (error)
+			return error;
+
+		stat->blocks = backing_kstat.blocks;
+	}
+
+	return 0;
+}
+
+static ssize_t incfs_getxattr(struct dentry *d, const char *name,
+			void *value, size_t size)
+{
+	struct dentry_info *di = get_incfs_dentry(d);
+	struct mount_info *mi = get_mount_info(d->d_sb);
+	char *stored_value;
+	size_t stored_size;
+	int i;
+
+	if (di && di->backing_path.dentry)
+		return vfs_getxattr(&init_user_ns, di->backing_path.dentry,
+				    name, value, size);
+
+	if (strcmp(name, "security.selinux"))
+		return -ENODATA;
+
+	for (i = 0; i < PSEUDO_FILE_COUNT; ++i)
+		if (!strcmp(d->d_iname, incfs_pseudo_file_names[i].data))
+			break;
+	if (i == PSEUDO_FILE_COUNT)
+		return -ENODATA;
+
+	stored_value = mi->pseudo_file_xattr[i].data;
+	stored_size = mi->pseudo_file_xattr[i].len;
+	if (!stored_value)
+		return -ENODATA;
+
+	if (stored_size > size)
+		return -E2BIG;
+
+	memcpy(value, stored_value, stored_size);
+	return stored_size;
+}
+
+static ssize_t incfs_setxattr(struct user_namespace *ns, struct dentry *d,
+			      const char *name, const void *value, size_t size,
+			      int flags)
+{
+	struct dentry_info *di = get_incfs_dentry(d);
+	struct mount_info *mi = get_mount_info(d->d_sb);
+	u8 **stored_value;
+	size_t *stored_size;
+	int i;
+
+	if (di && di->backing_path.dentry)
+		return vfs_setxattr(ns, di->backing_path.dentry, name, value,
+				    size, flags);
+
+	if (strcmp(name, "security.selinux"))
+		return -ENODATA;
+
+	if (size > INCFS_MAX_FILE_ATTR_SIZE)
+		return -E2BIG;
+
+	for (i = 0; i < PSEUDO_FILE_COUNT; ++i)
+		if (!strcmp(d->d_iname, incfs_pseudo_file_names[i].data))
+			break;
+	if (i == PSEUDO_FILE_COUNT)
+		return -ENODATA;
+
+	stored_value = &mi->pseudo_file_xattr[i].data;
+	stored_size = &mi->pseudo_file_xattr[i].len;
+	kfree(*stored_value);
+	*stored_value = kzalloc(size, GFP_NOFS);
+	if (!*stored_value)
+		return -ENOMEM;
+
+	memcpy(*stored_value, value, size);
+	*stored_size = size;
+	return 0;
+}
+
+static ssize_t incfs_listxattr(struct dentry *d, char *list, size_t size)
+{
+	struct dentry_info *di = get_incfs_dentry(d);
+
+	if (!di || !di->backing_path.dentry)
+		return -ENODATA;
+
+	return vfs_listxattr(di->backing_path.dentry, list, size);
+}
+
+struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
+			      const char *dev_name, void *data)
+{
+	struct mount_options options = {};
+	struct mount_info *mi = NULL;
+	struct path backing_dir_path = {};
+	struct dentry *index_dir = NULL;
+	struct dentry *incomplete_dir = NULL;
+	struct super_block *src_fs_sb = NULL;
+	struct inode *root_inode = NULL;
+	struct super_block *sb = sget(type, NULL, set_anon_super, flags, NULL);
+	int error = 0;
+
+	if (IS_ERR(sb))
+		return ERR_CAST(sb);
+
+	sb->s_op = &incfs_super_ops;
+	sb->s_d_op = &incfs_dentry_ops;
+	sb->s_flags |= S_NOATIME;
+	sb->s_magic = INCFS_MAGIC_NUMBER;
+	sb->s_time_gran = 1;
+	sb->s_blocksize = INCFS_DATA_FILE_BLOCK_SIZE;
+	sb->s_blocksize_bits = blksize_bits(sb->s_blocksize);
+	sb->s_xattr = incfs_xattr_ops;
+
+	BUILD_BUG_ON(PAGE_SIZE != INCFS_DATA_FILE_BLOCK_SIZE);
+
+	error = parse_options(&options, (char *)data);
+	if (error != 0) {
+		pr_err("incfs: Options parsing error. %d\n", error);
+		goto err;
+	}
+
+	sb->s_bdi->ra_pages = options.readahead_pages;
+	if (!dev_name) {
+		pr_err("incfs: Backing dir is not set, filesystem can't be mounted.\n");
+		error = -ENOENT;
+		goto err;
+	}
+
+	error = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+			&backing_dir_path);
+	if (error || backing_dir_path.dentry == NULL ||
+		!d_really_is_positive(backing_dir_path.dentry)) {
+		pr_err("incfs: Error accessing: %s.\n",
+			dev_name);
+		goto err;
+	}
+	src_fs_sb = backing_dir_path.dentry->d_sb;
+	sb->s_maxbytes = src_fs_sb->s_maxbytes;
+
+	mi = incfs_alloc_mount_info(sb, &options, &backing_dir_path);
+
+	if (IS_ERR_OR_NULL(mi)) {
+		error = PTR_ERR(mi);
+		pr_err("incfs: Error allocating mount info. %d\n", error);
+		mi = NULL;
+		goto err;
+	}
+
+	index_dir = open_or_create_special_dir(backing_dir_path.dentry,
+					       INCFS_INDEX_NAME);
+	if (IS_ERR_OR_NULL(index_dir)) {
+		error = PTR_ERR(index_dir);
+		pr_err("incfs: Can't find or create .index dir in %s\n",
+			dev_name);
+		/* No need to null index_dir since we don't put it */
+		goto err;
+	}
+	mi->mi_index_dir = index_dir;
+
+	incomplete_dir = open_or_create_special_dir(backing_dir_path.dentry,
+						    INCFS_INCOMPLETE_NAME);
+	if (IS_ERR_OR_NULL(incomplete_dir)) {
+		error = PTR_ERR(incomplete_dir);
+		pr_err("incfs: Can't find or create .incomplete dir in %s\n",
+			dev_name);
+		/* No need to null incomplete_dir since we don't put it */
+		goto err;
+	}
+	mi->mi_incomplete_dir = incomplete_dir;
+
+	sb->s_fs_info = mi;
+	root_inode = fetch_regular_inode(sb, backing_dir_path.dentry);
+	if (IS_ERR(root_inode)) {
+		error = PTR_ERR(root_inode);
+		goto err;
+	}
+
+	sb->s_root = d_make_root(root_inode);
+	if (!sb->s_root) {
+		error = -ENOMEM;
+		goto err;
+	}
+	error = incfs_init_dentry(sb->s_root, &backing_dir_path);
+	if (error)
+		goto err;
+
+	path_put(&backing_dir_path);
+	sb->s_flags |= SB_ACTIVE;
+
+	pr_debug("incfs: mount\n");
+	return dget(sb->s_root);
+err:
+	sb->s_fs_info = NULL;
+	path_put(&backing_dir_path);
+	incfs_free_mount_info(mi);
+	deactivate_locked_super(sb);
+	free_options(&options);
+	return ERR_PTR(error);
+}
+
+static int incfs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+	struct mount_options options;
+	struct mount_info *mi = get_mount_info(sb);
+	int err = 0;
+
+	sync_filesystem(sb);
+	err = parse_options(&options, (char *)data);
+	if (err)
+		return err;
+
+	if (options.report_uid != mi->mi_options.report_uid) {
+		pr_err("incfs: Can't change report_uid mount option on remount\n");
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	err = incfs_realloc_mount_info(mi, &options);
+	if (err)
+		goto out;
+
+	pr_debug("incfs: remount\n");
+
+out:
+	free_options(&options);
+	return err;
+}
+
+void incfs_kill_sb(struct super_block *sb)
+{
+	struct mount_info *mi = sb->s_fs_info;
+
+	pr_debug("incfs: unmount\n");
+	generic_shutdown_super(sb);
+	incfs_free_mount_info(mi);
+}
+
+static int show_options(struct seq_file *m, struct dentry *root)
+{
+	struct mount_info *mi = get_mount_info(root->d_sb);
+
+	seq_printf(m, ",read_timeout_ms=%u", mi->mi_options.read_timeout_ms);
+	seq_printf(m, ",readahead=%u", mi->mi_options.readahead_pages);
+	if (mi->mi_options.read_log_pages != 0) {
+		seq_printf(m, ",rlog_pages=%u", mi->mi_options.read_log_pages);
+		seq_printf(m, ",rlog_wakeup_cnt=%u",
+			   mi->mi_options.read_log_wakeup_count);
+	}
+	if (mi->mi_options.report_uid)
+		seq_puts(m, ",report_uid");
+
+	if (mi->mi_sysfs_node)
+		seq_printf(m, ",sysfs_name=%s",
+			   kobject_name(&mi->mi_sysfs_node->isn_sysfs_node));
+	return 0;
+}
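
A note on usage: parse_options()/show_options() above round-trip the mount options, and the backing directory is passed as the mount source. A minimal userspace sketch, assuming the filesystem type string is "incremental-fs", with all paths and option values purely illustrative:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* the backing dir doubles as the "device" argument */
	if (mount("/data/incremental/backing", "/mnt/incfs",
		  "incremental-fs", 0,
		  "read_timeout_ms=10000,readahead=8,sysfs_name=demo")) {
		perror("mount");
		return 1;
	}
	return 0;
}
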
diff --git a/fs/incfs/vfs.h b/fs/incfs/vfs.h
new file mode 100644
index 0000000..79fdf24
--- /dev/null
+++ b/fs/incfs/vfs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 Google LLC
+ */
+
+#ifndef _INCFS_VFS_H
+#define _INCFS_VFS_H
+
+extern const struct file_operations incfs_file_ops;
+extern const struct inode_operations incfs_file_inode_ops;
+
+void incfs_kill_sb(struct super_block *sb);
+struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
+			      const char *dev_name, void *data);
+int incfs_link(struct dentry *what, struct dentry *where);
+int incfs_unlink(struct dentry *dentry);
+
+static inline struct mount_info *get_mount_info(struct super_block *sb)
+{
+	struct mount_info *result = sb->s_fs_info;
+
+	WARN_ON(!result);
+	return result;
+}
+
+static inline struct super_block *file_superblock(struct file *f)
+{
+	struct inode *inode = file_inode(f);
+
+	return inode->i_sb;
+}
+
+#endif
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index b4dc510..81781fa 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -6,6 +6,7 @@
 #include <linux/module.h>
 #include <linux/compiler.h>
 #include <linux/fs.h>
+#include <linux/fscrypt.h>
 #include <linux/iomap.h>
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
@@ -178,11 +179,14 @@
 static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
 		loff_t pos, unsigned len)
 {
+	struct inode *inode = file_inode(dio->iocb->ki_filp);
 	struct page *page = ZERO_PAGE(0);
 	int flags = REQ_SYNC | REQ_IDLE;
 	struct bio *bio;
 
 	bio = bio_alloc(GFP_KERNEL, 1);
+	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
+				  GFP_KERNEL);
 	bio_set_dev(bio, iter->iomap.bdev);
 	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
 	bio->bi_private = dio;
@@ -309,6 +313,8 @@
 		}
 
 		bio = bio_alloc(GFP_KERNEL, nr_pages);
+		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
+					  GFP_KERNEL);
 		bio_set_dev(bio, iomap->bdev);
 		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
 		bio->bi_write_hint = dio->iocb->ki_hint;
diff --git a/fs/mpage.c b/fs/mpage.c
index 334e7d0..af32fda 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -32,6 +32,14 @@
 #include <linux/cleancache.h>
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/android_fs.h>
+
+EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_start);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end);
+
 /*
  * I/O completion handler for multipage BIOs.
  *
@@ -49,6 +57,16 @@
 	struct bio_vec *bv;
 	struct bvec_iter_all iter_all;
 
+	if (trace_android_fs_dataread_end_enabled() &&
+	    (bio_data_dir(bio) == READ)) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL)
+			trace_android_fs_dataread_end(first_page->mapping->host,
+						      page_offset(first_page),
+						      bio->bi_iter.bi_size);
+	}
+
 	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
 		page_endio(page, bio_op(bio),
@@ -60,6 +78,24 @@
 
 static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
 {
+	if (trace_android_fs_dataread_start_enabled() && (op == REQ_OP_READ)) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL) {
+			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+			path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    first_page->mapping->host);
+			trace_android_fs_dataread_start(
+				first_page->mapping->host,
+				page_offset(first_page),
+				bio->bi_iter.bi_size,
+				current->pid,
+				path,
+				current->comm);
+		}
+	}
 	bio->bi_end_io = mpage_end_io;
 	bio_set_op_attrs(bio, op, op_flags);
 	guard_bio_eod(bio);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 29fca32..92e9ac25 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -712,6 +712,8 @@
 	struct fsnotify_group *group;
 	struct inode *inode;
 	struct path path;
+	struct path alteredpath;
+	struct path *canonical_path = &path;
 	struct fd f;
 	int ret;
 	unsigned flags = 0;
@@ -758,13 +760,23 @@
 	if (ret)
 		goto fput_and_out;
 
+	/* support stacked filesystems */
+	if (path.dentry && path.dentry->d_op) {
+		if (path.dentry->d_op->d_canonical_path) {
+			path.dentry->d_op->d_canonical_path(&path,
+							    &alteredpath);
+			canonical_path = &alteredpath;
+			path_put(&path);
+		}
+	}
+
 	/* inode held in place by reference to path; group by fget on fd */
-	inode = path.dentry->d_inode;
+	inode = canonical_path->dentry->d_inode;
 	group = f.file->private_data;
 
 	/* create/update an inode mark */
 	ret = inotify_update_watch(group, inode, mask);
-	path_put(&path);
+	path_put(canonical_path);
 fput_and_out:
 	fdput(f);
 	return ret;
diff --git a/fs/open.c b/fs/open.c
index f732fb9..3a8a207 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -34,6 +34,7 @@
 #include <linux/compat.h>
 
 #include "internal.h"
+#include <trace/hooks/syscall_check.h>
 
 int do_truncate(struct user_namespace *mnt_userns, struct dentry *dentry,
 		loff_t length, unsigned int time_attrs, struct file *filp)
@@ -805,6 +806,7 @@
 		error = -ENODEV;
 		goto cleanup_all;
 	}
+	trace_android_vh_check_file_open(f);
 
 	error = security_file_open(f);
 	if (error)
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index fa125fe..888c3e1 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -15,6 +15,8 @@
 #include <linux/fs.h>
 #include "overlayfs.h"
 
+#define OVL_IOCB_MASK (IOCB_DSYNC | IOCB_HIPRI | IOCB_NOWAIT | IOCB_SYNC)
+
 struct ovl_aio_req {
 	struct kiocb iocb;
 	refcount_t ref;
@@ -237,22 +239,6 @@
 	touch_atime(&file->f_path);
 }
 
-static rwf_t ovl_iocb_to_rwf(int ifl)
-{
-	rwf_t flags = 0;
-
-	if (ifl & IOCB_NOWAIT)
-		flags |= RWF_NOWAIT;
-	if (ifl & IOCB_HIPRI)
-		flags |= RWF_HIPRI;
-	if (ifl & IOCB_DSYNC)
-		flags |= RWF_DSYNC;
-	if (ifl & IOCB_SYNC)
-		flags |= RWF_SYNC;
-
-	return flags;
-}
-
 static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
 {
 	if (refcount_dec_and_test(&aio_req->ref)) {
@@ -313,7 +299,8 @@
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	if (is_sync_kiocb(iocb)) {
 		ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
-				    ovl_iocb_to_rwf(iocb->ki_flags));
+				    iocb_to_rw_flags(iocb->ki_flags,
+						     OVL_IOCB_MASK));
 	} else {
 		struct ovl_aio_req *aio_req;
 
@@ -378,7 +365,7 @@
 	if (is_sync_kiocb(iocb)) {
 		file_start_write(real.file);
 		ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
-				     ovl_iocb_to_rwf(ifl));
+				     iocb_to_rw_flags(ifl, OVL_IOCB_MASK));
 		file_end_write(real.file);
 		/* Update size */
 		ovl_copyattr(ovl_inode_real(inode), inode);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 13eda8d..6fba6670 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -97,6 +97,7 @@
 #include <linux/time_namespace.h>
 #include <linux/resctrl.h>
 #include <linux/cn_proc.h>
+#include <linux/cpufreq_times.h>
 #include <trace/events/oom.h>
 #include "internal.h"
 #include "fd.h"
@@ -3277,6 +3278,9 @@
 #ifdef CONFIG_LIVEPATCH
 	ONE("patch_state",  S_IRUSR, proc_pid_patch_state),
 #endif
+#ifdef CONFIG_CPU_FREQ_TIMES
+	ONE("time_in_state", 0444, proc_time_in_state_show),
+#endif
 #ifdef CONFIG_STACKLEAK_METRICS
 	ONE("stack_depth", S_IRUGO, proc_stack_depth),
 #endif
@@ -3619,6 +3623,9 @@
 #ifdef CONFIG_SECCOMP_CACHE_DEBUG
 	ONE("seccomp_cache", S_IRUSR, proc_pid_seccomp_cache),
 #endif
+#ifdef CONFIG_CPU_FREQ_TIMES
+	ONE("time_in_state", 0444, proc_time_in_state_show),
+#endif
 };
 
 static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ad667db..d794bae 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -123,6 +123,56 @@
 }
 #endif
 
+static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
+{
+	const char __user *name = vma_get_anon_name(vma);
+	struct mm_struct *mm = vma->vm_mm;
+
+	unsigned long page_start_vaddr;
+	unsigned long page_offset;
+	unsigned long num_pages;
+	unsigned long max_len = NAME_MAX;
+	int i;
+
+	page_start_vaddr = (unsigned long)name & PAGE_MASK;
+	page_offset = (unsigned long)name - page_start_vaddr;
+	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);
+
+	seq_puts(m, "[anon:");
+
+	for (i = 0; i < num_pages; i++) {
+		int len;
+		int write_len;
+		const char *kaddr;
+		long pages_pinned;
+		struct page *page;
+
+		pages_pinned = get_user_pages_remote(mm, page_start_vaddr, 1, 0,
+						     &page, NULL, NULL);
+		if (pages_pinned < 1) {
+			seq_puts(m, "<fault>]");
+			return;
+		}
+
+		kaddr = (const char *)kmap(page);
+		len = min(max_len, PAGE_SIZE - page_offset);
+		write_len = strnlen(kaddr + page_offset, len);
+		seq_write(m, kaddr + page_offset, write_len);
+		kunmap(page);
+		put_page(page);
+
+		/* if strnlen hit a null terminator then we're done */
+		if (write_len != len)
+			break;
+
+		max_len -= len;
+		page_offset = 0;
+		page_start_vaddr += PAGE_SIZE;
+	}
+
+	seq_putc(m, ']');
+}
+
 static void *m_start(struct seq_file *m, loff_t *ppos)
 {
 	struct proc_maps_private *priv = m->private;
@@ -319,8 +369,15 @@
 			goto done;
 		}
 
-		if (is_stack(vma))
+		if (is_stack(vma)) {
 			name = "[stack]";
+			goto done;
+		}
+
+		if (vma_get_anon_name(vma)) {
+			seq_pad(m, ' ');
+			seq_print_vma_name(m, vma);
+		}
 	}
 
 done:
@@ -822,6 +879,11 @@
 	smap_gather_stats(vma, &mss, 0);
 
 	show_map_vma(m, vma);
+	if (vma_get_anon_name(vma)) {
+		seq_puts(m, "Name:           ");
+		seq_print_vma_name(m, vma);
+		seq_putc(m, '\n');
+	}
 
 	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
 	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
diff --git a/fs/sync.c b/fs/sync.c
index 3ce8e21..ab672eb 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -10,7 +10,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/namei.h>
-#include <linux/sched.h>
+#include <linux/sched/xacct.h>
 #include <linux/writeback.h>
 #include <linux/syscalls.h>
 #include <linux/linkage.h>
@@ -205,6 +205,7 @@
 	if (f.file) {
 		ret = vfs_fsync(f.file, datasync);
 		fdput(f);
+		inc_syscfs(current);
 	}
 	return ret;
 }
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 22bf14a..c6ac1a5 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -877,7 +877,8 @@
 				 new_flags, vma->anon_vma,
 				 vma->vm_file, vma->vm_pgoff,
 				 vma_policy(vma),
-				 NULL_VM_UFFD_CTX);
+				 NULL_VM_UFFD_CTX,
+				 vma_get_anon_name(vma));
 		if (prev)
 			vma = prev;
 		else
@@ -1436,7 +1437,8 @@
 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
 				 vma_policy(vma),
-				 ((struct vm_userfaultfd_ctx){ ctx }));
+				 ((struct vm_userfaultfd_ctx){ ctx }),
+				 vma_get_anon_name(vma));
 		if (prev) {
 			vma = prev;
 			goto next;
@@ -1613,7 +1615,8 @@
 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
 				 vma_policy(vma),
-				 NULL_VM_UFFD_CTX);
+				 NULL_VM_UFFD_CTX,
+				 vma_get_anon_name(vma));
 		if (prev) {
 			vma = prev;
 			goto next;
diff --git a/fs/verity/signature.c b/fs/verity/signature.c
index 143a530..e33f6c4 100644
--- a/fs/verity/signature.c
+++ b/fs/verity/signature.c
@@ -40,11 +40,38 @@
 int fsverity_verify_signature(const struct fsverity_info *vi,
 			      const u8 *signature, size_t sig_size)
 {
-	const struct inode *inode = vi->inode;
-	const struct fsverity_hash_alg *hash_alg = vi->tree_params.hash_alg;
+	unsigned int digest_algorithm =
+		vi->tree_params.hash_alg - fsverity_hash_algs;
+
+	return __fsverity_verify_signature(vi->inode, signature, sig_size,
+					   vi->file_digest, digest_algorithm);
+}
+
+/**
+ * __fsverity_verify_signature() - check a verity file's signature
+ * @inode: the file's inode
+ * @signature: the file's signature
+ * @sig_size: size of @signature. Can be 0 if there is no signature
+ * @file_digest: the file's digest
+ * @digest_algorithm: the digest algorithm used
+ *
+ * Takes the file's digest and optional signature, and verifies the signature
+ * against the digest and the fs-verity keyring, if appropriate.
+ *
+ * Return: 0 on success (signature valid or not required); -errno on failure
+ */
+int __fsverity_verify_signature(const struct inode *inode, const u8 *signature,
+				size_t sig_size, const u8 *file_digest,
+				unsigned int digest_algorithm)
+{
 	struct fsverity_formatted_digest *d;
+	struct fsverity_hash_alg *hash_alg = fsverity_get_hash_alg(inode,
+							digest_algorithm);
 	int err;
 
+	if (IS_ERR(hash_alg))
+		return PTR_ERR(hash_alg);
+
 	if (sig_size == 0) {
 		if (fsverity_require_signatures) {
 			fsverity_err(inode,
@@ -60,7 +87,7 @@
 	memcpy(d->magic, "FSVerity", 8);
 	d->digest_algorithm = cpu_to_le16(hash_alg - fsverity_hash_algs);
 	d->digest_size = cpu_to_le16(hash_alg->digest_size);
-	memcpy(d->digest, vi->file_digest, hash_alg->digest_size);
+	memcpy(d->digest, file_digest, hash_alg->digest_size);
 
 	err = verify_pkcs7_signature(d, sizeof(*d) + hash_alg->digest_size,
 				     signature, sig_size, fsverity_keyring,
@@ -83,9 +110,10 @@
 	}
 
 	pr_debug("Valid signature for file digest %s:%*phN\n",
-		 hash_alg->name, hash_alg->digest_size, vi->file_digest);
+		 hash_alg->name, hash_alg->digest_size, file_digest);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__fsverity_verify_signature);
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table_header *fsverity_sysctl_header;
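
Exporting __fsverity_verify_signature() lets other kernel code that already holds a file digest check a detached signature without a struct fsverity_info. A hedged caller sketch: the digest and signature buffers are assumed to come from elsewhere, and SHA-256 is chosen purely for illustration:

#include <linux/fsverity.h>

static int check_detached_sig(const struct inode *inode,
			      const u8 *sig, size_t sig_size,
			      const u8 digest[32])
{
	return __fsverity_verify_signature(inode, sig, sig_size, digest,
					   FS_VERITY_HASH_ALG_SHA256);
}
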
diff --git a/include/OWNERS b/include/OWNERS
new file mode 100644
index 0000000..4b815c2
--- /dev/null
+++ b/include/OWNERS
@@ -0,0 +1 @@
+per-file net/**=file:/net/OWNERS
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index c34a3e8..6292fa6 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -60,7 +60,7 @@
 	void (*free_cb)(struct kref *kref);
 };
 
-#define DRM_OBJECT_MAX_PROPERTY 24
+#define DRM_OBJECT_MAX_PROPERTY 64
 /**
  * struct drm_object_properties - property tracking for &drm_mode_object
  */
diff --git a/include/linux/OWNERS b/include/linux/OWNERS
new file mode 100644
index 0000000..68b6ded
--- /dev/null
+++ b/include/linux/OWNERS
@@ -0,0 +1,4 @@
+per-file bio.h=file:/block/OWNERS
+per-file blk*.h=file:/block/OWNERS
+per-file f2fs**=file:/fs/f2fs/OWNERS
+per-file net**=file:/net/OWNERS
diff --git a/include/linux/android_vendor.h b/include/linux/android_vendor.h
new file mode 100644
index 0000000..59fc573
--- /dev/null
+++ b/include/linux/android_vendor.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * android_vendor.h - Android vendor data
+ *
+ * Copyright 2020 Google LLC
+ *
+ * These macros are to be used to reserve space in kernel data structures
+ * for use by vendor modules.
+ *
+ * These macros should be used before the kernel ABI is "frozen".
+ * Fields can be added to various kernel structures that need space
+ * for functionality implemented in vendor modules. The use of
+ * these fields is vendor specific.
+ */
+#ifndef _ANDROID_VENDOR_H
+#define _ANDROID_VENDOR_H
+
+/*
+ * ANDROID_VENDOR_DATA
+ *   Reserve some "padding" in a structure for potential future use.
+ *   This is normally placed at the end of a structure.
+ *   number: the "number" of the padding variable in the structure.  Start with
+ *   1 and go up.
+ *
+ * ANDROID_VENDOR_DATA_ARRAY
+ *   Same as ANDROID_VENDOR_DATA but allocates an array of u64 with
+ *   the specified size
+ */
+#define ANDROID_VENDOR_DATA(n)		u64 android_vendor_data##n
+#define ANDROID_VENDOR_DATA_ARRAY(n, s)	u64 android_vendor_data##n[s]
+
+#define ANDROID_OEM_DATA(n)		u64 android_oem_data##n
+#define ANDROID_OEM_DATA_ARRAY(n, s)	u64 android_oem_data##n[s]
+
+#endif /* _ANDROID_VENDOR_H */
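
As a usage illustration (struct and field names hypothetical), the reserved fields sit at the tail of a structure and are only ever touched by vendor modules:

#include <linux/android_vendor.h>

struct example_driver_state {
	int state;
	/* reserved for vendor modules; meaning is vendor specific */
	ANDROID_VENDOR_DATA(1);
	ANDROID_VENDOR_DATA_ARRAY(2, 4);	/* four u64s */
	ANDROID_OEM_DATA(1);
};
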
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index b97cea8..cfd1efd 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -7,6 +7,7 @@
 
 #include <linux/types.h>
 #include <linux/percpu.h>
+#include <linux/android_vendor.h>
 
 void topology_normalize_cpu_scale(void);
 int topology_update_cpu_topology(void);
@@ -69,6 +70,8 @@
 	cpumask_t core_sibling;
 	cpumask_t cluster_sibling;
 	cpumask_t llc_sibling;
+
+	cpumask_t android_vendor_data1;
 };
 
 #ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
@@ -90,5 +93,6 @@
 void reset_cpu_topology(void);
 int parse_acpi_topology(void);
 #endif
+extern bool topology_update_done;
 
 #endif /* _LINUX_ARCH_TOPOLOGY_H_ */
diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
index bbab65b..706de8c 100644
--- a/include/linux/blk-crypto-profile.h
+++ b/include/linux/blk-crypto-profile.h
@@ -57,6 +57,21 @@
 	int (*keyslot_evict)(struct blk_crypto_profile *profile,
 			     const struct blk_crypto_key *key,
 			     unsigned int slot);
+
+	/**
+	 * @derive_sw_secret: Derive the software secret from a hardware-wrapped
+	 *		      key.
+	 *
+	 * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+	 * is supported.
+	 *
+	 * Must return 0 on success, -EBADMSG if the key is invalid, or another
+	 * -errno code on other errors.
+	 */
+	int (*derive_sw_secret)(struct blk_crypto_profile *profile,
+				const u8 *wrapped_key,
+				unsigned int wrapped_key_size,
+				u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
 };
 
 /**
@@ -85,6 +100,12 @@
 	unsigned int max_dun_bytes_supported;
 
 	/**
+	 * @key_types_supported: Supported types of keys --
+	 * BLK_CRYPTO_KEY_TYPE_STANDARD and/or BLK_CRYPTO_KEY_TYPE_HW_WRAPPED.
+	 */
+	unsigned int key_types_supported;
+
+	/**
 	 * @modes_supported: Array of bitmasks that specifies whether each
 	 * combination of crypto mode and data unit size is supported.
 	 * Specifically, the i'th bit of modes_supported[crypto_mode] is set if
@@ -154,6 +175,11 @@
 
 void blk_crypto_profile_destroy(struct blk_crypto_profile *profile);
 
+int blk_crypto_derive_sw_secret(struct blk_crypto_profile *profile,
+				const u8 *wrapped_key,
+				unsigned int wrapped_key_size,
+				u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
+
 void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
 				       const struct blk_crypto_profile *child);
 
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index 69b24fe..812e517 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -16,7 +16,58 @@
 	BLK_ENCRYPTION_MODE_MAX,
 };
 
-#define BLK_CRYPTO_MAX_KEY_SIZE		64
+/*
+ * Supported types of keys.  Must be bit-flags due to their use in
+ * blk_crypto_profile::key_types_supported.
+ */
+enum blk_crypto_key_type {
+	/*
+	 * Standard keys (i.e. "software keys").  These keys are simply kept in
+	 * raw, plaintext form in kernel memory.
+	 */
+	BLK_CRYPTO_KEY_TYPE_STANDARD = 1 << 0,
+
+	/*
+	 * Hardware-wrapped keys.  These keys are only present in kernel memory
+	 * in ephemerally-wrapped form, and they can only be unwrapped by
+	 * dedicated hardware.  For details, see the "Hardware-wrapped keys"
+	 * section of Documentation/block/inline-encryption.rst.
+	 */
+	BLK_CRYPTO_KEY_TYPE_HW_WRAPPED = 1 << 1,
+};
+
+/*
+ * Currently the maximum standard key size is 64 bytes, as that is the key size
+ * of BLK_ENCRYPTION_MODE_AES_256_XTS which takes the longest key.
+ *
+ * The maximum hardware-wrapped key size depends on the hardware's key wrapping
+ * algorithm, which is a hardware implementation detail, so it isn't precisely
+ * specified.  But currently 128 bytes is plenty in practice.  Implementations
+ * are recommended to wrap a 32-byte key for the hardware KDF with AES-256-GCM,
+ * which should result in a size closer to 64 bytes than 128.
+ *
+ * Both of these values can trivially be increased if ever needed.
+ */
+#define BLK_CRYPTO_MAX_STANDARD_KEY_SIZE	64
+#define BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE	128
+
+/* This should use max(), but max() doesn't work in a struct definition. */
+#define BLK_CRYPTO_MAX_ANY_KEY_SIZE \
+	(BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE > \
+	 BLK_CRYPTO_MAX_STANDARD_KEY_SIZE ? \
+	 BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE : BLK_CRYPTO_MAX_STANDARD_KEY_SIZE)
+
+/*
+ * Size of the "software secret" which can be derived from a hardware-wrapped
+ * key.  This is currently always 32 bytes.  Note, the choice of 32 bytes
+ * assumes that the software secret is only used directly for algorithms that
+ * don't require more than a 256-bit key to get the desired security strength.
+ * If it were to be used e.g. directly as an AES-256-XTS key, then this would
+ * need to be increased (which is possible if hardware supports it, but care
+ * would need to be taken to avoid breaking users who need exactly 32 bytes).
+ */
+#define BLK_CRYPTO_SW_SECRET_SIZE	32
+
 /**
  * struct blk_crypto_config - an inline encryption key's crypto configuration
  * @crypto_mode: encryption algorithm this key is for
@@ -25,20 +76,23 @@
  *	ciphertext.  This is always a power of 2.  It might be e.g. the
  *	filesystem block size or the disk sector size.
  * @dun_bytes: the maximum number of bytes of DUN used when using this key
+ * @key_type: the type of this key -- either standard or hardware-wrapped
  */
 struct blk_crypto_config {
 	enum blk_crypto_mode_num crypto_mode;
 	unsigned int data_unit_size;
 	unsigned int dun_bytes;
+	enum blk_crypto_key_type key_type;
 };
 
 /**
  * struct blk_crypto_key - an inline encryption key
- * @crypto_cfg: the crypto configuration (like crypto_mode, key size) for this
- *		key
+ * @crypto_cfg: the crypto mode, data unit size, key type, and other
+ *		characteristics of this key and how it will be used
  * @data_unit_size_bits: log2 of data_unit_size
- * @size: size of this key in bytes (determined by @crypto_cfg.crypto_mode)
- * @raw: the raw bytes of this key.  Only the first @size bytes are used.
+ * @size: size of this key in bytes.  The size of a standard key is fixed for a
+ *	  given crypto mode, but the size of a hardware-wrapped key can vary.
+ * @raw: the bytes of this key.  Only the first @size bytes are significant.
  *
  * A blk_crypto_key is immutable once created, and many bios can reference it at
  * the same time.  It must not be freed until all bios using it have completed
@@ -48,7 +102,7 @@
 	struct blk_crypto_config crypto_cfg;
 	unsigned int data_unit_size_bits;
 	unsigned int size;
-	u8 raw[BLK_CRYPTO_MAX_KEY_SIZE];
+	u8 raw[BLK_CRYPTO_MAX_ANY_KEY_SIZE];
 };
 
 #define BLK_CRYPTO_MAX_IV_SIZE		32
@@ -89,7 +143,9 @@
 				 unsigned int bytes,
 				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]);
 
-int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+int blk_crypto_init_key(struct blk_crypto_key *blk_key,
+			const u8 *raw_key, unsigned int raw_key_size,
+			enum blk_crypto_key_type key_type,
 			enum blk_crypto_mode_num crypto_mode,
 			unsigned int dun_bytes,
 			unsigned int data_unit_size);
@@ -112,6 +168,9 @@
 
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
 
+static inline void bio_clone_skip_dm_default_key(struct bio *dst,
+						 const struct bio *src);
+
 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
 /**
  * bio_crypt_clone - clone bio encryption context
@@ -127,9 +186,42 @@
 static inline int bio_crypt_clone(struct bio *dst, struct bio *src,
 				  gfp_t gfp_mask)
 {
+	bio_clone_skip_dm_default_key(dst, src);
 	if (bio_has_crypt_ctx(src))
 		return __bio_crypt_clone(dst, src, gfp_mask);
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+static inline void bio_set_skip_dm_default_key(struct bio *bio)
+{
+	bio->bi_skip_dm_default_key = true;
+}
+
+static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
+{
+	return bio->bi_skip_dm_default_key;
+}
+
+static inline void bio_clone_skip_dm_default_key(struct bio *dst,
+						 const struct bio *src)
+{
+	dst->bi_skip_dm_default_key = src->bi_skip_dm_default_key;
+}
+#else /* CONFIG_DM_DEFAULT_KEY */
+static inline void bio_set_skip_dm_default_key(struct bio *bio)
+{
+}
+
+static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
+{
+	return false;
+}
+
+static inline void bio_clone_skip_dm_default_key(struct bio *dst,
+						 const struct bio *src)
+{
+}
+#endif /* !CONFIG_DM_DEFAULT_KEY */
+
 #endif /* __LINUX_BLK_CRYPTO_H */
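
With the widened raw buffer and the extended blk_crypto_init_key() signature above, initializing a hardware-wrapped key means passing the wrapped blob with its actual size and key type. A minimal sketch, with the DUN width and data-unit size chosen purely for illustration:

#include <linux/blk-crypto.h>

static int example_init_wrapped_key(struct blk_crypto_key *key,
				    const u8 *wrapped,
				    unsigned int wrapped_size)
{
	return blk_crypto_init_key(key, wrapped, wrapped_size,
				   BLK_CRYPTO_KEY_TYPE_HW_WRAPPED,
				   BLK_ENCRYPTION_MODE_AES_256_XTS,
				   8 /* dun_bytes */,
				   4096 /* data_unit_size */);
}
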
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index fe065c3..40f0f80 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -268,6 +268,9 @@
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct bio_crypt_ctx	*bi_crypt_context;
+#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+	bool			bi_skip_dm_default_key;
+#endif
 #endif
 
 	union {
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index f59c875..f72c372 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -32,6 +32,7 @@
 #define CLK_OPS_PARENT_ENABLE	BIT(12)
 /* duty cycle call may be forwarded to the parent clock */
 #define CLK_DUTY_CYCLE_PARENT	BIT(13)
+#define CLK_DONT_HOLD_STATE	BIT(14) /* Don't hold state */
 
 struct clk;
 struct clk_hw;
@@ -205,6 +206,13 @@
  *		directory is provided as an argument.  Called with
  *		prepare_lock held.  Returns 0 on success, -EERROR otherwise.
  *
+ * @pre_rate_change: Optional callback for a clock to fulfill its rate
+ *		change requirements before any rate change has occurred in
+ *		its clock tree. Returns 0 on success, -EERROR otherwise.
+ *
+ * @post_rate_change: Optional callback for a clock to clean up any
+ *		requirements that were needed while the clock and its tree
+ *		were changing states. Returns 0 on success, -EERROR otherwise.
  *
  * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
  * implementations to split any work between atomic (enable) and sleepable
@@ -252,6 +260,12 @@
 	int		(*init)(struct clk_hw *hw);
 	void		(*terminate)(struct clk_hw *hw);
 	void		(*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+	int		(*pre_rate_change)(struct clk_hw *hw,
+					   unsigned long rate,
+					   unsigned long new_rate);
+	int		(*post_rate_change)(struct clk_hw *hw,
+					    unsigned long old_rate,
+					    unsigned long rate);
 };
 
 /**
@@ -1154,6 +1168,7 @@
 
 void clk_hw_unregister(struct clk_hw *hw);
 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
+void clk_sync_state(struct device *dev);
 
 /* helper functions */
 const char *__clk_get_name(const struct clk *clk);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 1ab29e6..81560d7 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1183,14 +1183,6 @@
 }
 #endif
 
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-			struct cpufreq_governor *old_gov);
-#else
-static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-			struct cpufreq_governor *old_gov) { }
-#endif
-
 extern void arch_freq_prepare_all(void);
 extern unsigned int arch_freq_get_on_cpu(int cpu);
 
diff --git a/include/linux/cpufreq_times.h b/include/linux/cpufreq_times.h
new file mode 100644
index 0000000..38272a5
--- /dev/null
+++ b/include/linux/cpufreq_times.h
@@ -0,0 +1,42 @@
+/* drivers/cpufreq/cpufreq_times.c
+ *
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_CPUFREQ_TIMES_H
+#define _LINUX_CPUFREQ_TIMES_H
+
+#include <linux/cpufreq.h>
+#include <linux/pid.h>
+
+#ifdef CONFIG_CPU_FREQ_TIMES
+void cpufreq_task_times_init(struct task_struct *p);
+void cpufreq_task_times_alloc(struct task_struct *p);
+void cpufreq_task_times_exit(struct task_struct *p);
+int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
+			    struct pid *pid, struct task_struct *p);
+void cpufreq_acct_update_power(struct task_struct *p, u64 cputime);
+void cpufreq_times_create_policy(struct cpufreq_policy *policy);
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+                                     unsigned int new_freq);
+#else
+static inline void cpufreq_task_times_init(struct task_struct *p) {}
+static inline void cpufreq_task_times_alloc(struct task_struct *p) {}
+static inline void cpufreq_task_times_exit(struct task_struct *p) {}
+static inline void cpufreq_acct_update_power(struct task_struct *p,
+					     u64 cputime) {}
+static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
+static inline void cpufreq_times_record_transition(
+	struct cpufreq_policy *policy, unsigned int new_freq) {}
+#endif /* CONFIG_CPU_FREQ_TIMES */
+#endif /* _LINUX_CPUFREQ_TIMES_H */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index d58e047..92b0a92 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -71,8 +71,6 @@
 extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(void);
 extern void cpuset_wait_for_hotplug(void);
-extern void cpuset_read_lock(void);
-extern void cpuset_read_unlock(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -196,9 +194,6 @@
 
 static inline void cpuset_wait_for_hotplug(void) { }
 
-static inline void cpuset_read_lock(void) { }
-static inline void cpuset_read_unlock(void) { }
-
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 9e23d33..485d4e7 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -149,6 +149,7 @@
 	struct vfsmount *(*d_automount)(struct path *);
 	int (*d_manage)(const struct path *, bool);
 	struct dentry *(*d_real)(struct dentry *, const struct inode *);
+	void (*d_canonical_path)(const struct path *, struct path *);
 } ____cacheline_aligned;
 
 /*
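
The inotify hunk above is the consumer of this hook: a stacked filesystem fills the second path with a counted reference to its backing path, so the watch lands on the real inode (the caller path_put()s it when done). A minimal provider sketch, where get_backing_path() is a hypothetical helper of the same shape as incfs's get_incfs_backing_path() in the vfs.c hunk above:

#include <linux/dcache.h>
#include <linux/path.h>

static void example_canonical_path(const struct path *path,
				   struct path *dest)
{
	/* hand back a reference to the backing path */
	get_backing_path(path->dentry, dest);	/* hypothetical helper */
}

const struct dentry_operations example_dentry_ops = {
	.d_canonical_path = example_canonical_path,
};
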
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 42a323a..30cae0c 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -230,6 +230,41 @@
 	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
 
 	/**
+	 * @begin_cpu_access_partial:
+	 *
+	 * This is called from dma_buf_begin_cpu_access_partial() and allows the
+	 * exporter to ensure that the memory specified in the range is
+	 * available for cpu access - the exporter might need to allocate or
+	 * swap-in and pin the backing storage.
+	 * The exporter also needs to ensure that cpu access is
+	 * coherent for the access direction. The direction can be used by the
+	 * exporter to optimize the cache flushing, i.e. access with a different
+	 * direction (read instead of write) might return stale or even bogus
+	 * data (e.g. when the exporter needs to copy the data to temporary
+	 * storage).
+	 *
+	 * This callback is optional.
+	 *
+	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
+	 * from userspace (where storage shouldn't be pinned to avoid handing
+	 * de-facto mlock rights to userspace) and for the kernel-internal
+	 * users of the various kmap interfaces, where the backing storage must
+	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
+	 * there are no in-kernel users of the kmap interfaces yet, this isn't
+	 * a real problem.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success or a negative error code on failure. This can for
+	 * example fail when the backing storage can't be allocated. Can also
+	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
+	 * needs to be restarted.
+	 */
+	int (*begin_cpu_access_partial)(struct dma_buf *dmabuf,
+					enum dma_data_direction,
+					unsigned int offset, unsigned int len);
+
+	/**
 	 * @end_cpu_access:
 	 *
 	 * This is called from dma_buf_end_cpu_access() when the importer is
@@ -247,6 +282,28 @@
 	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
 
 	/**
+	 * @end_cpu_access_partial:
+	 *
+	 * This is called from dma_buf_end_cpu_access_partial() when the
+	 * importer is done accessing the CPU. The exporter can use this to
+	 * limit cache flushing to only the range specified and to unpin any
+	 * resources pinned in @begin_cpu_access_partial.
+	 * The result of any dma_buf kmap calls after end_cpu_access_partial is
+	 * undefined.
+	 *
+	 * This callback is optional.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success or a negative error code on failure. Can return
+	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
+	 * to be restarted.
+	 */
+	int (*end_cpu_access_partial)(struct dma_buf *dmabuf,
+				      enum dma_data_direction,
+				      unsigned int offset, unsigned int len);
+
+	/**
 	 * @mmap:
 	 *
 	 * This callback is used by the dma_buf_mmap() function
@@ -285,6 +342,20 @@
 
 	int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
 	void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
+
+	/**
+	 * @get_flags:
+	 *
+	 * This is called by dma_buf_get_flags() and is used to get the
+	 * buffer's flags.
+	 * This callback is optional.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success or a negative error code on failure. On success flags
+	 * will be populated with the buffer's flags.
+	 */
+	int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);
 };
 
 /**
@@ -502,6 +573,8 @@
  * @importer_ops: importer operations for this attachment, if provided
  * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
  * @importer_priv: importer specific attachment data.
+ * @dma_map_attrs: DMA attributes to be used when the exporter maps the buffer
+ * through dma_buf_map_attachment().
  *
  * This structure holds the attachment information between the dma_buf buffer
  * and its user device(s). The list contains one attachment struct per device
@@ -522,6 +595,7 @@
 	const struct dma_buf_attach_ops *importer_ops;
 	void *importer_priv;
 	void *priv;
+	unsigned long dma_map_attrs;
 };
 
 /**
@@ -623,11 +697,18 @@
 void dma_buf_move_notify(struct dma_buf *dma_buf);
 int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
+int dma_buf_begin_cpu_access_partial(struct dma_buf *dma_buf,
+				     enum dma_data_direction dir,
+				     unsigned int offset, unsigned int len);
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 			   enum dma_data_direction dir);
+int dma_buf_end_cpu_access_partial(struct dma_buf *dma_buf,
+				     enum dma_data_direction dir,
+				     unsigned int offset, unsigned int len);
 
 int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
 		 unsigned long);
 int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
 void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
+int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);
 #endif /* __DMA_BUF_H__ */
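
The partial variants let an importer restrict cache maintenance to just the bytes it touches instead of the whole buffer. A hedged usage sketch (offset and length are illustrative, and the buffer is assumed to be CPU-mapped elsewhere):

#include <linux/dma-buf.h>

static int example_read_range(struct dma_buf *buf, unsigned int off,
			      unsigned int len)
{
	int ret;

	ret = dma_buf_begin_cpu_access_partial(buf, DMA_FROM_DEVICE,
					       off, len);
	if (ret)
		return ret;

	/* ... CPU reads of bytes [off, off + len) ... */

	return dma_buf_end_cpu_access_partial(buf, DMA_FROM_DEVICE,
					      off, len);
}
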
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 0c05561..44f1050 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -51,6 +51,15 @@
 void *dma_heap_get_drvdata(struct dma_heap *heap);
 
 /**
+ * dma_heap_get_dev() - get device struct for the heap
+ * @heap: DMA-Heap to retrieve device struct from
+ *
+ * Returns:
+ * The device struct for the heap.
+ */
+struct device *dma_heap_get_dev(struct dma_heap *heap);
+
+/**
  * dma_heap_get_name() - get heap name
  * @heap: DMA-Heap to retrieve private data for
  *
@@ -65,4 +74,49 @@
  */
 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
 
+/**
+ * dma_heap_put - drops a reference to a dmabuf heap, potentially freeing it
+ * @heap:		heap pointer
+ */
+void dma_heap_put(struct dma_heap *heap);
+
+/**
+ * dma_heap_find - Returns the registered dma_heap with the specified name
+ * @name: Name of the heap to find
+ *
+ * NOTE: dma_heaps returned from this function MUST be released
+ * using dma_heap_put() when the user is done.
+ */
+struct dma_heap *dma_heap_find(const char *name);
+
+/**
+ * dma_heap_buffer_alloc - Allocate dma-buf from a dma_heap
+ * @heap:	dma_heap to allocate from
+ * @len:	size to allocate
+ * @fd_flags:	flags to set on returned dma-buf fd
+ * @heap_flags:	flags to pass to the dma heap
+ *
+ * This is for internal dma-buf allocations only.
+ */
+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+				      unsigned int fd_flags,
+				      unsigned int heap_flags);
+
+/**
+ * dma_heap_buffer_free - Free a dma_buf allocated by dma_heap_buffer_alloc()
+ * @dma_buf:	dma_buf to free
+ *
+ * This is really only a simple wrapper around dma_buf_put().
+ */
+void dma_heap_buffer_free(struct dma_buf *);
+
+/**
+ * dma_heap_bufferfd_alloc - Allocate dma-buf fd from a dma_heap
+ * @heap:	dma_heap to allocate from
+ * @len:	size to allocate
+ * @fd_flags:	flags to set on returned dma-buf fd
+ * @heap_flags:	flags to pass to the dma heap
+ */
+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
+			    unsigned int fd_flags,
+			    unsigned int heap_flags);
 #endif /* _DMA_HEAPS_H */
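
Taken together, these allow in-kernel allocation from a named heap. A hedged sketch: the heap name "system" is an assumption, and the fd flags mirror what userspace would pass to the heap ioctl:

#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/fcntl.h>

static struct dma_buf *example_alloc(size_t len)
{
	struct dma_heap *heap = dma_heap_find("system");
	struct dma_buf *buf;

	if (!heap)
		return ERR_PTR(-ENOENT);

	buf = dma_heap_buffer_alloc(heap, len, O_RDWR | O_CLOEXEC, 0);
	dma_heap_put(heap);	/* drop the reference from dma_heap_find() */
	return buf;
}
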
diff --git a/include/linux/export.h b/include/linux/export.h
index 27d8487..088a380 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -118,7 +118,7 @@
  */
 #define __EXPORT_SYMBOL(sym, sec, ns)
 
-#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS) && !defined(MODULE)
 
 #include <generated/autoksyms.h>
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bbf812c..d161186 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3509,6 +3509,11 @@
 	return 0;
 }
 
+static inline rwf_t iocb_to_rw_flags(int ifl, int iocb_mask)
+{
+	return ifl & iocb_mask;
+}
+
 static inline ino_t parent_ino(struct dentry *dentry)
 {
 	ino_t res;
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 91ea947..f3100eb 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -714,6 +714,10 @@
 bool fscrypt_mergeable_bio_bh(struct bio *bio,
 			      const struct buffer_head *next_bh);
 
+bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter);
+
+u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks);
+
 #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
 
 static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
@@ -742,8 +746,36 @@
 {
 	return true;
 }
+
+static inline bool fscrypt_dio_supported(struct kiocb *iocb,
+					 struct iov_iter *iter)
+{
+	const struct inode *inode = file_inode(iocb->ki_filp);
+
+	return !fscrypt_needs_contents_encryption(inode);
+}
+
+static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk,
+					  u64 nr_blocks)
+{
+	return nr_blocks;
+}
 #endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
 
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+static inline bool
+fscrypt_inode_should_skip_dm_default_key(const struct inode *inode)
+{
+	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
+}
+#else
+static inline bool
+fscrypt_inode_should_skip_dm_default_key(const struct inode *inode)
+{
+	return false;
+}
+#endif
+
 /**
  * fscrypt_inode_uses_inline_crypto() - test whether an inode uses inline
  *					encryption
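A hedged sketch of how a filesystem's direct-I/O path might consume the two helpers declared above: fscrypt_dio_supported() gates DIO as a whole, and fscrypt_limit_io_blocks() clamps an extent so that, as we understand the interface, a single bio does not cross a crypto data-unit discontinuity. The surrounding function is illustrative:

static ssize_t my_fs_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
			    u64 lblk, u64 nr_blocks)
{
	const struct inode *inode = file_inode(iocb->ki_filp);

	if (!fscrypt_dio_supported(iocb, iter))
		return -EINVAL;		/* caller falls back to buffered I/O */

	nr_blocks = fscrypt_limit_io_blocks(inode, lblk, nr_blocks);
	/* ... build and submit a bio covering nr_blocks ... */
	return 0;
}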
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index b568b3c..19c118e 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -233,4 +233,18 @@
 	return fsverity_get_info(inode) != NULL;
 }
 
+#ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
+int __fsverity_verify_signature(const struct inode *inode, const u8 *signature,
+				size_t sig_size, const u8 *file_digest,
+				unsigned int digest_algorithm);
+#else /* !CONFIG_FS_VERITY_BUILTIN_SIGNATURES */
+static inline int __fsverity_verify_signature(const struct inode *inode,
+				const u8 *signature, size_t sig_size,
+				const u8 *file_digest,
+				unsigned int digest_algorithm)
+{
+	return 0;
+}
+#endif /* !CONFIG_FS_VERITY_BUILTIN_SIGNATURES */
+
 #endif	/* _LINUX_FSVERITY_H */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 8fcc384..9baa011 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -473,16 +473,7 @@
 	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
 )
 
-static inline enum zone_type gfp_zone(gfp_t flags)
-{
-	enum zone_type z;
-	int bit = (__force int) (flags & GFP_ZONEMASK);
-
-	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
-					 ((1 << GFP_ZONES_SHIFT) - 1);
-	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
-	return z;
-}
+enum zone_type gfp_zone(gfp_t flags);
 
 /*
  * There is only one page-allocator function, and two main namespaces to
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 1f22a30..48a6365 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -529,6 +529,12 @@
 };
 
 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirqs where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)       | \
+			   (1 << NET_RX_SOFTIRQ)       | \
+			   (1 << BLOCK_SOFTIRQ)        | \
+			   (1 << IRQ_POLL_SOFTIRQ) | \
+			   (1 << TASKLET_SOFTIRQ))
 
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -555,6 +561,7 @@
 extern void raise_softirq(unsigned int nr);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
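LONG_SOFTIRQ_MASK pairs with the per-CPU active_softirqs word exported above, so a scheduler-side consumer can tell whether a CPU is inside a potentially long-running softirq. A minimal sketch, with a helper name of our own:

static inline bool cpu_in_long_softirq(int cpu)
{
	/* Non-zero iff a 'long' softirq is currently running on @cpu. */
	return !!(per_cpu(active_softirqs, cpu) & LONG_SOFTIRQ_MASK);
}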
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 20c1f96..9021571 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -43,6 +43,7 @@
 	__s32		accept_ra_rt_info_max_plen;
 #endif
 #endif
+	__s32		accept_ra_rt_table;
 	__s32		proxy_ndp;
 	__s32		accept_source_route;
 	__s32		accept_ra_from_local;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 81cbf85..6bb55ca 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -652,6 +652,8 @@
 	return !!(val & ICC_SRE_EL1_SRE);
 }
 
+void gic_resume(void);
+
 #endif
 
 #endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a7e4a9e..e72fcea 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2658,7 +2658,7 @@
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
 	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-	struct mempolicy *, struct vm_userfaultfd_ctx);
+	struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
 	unsigned long addr, int new_below);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c3a6e62..413db9a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -426,11 +426,18 @@
 	/*
 	 * For areas with an address space and backing store,
 	 * linkage into the address_space->i_mmap interval tree.
+	 *
+	 * For private anonymous mappings, a pointer to a null-terminated string
+	 * in the user process containing the name given to the vma, or NULL
+	 * if unnamed.
 	 */
-	struct {
-		struct rb_node rb;
-		unsigned long rb_subtree_last;
-	} shared;
+	union {
+		struct {
+			struct rb_node rb;
+			unsigned long rb_subtree_last;
+		} shared;
+		const char __user *anon_name;
+	};
 
 	/*
 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
@@ -875,4 +882,13 @@
 	unsigned long val;
 } swp_entry_t;
 
+/* Return the name for an anonymous mapping or NULL for a file-backed mapping */
+static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		return NULL;
+
+	return vma->anon_name;
+}
+
 #endif /* _LINUX_MM_TYPES_H */
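Since anon_name points into user memory, consumers have to copy it out before printing; a hedged sketch of a /proc-style reader (buffer size and seq_file plumbing are illustrative):

static void show_anon_name(struct seq_file *m, struct vm_area_struct *vma)
{
	const char __user *name = vma_get_anon_name(vma);
	char buf[80];
	long n;

	if (!name)
		return;

	n = strncpy_from_user(buf, name, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';	/* terminate in case of truncation */
		seq_printf(m, "[anon:%s]", buf);
	}
}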
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 936dc0b..0098d6f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -154,9 +154,7 @@
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
 	/* Second 128 byte cacheline */
 	NR_BOUNCE,
-#if IS_ENABLED(CONFIG_ZSMALLOC)
 	NR_ZSPAGES,		/* allocated in zsmalloc */
-#endif
 	NR_FREE_CMA_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
diff --git a/include/linux/module.h b/include/linux/module.h
index c9f1200..34fb763 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -380,6 +380,7 @@
 	struct module_attribute *modinfo_attrs;
 	const char *version;
 	const char *srcversion;
+	const char *scmversion;
 	struct kobject *holders_dir;
 
 	/* Exported symbols */
@@ -404,10 +405,12 @@
 	const s32 *gpl_crcs;
 	bool using_gplonly_symbols;
 
-#ifdef CONFIG_MODULE_SIG
-	/* Signature was verified. */
+	/*
+	 * Signature was verified. Kept unconditionally on Android so that
+	 * struct module has the same layout whether or not module signing
+	 * is enabled, preserving ABI compatibility with signed modules.
+	 */
 	bool sig_ok;
-#endif
 
 	bool async_probe_requested;
 
diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h
new file mode 100644
index 0000000..a391871
--- /dev/null
+++ b/include/linux/netfilter/xt_quota2.h
@@ -0,0 +1,26 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+#include <linux/types.h>
+
+enum xt_quota_flags {
+	XT_QUOTA_INVERT    = 1 << 0,
+	XT_QUOTA_GROW      = 1 << 1,
+	XT_QUOTA_PACKET    = 1 << 2,
+	XT_QUOTA_NO_CHANGE = 1 << 3,
+	XT_QUOTA_MASK      = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+	char name[15];
+	u_int8_t flags;
+
+	/* Comparison-invariant */
+	aligned_u64 quota;
+
+	/* Used internally by the kernel */
+	struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index cf48983..7e0e2d8 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -58,6 +58,27 @@
 extern unsigned long of_get_flat_dt_root(void);
 extern uint32_t of_get_flat_dt_phandle(unsigned long node);
 
+/*
+ * early_init_dt_scan_chosen - scan the device tree for ramdisk and bootargs
+ *
+ * The boot arguments will be placed into the memory pointed to by @data.
+ * That memory should be COMMAND_LINE_SIZE big and initialized to be a valid
+ * (possibly empty) string.  Logic for what will be in @data after this
+ * function finishes:
+ *
+ * - CONFIG_CMDLINE_FORCE=true
+ *     CONFIG_CMDLINE
+ * - CONFIG_CMDLINE_EXTEND=true, @data is non-empty string
+ *     @data + dt bootargs (even if dt bootargs are empty)
+ * - CONFIG_CMDLINE_EXTEND=true, @data is empty string
+ *     CONFIG_CMDLINE + dt bootargs (even if dt bootargs are empty)
+ * - CONFIG_CMDLINE_FROM_BOOTLOADER=true, dt bootargs non-empty
+ *     dt bootargs
+ * - CONFIG_CMDLINE_FROM_BOOTLOADER=true, dt bootargs empty, @data is non-empty string
+ *     @data is left unchanged
+ * - CONFIG_CMDLINE_FROM_BOOTLOADER=true, dt bootargs empty, @data is empty string
+ *     CONFIG_CMDLINE (or "" if that's not defined)
+ */
 extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
 				     int depth, void *data);
 extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
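A worked illustration of the CONFIG_CMDLINE_EXTEND rows above, assuming concatenation is a plain append (the real logic lives in drivers/of/fdt.c; this only shows the expected outcome):

/*
 * With CONFIG_CMDLINE="console=ttyS0" and dt bootargs "root=/dev/vda":
 *
 *   @data == ""            ->  "console=ttyS0 root=/dev/vda"
 *   @data == "loglevel=7"  ->  "loglevel=7 root=/dev/vda"
 */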
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index d150a908..084d555 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -378,10 +378,7 @@
 	return __page_cache_alloc(mapping_gfp_mask(x));
 }
 
-static inline gfp_t readahead_gfp_mask(struct address_space *x)
-{
-	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
-}
+gfp_t readahead_gfp_mask(struct address_space *x);
 
 typedef int filler_t(void *, struct page *);
 
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 9ca1f12..ccd34f9 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -49,6 +49,12 @@
 	POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE,	/* dynamically adjusted speed */
 	POWER_SUPPLY_CHARGE_TYPE_CUSTOM,	/* use CHARGE_CONTROL_* props */
 	POWER_SUPPLY_CHARGE_TYPE_LONGLIFE,	/* slow speed, longer life */
+
+	/*
+	 * Forced to 50 to minimize the chance of userspace binary
+	 * incompatibility on newer upstream kernels.
+	 */
+	POWER_SUPPLY_CHARGE_TYPE_TAPER_EXT = 50,	/* charging in CV phase */
 };
 
 enum {
@@ -393,12 +399,22 @@
 #ifdef CONFIG_OF
 extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
 							const char *property);
+extern int power_supply_get_by_phandle_array(struct device_node *np,
+					     const char *property,
+					     struct power_supply **psy,
+					     ssize_t size);
 extern struct power_supply *devm_power_supply_get_by_phandle(
 				    struct device *dev, const char *property);
 #else /* !CONFIG_OF */
 static inline struct power_supply *
 power_supply_get_by_phandle(struct device_node *np, const char *property)
 { return NULL; }
+static inline int
+power_supply_get_by_phandle_array(struct device_node *np,
+				  const char *property,
+				  struct power_supply **psy,
+				  ssize_t size)
+{ return 0; }
 static inline struct power_supply *
 devm_power_supply_get_by_phandle(struct device *dev, const char *property)
 { return NULL; }
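A hedged sketch of a charger driver pulling in every supply listed in a DT phandle array; the property name, the array size, and the assumption that the return value is a count-or-negative-errno are ours:

static int my_charger_get_supplies(struct device_node *np)
{
	struct power_supply *psy[4];
	int ret;

	ret = power_supply_get_by_phandle_array(np, "power-supplies",
						psy, ARRAY_SIZE(psy));
	if (ret < 0)
		return ret;	/* supplies missing or array too small */

	/* ... use psy[0..ret-1]; put each one when done ... */
	return 0;
}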
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 78c351e..5c5553f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,7 @@
 #include <linux/rseq.h>
 #include <linux/seqlock.h>
 #include <linux/kcsan.h>
+#include <linux/android_vendor.h>
 #include <asm/kmap_size.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
@@ -997,6 +998,10 @@
 	u64				stimescaled;
 #endif
 	u64				gtime;
+#ifdef CONFIG_CPU_FREQ_TIMES
+	u64				*time_in_state;
+	unsigned int			max_state;
+#endif
 	struct prev_cputime		prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	struct vtime			vtime;
@@ -1109,6 +1114,7 @@
 	raw_spinlock_t			pi_lock;
 
 	struct wake_q_node		wake_q;
+	int				wake_q_count;
 
 #ifdef CONFIG_RT_MUTEXES
 	/* PI waiters blocked on a rt_mutex held by this task: */
@@ -1469,6 +1475,8 @@
 	struct callback_head		mce_kill_me;
 	int				mce_count;
 #endif
+	ANDROID_VENDOR_DATA_ARRAY(1, 64);
+	ANDROID_OEM_DATA_ARRAY(1, 6);
 
 #ifdef CONFIG_KRETPROBES
 	struct llist_head               kretprobe_instances;
@@ -1787,6 +1795,16 @@
 
 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
+
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+extern bool cpupri_check_rt(void);
+#else
+static inline bool cpupri_check_rt(void)
+{
+	return false;
+}
+#endif
+
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index c07bfa2..490708c 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -3,6 +3,7 @@
 #define _LINUX_SCHED_TOPOLOGY_H
 
 #include <linux/topology.h>
+#include <linux/android_vendor.h>
 
 #include <linux/sched/idle.h>
 
@@ -81,6 +82,8 @@
 	atomic_t	ref;
 	atomic_t	nr_busy_cpus;
 	int		has_idle_cores;
+
+	ANDROID_VENDOR_DATA(1);
 };
 
 struct sched_domain {
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 06cd8fb..9a1394f 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -38,6 +38,7 @@
 struct wake_q_head {
 	struct wake_q_node *first;
 	struct wake_q_node **lastp;
+	int count;
 };
 
 #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
@@ -52,6 +53,7 @@
 {
 	head->first = WAKE_Q_TAIL;
 	head->lastp = &head->first;
+	head->count = 0;
 }
 
 static inline bool wake_q_empty(struct wake_q_head *head)
diff --git a/include/linux/sched/xacct.h b/include/linux/sched/xacct.h
index c078f0a..9544c9d 100644
--- a/include/linux/sched/xacct.h
+++ b/include/linux/sched/xacct.h
@@ -28,6 +28,11 @@
 {
 	tsk->ioac.syscw++;
 }
+
+static inline void inc_syscfs(struct task_struct *tsk)
+{
+	tsk->ioac.syscfs++;
+}
 #else
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
@@ -44,6 +49,10 @@
 static inline void inc_syscw(struct task_struct *tsk)
 {
 }
+
+static inline void inc_syscfs(struct task_struct *tsk)
+{
+}
 #endif
 
 #endif /* _LINUX_SCHED_XACCT_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1810451..79c2ff9 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -669,8 +669,7 @@
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
-				   __alloc_size(1);
+extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)
 
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 8af13ba..04b337d 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -517,6 +517,7 @@
 extern bool pm_save_wakeup_count(unsigned int count);
 extern void pm_wakep_autosleep_enabled(bool set);
 extern void pm_print_active_wakeup_sources(void);
+extern void pm_get_active_wakeup_sources(char *pending_sources, size_t max);
 
 extern void lock_system_sleep(void);
 extern void unlock_system_sleep(void);
diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h
index 6f6acce..bb26108 100644
--- a/include/linux/task_io_accounting.h
+++ b/include/linux/task_io_accounting.h
@@ -19,6 +19,8 @@
 	u64 syscr;
 	/* # of write syscalls */
 	u64 syscw;
+	/* # of fsync syscalls */
+	u64 syscfs;
 #endif /* CONFIG_TASK_XACCT */
 
 #ifdef CONFIG_TASK_IO_ACCOUNTING
diff --git a/include/linux/task_io_accounting_ops.h b/include/linux/task_io_accounting_ops.h
index bb5498b..733ab62 100644
--- a/include/linux/task_io_accounting_ops.h
+++ b/include/linux/task_io_accounting_ops.h
@@ -97,6 +97,7 @@
 	dst->wchar += src->wchar;
 	dst->syscr += src->syscr;
 	dst->syscw += src->syscw;
+	dst->syscfs += src->syscfs;
 }
 #else
 static inline void task_chr_io_accounting_add(struct task_io_accounting *dst,
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 9d27622..043a7bf 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -592,6 +592,7 @@
 	struct config_group group;
 	struct list_head cfs_list;
 	struct usb_function_driver *fd;
+	struct usb_function *f;
 	int (*set_inst_name)(struct usb_function_instance *inst,
 			      const char *name);
 	void (*free_func_inst)(struct usb_function_instance *inst);
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
new file mode 100644
index 0000000..ebe3c4d
--- /dev/null
+++ b/include/linux/usb/f_accessory.h
@@ -0,0 +1,23 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_ACCESSORY_H
+#define __LINUX_USB_F_ACCESSORY_H
+
+#include <uapi/linux/usb/f_accessory.h>
+
+#endif /* __LINUX_USB_F_ACCESSORY_H */
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 35d7eed..de584a7 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -9,7 +9,8 @@
 
 #define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE	(1024 * 4)
 #define VIRTIO_VSOCK_MAX_BUF_SIZE		0xFFFFFFFFUL
-#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE		(1024 * 64)
+#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE		virtio_transport_max_vsock_pkt_buf_size
+extern uint virtio_transport_max_vsock_pkt_buf_size;
 
 enum {
 	VSOCK_VQ_RX     = 0, /* for host to guest data */
diff --git a/include/linux/wakeup_reason.h b/include/linux/wakeup_reason.h
new file mode 100644
index 0000000..54f5caa
--- /dev/null
+++ b/include/linux/wakeup_reason.h
@@ -0,0 +1,37 @@
+/*
+ * include/linux/wakeup_reason.h
+ *
+ * Logs the reason that caused the kernel to resume
+ * from suspend.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_WAKEUP_REASON_H
+#define _LINUX_WAKEUP_REASON_H
+
+#define MAX_SUSPEND_ABORT_LEN 256
+
+#ifdef CONFIG_SUSPEND
+void log_irq_wakeup_reason(int irq);
+void log_threaded_irq_wakeup_reason(int irq, int parent_irq);
+void log_suspend_abort_reason(const char *fmt, ...);
+void log_abnormal_wakeup_reason(const char *fmt, ...);
+void clear_wakeup_reasons(void);
+#else
+static inline void log_irq_wakeup_reason(int irq) { }
+static inline void log_threaded_irq_wakeup_reason(int irq, int parent_irq) { }
+static inline void log_suspend_abort_reason(const char *fmt, ...) { }
+static inline void log_abnormal_wakeup_reason(const char *fmt, ...) { }
+static inline void clear_wakeup_reasons(void) { }
+#endif
+
+#endif /* _LINUX_WAKEUP_REASON_H */
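A minimal sketch of a driver recording why it vetoed a suspend attempt; my_work and the message text are illustrative:

static struct work_struct my_work;	/* assumed queued elsewhere */

static int my_drv_suspend(struct device *dev)
{
	if (work_pending(&my_work)) {
		log_suspend_abort_reason("my_drv: work pending on %s",
					 dev_name(dev));
		return -EBUSY;		/* abort this suspend attempt */
	}
	return 0;
}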
diff --git a/include/net/TEST_MAPPING b/include/net/TEST_MAPPING
new file mode 100644
index 0000000..1eb8d43
--- /dev/null
+++ b/include/net/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+  "presubmit": [
+    {
+      "name": "CtsNetTestCases",
+      "options": [
+        {
+          "exclude-annotation": "com.android.testutils.SkipPresubmit"
+        }
+      ]
+    }
+  ]
+}
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 78ea3e3..d8873a9 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -269,6 +269,18 @@
 void addrconf_prefix_rcv(struct net_device *dev,
 			 u8 *opt, int len, bool sllao);
 
+/* Determines into what table to put autoconf PIO/RIO/default routes
+ * learned on this device.
+ *
+ * - If 0, use the same table for every device. This puts routes into
+ *   one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
+ *   (but note that these three are currently all equal to
+ *   RT6_TABLE_MAIN).
+ * - If > 0, use the specified table.
+ * - If < 0, put routes into table dev->ifindex + (-rt_table).
+ */
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table);
+
 /*
  *	anycast prototypes (anycast.c)
  */
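The three cases in the comment above reduce to a small decision. An illustrative restatement, not the in-tree body:

static u32 example_rt_table(const struct net_device *dev, int rt_table,
			    u32 default_table)
{
	if (rt_table == 0)
		return default_table;		/* shared RT6_TABLE_* table */
	if (rt_table > 0)
		return rt_table;		/* one fixed table for all devices */
	return dev->ifindex + (-rt_table);	/* a table per device */
}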
diff --git a/include/net/virt_wifi.h b/include/net/virt_wifi.h
new file mode 100644
index 0000000..343e739
--- /dev/null
+++ b/include/net/virt_wifi.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* include/net/virt_wifi.h
+ *
+ * Defines the extension interface for network data simulation.
+ *
+ * Copyright (C) 2019 Google, Inc.
+ *
+ * Author: lesl@google.com
+ */
+#ifndef __VIRT_WIFI_H
+#define __VIRT_WIFI_H
+
+struct virt_wifi_network_simulation {
+	void (*notify_device_open)(struct net_device *dev);
+	void (*notify_device_stop)(struct net_device *dev);
+	void (*notify_scan_trigger)(struct wiphy *wiphy,
+				    struct cfg80211_scan_request *request);
+	int (*generate_virt_scan_result)(struct wiphy *wiphy);
+};
+
+int virt_wifi_register_network_simulation(
+	    struct virt_wifi_network_simulation *ops);
+int virt_wifi_unregister_network_simulation(void);
+#endif
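A hedged sketch of a simulator module wiring itself in through this interface; every my_* symbol is illustrative:

/* Stub handlers standing in for the vendor module's real ones. */
static void my_open(struct net_device *dev) { }
static void my_stop(struct net_device *dev) { }
static void my_scan_trigger(struct wiphy *wiphy,
			    struct cfg80211_scan_request *req) { }
static int my_scan_results(struct wiphy *wiphy) { return 0; }

static struct virt_wifi_network_simulation my_sim_ops = {
	.notify_device_open	   = my_open,
	.notify_device_stop	   = my_stop,
	.notify_scan_trigger	   = my_scan_trigger,
	.generate_virt_scan_result = my_scan_results,
};

static int __init my_sim_init(void)
{
	return virt_wifi_register_network_simulation(&my_sim_ops);
}

static void __exit my_sim_exit(void)
{
	virt_wifi_unregister_network_simulation();
}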
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 2308210..b6289cf7 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1577,9 +1577,7 @@
 int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
 int xfrm_output(struct sock *sk, struct sk_buff *skb);
 
-#if IS_ENABLED(CONFIG_NET_PKTGEN)
 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
-#endif
 
 void xfrm_local_error(struct sk_buff *skb, int mtu);
 int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 8e6dd8a..3c8323d 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -239,6 +239,14 @@
 	.get = xhandler_get, .put = xhandler_put, \
 	.private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \
 					    xmax, xinvert) }
+#define SOC_SINGLE_MULTI_EXT(xname, xreg, xshift, xmax, xinvert, xcount,\
+	xhandler_get, xhandler_put) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_multi_ext, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control) \
+		{.reg = xreg, .shift = xshift, .rshift = xshift, .max = xmax, \
+		.count = xcount, .platform_max = xmax, .invert = xinvert} }
 #define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\
 	 xhandler_get, xhandler_put, tlv_array) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -575,6 +583,8 @@
 	struct snd_ctl_elem_value *ucontrol);
 int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol);
+int snd_soc_info_multi_ext(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo);
 
 /* SoC PCM stream information */
 struct snd_soc_pcm_stream {
@@ -1139,6 +1149,11 @@
 	unsigned int regbase, regcount, nbits, invert;
 };
 
+struct soc_multi_mixer_control {
+	int min, max, platform_max, count;
+	unsigned int reg, rreg, shift, rshift, invert;
+};
+
 /* enumerated kcontrol */
 struct soc_enum {
 	int reg;
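A hedged sketch of a codec driver exposing a four-value control through the new macro; the control name and the my_coeffs_* handlers are illustrative:

static int my_coeffs_get(struct snd_kcontrol *kc,
			 struct snd_ctl_elem_value *uc);
static int my_coeffs_put(struct snd_kcontrol *kc,
			 struct snd_ctl_elem_value *uc);

static const struct snd_kcontrol_new my_codec_controls[] = {
	/* name, reg, shift, max, invert, count, get, put */
	SOC_SINGLE_MULTI_EXT("DSP Coeffs", SND_SOC_NOPM, 0, 255, 0, 4,
			     my_coeffs_get, my_coeffs_put),
};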
diff --git a/include/trace/events/OWNERS b/include/trace/events/OWNERS
new file mode 100644
index 0000000..a63dbf4
--- /dev/null
+++ b/include/trace/events/OWNERS
@@ -0,0 +1 @@
+per-file f2fs**=file:/fs/f2fs/OWNERS
diff --git a/include/trace/events/android_fs.h b/include/trace/events/android_fs.h
new file mode 100644
index 0000000..7edb6bc
--- /dev/null
+++ b/include/trace/events/android_fs.h
@@ -0,0 +1,66 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM android_fs
+
+#if !defined(_TRACE_ANDROID_FS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_H
+
+#include <linux/fs.h>
+#include <linux/tracepoint.h>
+#include <trace/events/android_fs_template.h>
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_dataread_start,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *pathname, char *command),
+	TP_ARGS(inode, offset, bytes, pid, pathname, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_dataread_end,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	TP_ARGS(inode, offset, bytes));
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_datawrite_start,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *pathname, char *command),
+	TP_ARGS(inode, offset, bytes, pid, pathname, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_datawrite_end,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	TP_ARGS(inode, offset, bytes));
+
+#endif /* _TRACE_ANDROID_FS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
+#ifndef ANDROID_FSTRACE_GET_PATHNAME
+#define ANDROID_FSTRACE_GET_PATHNAME
+
+/* Sizes an on-stack array, so be careful when sizing this up! */
+#define MAX_TRACE_PATHBUF_LEN	256
+
+static inline char *
+android_fstrace_get_pathname(char *buf, int buflen, struct inode *inode)
+{
+	char *path;
+	struct dentry *d;
+
+	/*
+	 * d_obtain_alias() will either iput() if it locates an existing
+	 * dentry or transfer the reference to the new dentry created.
+	 * So get an extra reference here.
+	 */
+	ihold(inode);
+	d = d_obtain_alias(inode);
+	if (likely(!IS_ERR(d))) {
+		path = dentry_path_raw(d, buf, buflen);
+		if (unlikely(IS_ERR(path))) {
+			strcpy(buf, "ERROR");
+			path = buf;
+		}
+		dput(d);
+	} else {
+		strcpy(buf, "ERROR");
+		path = buf;
+	}
+	return path;
+}
+#endif
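A hedged sketch of the intended call pattern in a filesystem read path (the locals inode, pos, and count are assumed from the surrounding function; ext4 and f2fs historically used something very close to this):

	if (trace_android_fs_dataread_start_enabled()) {
		char buf[MAX_TRACE_PATHBUF_LEN], *path;

		path = android_fstrace_get_pathname(buf, sizeof(buf), inode);
		trace_android_fs_dataread_start(inode, pos, count,
						current->pid, path,
						current->comm);
	}
	/* ... perform the read ... */
	trace_android_fs_dataread_end(inode, pos, count);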
diff --git a/include/trace/events/android_fs_template.h b/include/trace/events/android_fs_template.h
new file mode 100644
index 0000000..efc4878
--- /dev/null
+++ b/include/trace/events/android_fs_template.h
@@ -0,0 +1,64 @@
+#if !defined(_TRACE_ANDROID_FS_TEMPLATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_TEMPLATE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(android_fs_data_start_template,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *pathname, char *command),
+	TP_ARGS(inode, offset, bytes, pid, pathname, command),
+	TP_STRUCT__entry(
+		__string(pathbuf, pathname)
+		__field(loff_t,	offset)
+		__field(int,	bytes)
+		__field(loff_t,	i_size)
+		__string(cmdline, command)
+		__field(pid_t,	pid)
+		__field(ino_t,	ino)
+	),
+	TP_fast_assign(
+		{
+			/*
+			 * Replace the spaces in filenames and cmdlines
+			 * because they screw up the tooling that parses
+			 * the traces.
+			 */
+			__assign_str(pathbuf, pathname);
+			(void)strreplace(__get_str(pathbuf), ' ', '_');
+			__entry->offset		= offset;
+			__entry->bytes		= bytes;
+			__entry->i_size		= i_size_read(inode);
+			__assign_str(cmdline, command);
+			(void)strreplace(__get_str(cmdline), ' ', '_');
+			__entry->pid		= pid;
+			__entry->ino		= inode->i_ino;
+		}
+	),
+	TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
+		  " pid %d, i_size %llu, ino %lu",
+		  __get_str(pathbuf), __entry->offset, __entry->bytes,
+		  __get_str(cmdline), __entry->pid, __entry->i_size,
+		  (unsigned long) __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(android_fs_data_end_template,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	TP_ARGS(inode, offset, bytes),
+	TP_STRUCT__entry(
+		__field(ino_t,	ino)
+		__field(loff_t,	offset)
+		__field(int,	bytes)
+	),
+	TP_fast_assign(
+		{
+			__entry->ino		= inode->i_ino;
+			__entry->offset		= offset;
+			__entry->bytes		= bytes;
+		}
+	),
+	TP_printk("ino %lu, offset %llu, bytes %d",
+		  (unsigned long) __entry->ino,
+		  __entry->offset, __entry->bytes)
+);
+
+#endif /* _TRACE_ANDROID_FS_TEMPLATE_H */
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index eeceafa..bb70f46 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -160,6 +160,51 @@
 	TP_ARGS(vec_nr)
 );
 
+DECLARE_EVENT_CLASS(tasklet,
+
+	TP_PROTO(void *func),
+
+	TP_ARGS(func),
+
+	TP_STRUCT__entry(
+		__field( void *,	func)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+	),
+
+	TP_printk("function=%ps", __entry->func)
+);
+
+DEFINE_EVENT(tasklet, tasklet_entry,
+
+	TP_PROTO(void *func),
+
+	TP_ARGS(func)
+);
+
+DEFINE_EVENT(tasklet, tasklet_exit,
+
+	TP_PROTO(void *func),
+
+	TP_ARGS(func)
+);
+
+DEFINE_EVENT(tasklet, tasklet_hi_entry,
+
+	TP_PROTO(void *func),
+
+	TP_ARGS(func)
+);
+
+DEFINE_EVENT(tasklet, tasklet_hi_exit,
+
+	TP_PROTO(void *func),
+
+	TP_ARGS(func)
+);
+
 #endif /*  _TRACE_IRQ_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9464048..a059a62 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -485,6 +485,30 @@
 	     TP_ARGS(tsk, delay));
 
 /*
+ * Tracepoint for recording the cause of uninterruptible sleep.
+ */
+TRACE_EVENT(sched_blocked_reason,
+
+	TP_PROTO(struct task_struct *tsk),
+
+	TP_ARGS(tsk),
+
+	TP_STRUCT__entry(
+		__field( pid_t,	pid	)
+		__field( void*, caller	)
+		__field( bool, io_wait	)
+	),
+
+	TP_fast_assign(
+		__entry->pid	= tsk->pid;
+		__entry->caller = (void *)get_wchan(tsk);
+		__entry->io_wait = tsk->in_iowait;
+	),
+
+	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
+);
+
+/*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
  */
diff --git a/include/trace/hooks/avc.h b/include/trace/hooks/avc.h
new file mode 100644
index 0000000..1ac5fa3
--- /dev/null
+++ b/include/trace/hooks/avc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM avc
+
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_AVC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_AVC_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+struct avc_node;
+DECLARE_RESTRICTED_HOOK(android_rvh_selinux_avc_insert,
+	TP_PROTO(const struct avc_node *node),
+	TP_ARGS(node), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_selinux_avc_node_delete,
+	TP_PROTO(const struct avc_node *node),
+	TP_ARGS(node), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_selinux_avc_node_replace,
+	TP_PROTO(const struct avc_node *old, const struct avc_node *new),
+	TP_ARGS(old, new), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_selinux_avc_lookup,
+	TP_PROTO(const struct avc_node *node, u32 ssid, u32 tsid, u16 tclass),
+	TP_ARGS(node, ssid, tsid, tclass), 1);
+
+#endif /* _TRACE_HOOK_AVC_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/binder.h b/include/trace/hooks/binder.h
new file mode 100644
index 0000000..f52f1be
--- /dev/null
+++ b/include/trace/hooks/binder.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM binder
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_BINDER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_BINDER_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+struct binder_transaction;
+struct task_struct;
+DECLARE_HOOK(android_vh_binder_transaction_init,
+	TP_PROTO(struct binder_transaction *t),
+	TP_ARGS(t));
+DECLARE_HOOK(android_vh_binder_set_priority,
+	TP_PROTO(struct binder_transaction *t, struct task_struct *task),
+	TP_ARGS(t, task));
+DECLARE_HOOK(android_vh_binder_restore_priority,
+	TP_PROTO(struct binder_transaction *t, struct task_struct *task),
+	TP_ARGS(t, task));
+struct binder_proc;
+struct binder_thread;
+DECLARE_HOOK(android_vh_binder_wakeup_ilocked,
+	TP_PROTO(struct task_struct *task, bool sync, struct binder_proc *proc),
+	TP_ARGS(task, sync, proc));
+DECLARE_HOOK(android_vh_binder_wait_for_work,
+	TP_PROTO(bool do_proc_work, struct binder_thread *tsk, struct binder_proc *proc),
+	TP_ARGS(do_proc_work, tsk, proc));
+DECLARE_HOOK(android_vh_sync_txn_recvd,
+	TP_PROTO(struct task_struct *tsk, struct task_struct *from),
+	TP_ARGS(tsk, from));
+#endif /* _TRACE_HOOK_BINDER_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
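Because DECLARE_HOOK builds on the tracepoint machinery, a vendor module attaches with the generated register_trace_android_vh_* call; a minimal sketch, where the probe body and names are ours:

static void my_binder_wakeup_probe(void *data, struct task_struct *task,
				   bool sync, struct binder_proc *proc)
{
	/* vendor-specific wakeup/placement policy goes here */
}

static int __init my_hooks_init(void)
{
	return register_trace_android_vh_binder_wakeup_ilocked(
			my_binder_wakeup_probe, NULL);
}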
diff --git a/include/trace/hooks/cgroup.h b/include/trace/hooks/cgroup.h
new file mode 100644
index 0000000..1c22b84
--- /dev/null
+++ b/include/trace/hooks/cgroup.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cgroup
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_CGROUP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_CGROUP_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+struct task_struct;
+DECLARE_HOOK(android_vh_cgroup_set_task,
+	TP_PROTO(int ret, struct task_struct *task),
+	TP_ARGS(ret, task));
+
+struct cgroup_subsys;
+struct cgroup_taskset;
+DECLARE_HOOK(android_vh_cgroup_attach,
+	TP_PROTO(struct cgroup_subsys *ss, struct cgroup_taskset *tset),
+	TP_ARGS(ss, tset));
+DECLARE_RESTRICTED_HOOK(android_rvh_cgroup_force_kthread_migration,
+	TP_PROTO(struct task_struct *tsk, struct cgroup *dst_cgrp, bool *force_migration),
+	TP_ARGS(tsk, dst_cgrp, force_migration), 1);
+#endif /* _TRACE_HOOK_CGROUP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/cpufreq.h b/include/trace/hooks/cpufreq.h
new file mode 100644
index 0000000..4f164b7
--- /dev/null
+++ b/include/trace/hooks/cpufreq.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_CPUFREQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_CPUFREQ_H
+
+#include <linux/cpufreq.h>
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_RESTRICTED_HOOK(android_rvh_show_max_freq,
+	TP_PROTO(struct cpufreq_policy *policy, unsigned int *max_freq),
+	TP_ARGS(policy, max_freq), 1);
+
+DECLARE_HOOK(android_vh_freq_table_limits,
+	TP_PROTO(struct cpufreq_policy *policy, unsigned int min_freq,
+		 unsigned int max_freq),
+	TP_ARGS(policy, min_freq, max_freq));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_cpufreq_transition,
+	TP_PROTO(struct cpufreq_policy *policy),
+	TP_ARGS(policy), 1);
+
+#endif /* _TRACE_HOOK_CPUFREQ_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/cpuidle.h b/include/trace/hooks/cpuidle.h
new file mode 100644
index 0000000..ef1cd61
--- /dev/null
+++ b/include/trace/hooks/cpuidle.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpuidle
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_CPUIDLE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_CPUIDLE_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+struct cpuidle_device;
+
+DECLARE_HOOK(android_vh_cpu_idle_enter,
+	TP_PROTO(int *state, struct cpuidle_device *dev),
+	TP_ARGS(state, dev));
+DECLARE_HOOK(android_vh_cpu_idle_exit,
+	TP_PROTO(int state, struct cpuidle_device *dev),
+	TP_ARGS(state, dev));
+
+#endif /* _TRACE_HOOK_CPUIDLE_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/cpuidle_psci.h b/include/trace/hooks/cpuidle_psci.h
new file mode 100644
index 0000000..94b01eb
--- /dev/null
+++ b/include/trace/hooks/cpuidle_psci.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpuidle_psci
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_CPUIDLE_PSCI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_CPUIDLE_PSCI_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+
+struct cpuidle_device;
+DECLARE_HOOK(android_vh_cpuidle_psci_enter,
+	TP_PROTO(struct cpuidle_device *dev, bool s2idle),
+	TP_ARGS(dev, s2idle));
+
+DECLARE_HOOK(android_vh_cpuidle_psci_exit,
+	TP_PROTO(struct cpuidle_device *dev, bool s2idle),
+	TP_ARGS(dev, s2idle));
+
+#endif /* _TRACE_HOOK_CPUIDLE_PSCI_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/creds.h b/include/trace/hooks/creds.h
new file mode 100644
index 0000000..3bf62aa
--- /dev/null
+++ b/include/trace/hooks/creds.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM creds
+
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_CREDS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_CREDS_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+struct cred;
+struct task_struct;
+DECLARE_RESTRICTED_HOOK(android_rvh_commit_creds,
+	TP_PROTO(const struct task_struct *task, const struct cred *new),
+	TP_ARGS(task, new), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_exit_creds,
+	TP_PROTO(const struct task_struct *task, const struct cred *cred),
+	TP_ARGS(task, cred), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_override_creds,
+	TP_PROTO(const struct task_struct *task, const struct cred *new),
+	TP_ARGS(task, new), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_revert_creds,
+	TP_PROTO(const struct task_struct *task, const struct cred *old),
+	TP_ARGS(task, old), 1);
+
+#endif /* _TRACE_HOOK_CREDS_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/debug.h b/include/trace/hooks/debug.h
new file mode 100644
index 0000000..ac006d7
--- /dev/null
+++ b/include/trace/hooks/debug.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM debug
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_DEBUG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_DEBUG_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+struct pt_regs;
+
+DECLARE_HOOK(android_vh_ipi_stop,
+	TP_PROTO(struct pt_regs *regs),
+	TP_ARGS(regs));
+#else
+#define trace_android_vh_ipi_stop(regs)
+#define trace_android_vh_ipi_stop_rcuidle(regs)
+#endif
+
+#endif /* _TRACE_HOOK_DEBUG_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/dtask.h b/include/trace/hooks/dtask.h
new file mode 100644
index 0000000..9051b26
--- /dev/null
+++ b/include/trace/hooks/dtask.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dtask
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_DTASK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_DTASK_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+struct task_struct;
+DECLARE_HOOK(android_vh_sched_show_task,
+	TP_PROTO(struct task_struct *task),
+	TP_ARGS(task));
+
+#endif /* _TRACE_HOOK_DTASK_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/epoch.h b/include/trace/hooks/epoch.h
new file mode 100644
index 0000000..13dd9a6
--- /dev/null
+++ b/include/trace/hooks/epoch.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM epoch
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_EPOCH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_EPOCH_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_HOOK(android_vh_show_suspend_epoch_val,
+	TP_PROTO(u64 suspend_ns, u64 suspend_cycles),
+	TP_ARGS(suspend_ns, suspend_cycles));
+
+DECLARE_HOOK(android_vh_show_resume_epoch_val,
+	TP_PROTO(u64 resume_cycles),
+	TP_ARGS(resume_cycles));
+
+#endif /* _TRACE_HOOK_EPOCH_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/fpsimd.h b/include/trace/hooks/fpsimd.h
new file mode 100644
index 0000000..3c107f8
--- /dev/null
+++ b/include/trace/hooks/fpsimd.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fpsimd
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_FPSIMD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_FPSIMD_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+struct task_struct;
+
+DECLARE_HOOK(android_vh_is_fpsimd_save,
+	TP_PROTO(struct task_struct *prev, struct task_struct *next),
+	TP_ARGS(prev, next));
+
+#endif /* _TRACE_HOOK_FPSIMD_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/ftrace_dump.h b/include/trace/hooks/ftrace_dump.h
new file mode 100644
index 0000000..ce33479
--- /dev/null
+++ b/include/trace/hooks/ftrace_dump.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ftrace_dump
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_FTRACE_DUMP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_FTRACE_DUMP_H
+
+#include <linux/trace_seq.h>
+#include <linux/trace_events.h>
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+
+DECLARE_HOOK(android_vh_ftrace_oops_enter,
+	TP_PROTO(bool *ftrace_check),
+	TP_ARGS(ftrace_check));
+
+DECLARE_HOOK(android_vh_ftrace_oops_exit,
+	TP_PROTO(bool *ftrace_check),
+	TP_ARGS(ftrace_check));
+
+DECLARE_HOOK(android_vh_ftrace_size_check,
+	TP_PROTO(unsigned long size, bool *ftrace_check),
+	TP_ARGS(size, ftrace_check));
+
+DECLARE_HOOK(android_vh_ftrace_format_check,
+	TP_PROTO(bool *ftrace_check),
+	TP_ARGS(ftrace_check));
+
+DECLARE_HOOK(android_vh_ftrace_dump_buffer,
+	TP_PROTO(struct trace_seq *trace_buf, bool *dump_printk),
+	TP_ARGS(trace_buf, dump_printk));
+
+#else
+
+#define trace_android_vh_ftrace_oops_enter(ftrace_check)
+#define trace_android_vh_ftrace_oops_exit(ftrace_check)
+#define trace_android_vh_ftrace_size_check(size, ftrace_check)
+#define trace_android_vh_ftrace_format_check(ftrace_check)
+#define trace_android_vh_ftrace_dump_buffer(trace_buf, dump_printk)
+
+#endif
+
+#endif /* _TRACE_HOOK_FTRACE_DUMP_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/gic.h b/include/trace/hooks/gic.h
new file mode 100644
index 0000000..e9d9fce
--- /dev/null
+++ b/include/trace/hooks/gic.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gic
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_GIC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_GIC_H
+
+#include <linux/irqdomain.h>
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_HOOK(android_vh_gic_resume,
+	TP_PROTO(struct irq_domain *domain, void __iomem *dist_base),
+	TP_ARGS(domain, dist_base));
+
+#endif /* _TRACE_HOOK_GIC_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/gic_v3.h b/include/trace/hooks/gic_v3.h
new file mode 100644
index 0000000..964fa80
--- /dev/null
+++ b/include/trace/hooks/gic_v3.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gic_v3
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_GIC_V3_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_GIC_V3_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+struct irq_data;
+struct cpumask;
+DECLARE_HOOK(android_vh_gic_v3_affinity_init,
+	TP_PROTO(int irq, u32 offset, u64 *affinity),
+	TP_ARGS(irq, offset, affinity));
+DECLARE_HOOK(android_vh_gic_v3_set_affinity,
+	TP_PROTO(struct irq_data *d, const struct cpumask *mask_val,
+		 u64 *affinity),
+	TP_ARGS(d, mask_val, affinity));
+
+#endif /* _TRACE_HOOK_GIC_V3_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/iommu.h b/include/trace/hooks/iommu.h
new file mode 100644
index 0000000..b5f2e47
--- /dev/null
+++ b/include/trace/hooks/iommu.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iommu
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_IOMMU_H
+
+#include <linux/types.h>
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_HOOK(android_vh_iommu_setup_dma_ops,
+	TP_PROTO(struct device *dev, u64 dma_base, u64 dma_limit),
+	TP_ARGS(dev, dma_base, dma_limit));
+
+struct iova_domain;
+
+DECLARE_HOOK(android_vh_iommu_iovad_alloc_iova,
+	TP_PROTO(struct device *dev, struct iova_domain *iovad, dma_addr_t iova, size_t size),
+	TP_ARGS(dev, iovad, iova, size));
+
+DECLARE_HOOK(android_vh_iommu_iovad_free_iova,
+	TP_PROTO(struct iova_domain *iovad, dma_addr_t iova, size_t size),
+	TP_ARGS(iovad, iova, size));
+
+#endif /* _TRACE_HOOK_IOMMU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/memory.h b/include/trace/hooks/memory.h
new file mode 100644
index 0000000..f59a26e
--- /dev/null
+++ b/include/trace/hooks/memory.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM memory
+
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_MEMORY_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_MEMORY_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+DECLARE_HOOK(android_vh_set_memory_nx,
+	TP_PROTO(unsigned long addr, int nr_pages),
+	TP_ARGS(addr, nr_pages));
+
+DECLARE_HOOK(android_vh_set_memory_rw,
+	TP_PROTO(unsigned long addr, int nr_pages),
+	TP_ARGS(addr, nr_pages));
+
+#endif /* _TRACE_HOOK_MEMORY_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/mm.h b/include/trace/hooks/mm.h
new file mode 100644
index 0000000..22dcee0d
--- /dev/null
+++ b/include/trace/hooks/mm.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mm
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_MM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_MM_H
+
+#include <linux/types.h>
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_skip_swapcache_flags,
+			TP_PROTO(gfp_t *flags),
+			TP_ARGS(flags), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_set_gfp_zone_flags,
+			TP_PROTO(gfp_t *flags),
+			TP_ARGS(flags), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_set_readahead_gfp_mask,
+			TP_PROTO(gfp_t *flags),
+			TP_ARGS(flags), 1);
+
+#endif /* _TRACE_HOOK_MM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/module.h b/include/trace/hooks/module.h
new file mode 100644
index 0000000..281cb0d
--- /dev/null
+++ b/include/trace/hooks/module.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM module
+
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_MODULE_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+struct module;
+DECLARE_HOOK(android_vh_set_module_permit_before_init,
+	TP_PROTO(const struct module *mod),
+	TP_ARGS(mod));
+
+DECLARE_HOOK(android_vh_set_module_permit_after_init,
+	TP_PROTO(const struct module *mod),
+	TP_ARGS(mod));
+
+#endif /* _TRACE_HOOK_MODULE_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/mpam.h b/include/trace/hooks/mpam.h
new file mode 100644
index 0000000..7d28bc8
--- /dev/null
+++ b/include/trace/hooks/mpam.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mpam
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_MPAM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_MPAM_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+struct task_struct;
+DECLARE_HOOK(android_vh_mpam_set,
+	TP_PROTO(struct task_struct *prev, struct task_struct *next),
+	TP_ARGS(prev, next));
+
+#endif /* _TRACE_HOOK_MPAM_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/net.h b/include/trace/hooks/net.h
new file mode 100644
index 0000000..31e0f21
--- /dev/null
+++ b/include/trace/hooks/net.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM net
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_NET_VH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_NET_VH_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+struct packet_type;
+struct list_head;
+struct sk_buff;
+DECLARE_HOOK(android_vh_ptype_head,
+	TP_PROTO(const struct packet_type *pt, struct list_head *vendor_pt),
+	TP_ARGS(pt, vendor_pt));
+DECLARE_HOOK(android_vh_kfree_skb,
+	TP_PROTO(struct sk_buff *skb),
+	TP_ARGS(skb));
+
+/* macro versions of hooks are no longer required */
+
+#endif /* _TRACE_HOOK_NET_VH_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/pm_domain.h b/include/trace/hooks/pm_domain.h
new file mode 100644
index 0000000..659117e
--- /dev/null
+++ b/include/trace/hooks/pm_domain.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pm_domain
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_PM_DOMAIN_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_PM_DOMAIN_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+struct generic_pm_domain;
+DECLARE_HOOK(android_vh_allow_domain_state,
+	TP_PROTO(struct generic_pm_domain *genpd, uint32_t idx, bool *allow),
+	TP_ARGS(genpd, idx, allow));
+
+#endif /* _TRACE_HOOK_PM_DOMAIN_H */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/preemptirq.h b/include/trace/hooks/preemptirq.h
new file mode 100644
index 0000000..7a5d648
--- /dev/null
+++ b/include/trace/hooks/preemptirq.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM preemptirq
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_PREEMPTIRQ_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_RESTRICTED_HOOK(android_rvh_preempt_disable,
+	TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	TP_ARGS(ip, parent_ip), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_preempt_enable,
+	TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	TP_ARGS(ip, parent_ip), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_irqs_disable,
+	TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	TP_ARGS(ip, parent_ip), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_irqs_enable,
+	TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	TP_ARGS(ip, parent_ip), 1);
+
+#endif /* _TRACE_HOOK_PREEMPTIRQ_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/printk.h b/include/trace/hooks/printk.h
new file mode 100644
index 0000000..44599c9
--- /dev/null
+++ b/include/trace/hooks/printk.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM printk
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_PRINTK_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_HOOK(android_vh_printk_hotplug,
+	TP_PROTO(int *flag),
+	TP_ARGS(flag));
+
+#endif /* _TRACE_HOOK_PRINTK_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/sched.h b/include/trace/hooks/sched.h
new file mode 100644
index 0000000..a383eec
--- /dev/null
+++ b/include/trace/hooks/sched.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SCHED_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+struct task_struct;
+DECLARE_RESTRICTED_HOOK(android_rvh_select_task_rq_fair,
+	TP_PROTO(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags, int *new_cpu),
+	TP_ARGS(p, prev_cpu, sd_flag, wake_flags, new_cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_select_task_rq_rt,
+	TP_PROTO(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags, int *new_cpu),
+	TP_ARGS(p, prev_cpu, sd_flag, wake_flags, new_cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_select_fallback_rq,
+	TP_PROTO(int cpu, struct task_struct *p, int *new_cpu),
+	TP_ARGS(cpu, p, new_cpu), 1);
+
+struct rq;
+DECLARE_HOOK(android_vh_scheduler_tick,
+	TP_PROTO(struct rq *rq),
+	TP_ARGS(rq));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_enqueue_task,
+	TP_PROTO(struct rq *rq, struct task_struct *p, int flags),
+	TP_ARGS(rq, p, flags), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_dequeue_task,
+	TP_PROTO(struct rq *rq, struct task_struct *p, int flags),
+	TP_ARGS(rq, p, flags), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_can_migrate_task,
+	TP_PROTO(struct task_struct *p, int dst_cpu, int *can_migrate),
+	TP_ARGS(p, dst_cpu, can_migrate), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_find_lowest_rq,
+	TP_PROTO(struct task_struct *p, struct cpumask *local_cpu_mask,
+			int ret, int *lowest_cpu),
+	TP_ARGS(p, local_cpu_mask, ret, lowest_cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_prepare_prio_fork,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_finish_prio_fork,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_rtmutex_prepare_setprio,
+	TP_PROTO(struct task_struct *p, struct task_struct *pi_task),
+	TP_ARGS(p, pi_task), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_user_nice,
+	TP_PROTO(struct task_struct *p, long *nice, bool *allowed),
+	TP_ARGS(p, nice, allowed), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_setscheduler,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+struct sched_group;
+DECLARE_RESTRICTED_HOOK(android_rvh_find_busiest_group,
+	TP_PROTO(struct sched_group *busiest, struct rq *dst_rq, int *out_balance),
+		TP_ARGS(busiest, dst_rq, out_balance), 1);
+
+DECLARE_HOOK(android_vh_dump_throttled_rt_tasks,
+	TP_PROTO(int cpu, u64 clock, ktime_t rt_period, u64 rt_runtime,
+			s64 rt_period_timer_expires),
+	TP_ARGS(cpu, clock, rt_period, rt_runtime, rt_period_timer_expires));
+
+DECLARE_HOOK(android_vh_jiffies_update,
+	TP_PROTO(void *unused),
+	TP_ARGS(unused));
+
+struct rq_flags;
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_newidle_balance,
+	TP_PROTO(struct rq *this_rq, struct rq_flags *rf,
+		 int *pulled_task, int *done),
+	TP_ARGS(this_rq, rf, pulled_task, done), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_nohz_balancer_kick,
+	TP_PROTO(struct rq *rq, unsigned int *flags, int *done),
+	TP_ARGS(rq, flags, done), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_find_busiest_queue,
+	TP_PROTO(int dst_cpu, struct sched_group *group,
+		 struct cpumask *env_cpus, struct rq **busiest,
+		 int *done),
+	TP_ARGS(dst_cpu, group, env_cpus, busiest, done), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_migrate_queued_task,
+	TP_PROTO(struct rq *rq, struct rq_flags *rf,
+		 struct task_struct *p, int new_cpu,
+		 int *detached),
+	TP_ARGS(rq, rf, p, new_cpu, detached), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_find_energy_efficient_cpu,
+	TP_PROTO(struct task_struct *p, int prev_cpu, int sync, int *new_cpu),
+	TP_ARGS(p, prev_cpu, sync, new_cpu), 1);
+struct sched_attr;
+DECLARE_HOOK(android_vh_set_sugov_sched_attr,
+	TP_PROTO(struct sched_attr *attr),
+	TP_ARGS(attr));
+DECLARE_RESTRICTED_HOOK(android_rvh_set_iowait,
+	TP_PROTO(struct task_struct *p, struct rq *rq, int *should_iowait_boost),
+	TP_ARGS(p, rq, should_iowait_boost), 1);
+struct sugov_policy;
+DECLARE_RESTRICTED_HOOK(android_rvh_set_sugov_update,
+	TP_PROTO(struct sugov_policy *sg_policy, unsigned int next_freq, bool *should_update),
+	TP_ARGS(sg_policy, next_freq, should_update), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_setaffinity,
+	TP_PROTO(struct task_struct *p, const struct cpumask *in_mask, int *retval),
+	TP_ARGS(p, in_mask, retval), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_cpus_allowed,
+	TP_PROTO(struct task_struct *p, cpumask_var_t cpus_requested,
+		 const struct cpumask *new_mask, int *ret),
+	TP_ARGS(p, cpus_requested, new_mask, ret), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_task_cpu,
+	TP_PROTO(struct task_struct *p, unsigned int new_cpu),
+	TP_ARGS(p, new_cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_try_to_wake_up,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_try_to_wake_up_success,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_wake_up_new_task,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_new_task_stats,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_flush_task,
+	TP_PROTO(struct task_struct *prev),
+	TP_ARGS(prev), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_tick_entry,
+	TP_PROTO(struct rq *rq),
+	TP_ARGS(rq), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_schedule,
+	TP_PROTO(struct task_struct *prev, struct task_struct *next, struct rq *rq),
+	TP_ARGS(prev, next, rq), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_starting,
+	TP_PROTO(int cpu),
+	TP_ARGS(cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_dying,
+	TP_PROTO(int cpu),
+	TP_ARGS(cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_account_irq,
+	TP_PROTO(struct task_struct *curr, int cpu, s64 delta),
+	TP_ARGS(curr, cpu, delta), 1);
+
+struct sched_entity;
+DECLARE_RESTRICTED_HOOK(android_rvh_place_entity,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial, u64 *vruntime),
+	TP_ARGS(cfs_rq, se, initial, vruntime), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_build_perf_domains,
+	TP_PROTO(bool *eas_check),
+	TP_ARGS(eas_check), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_cpu_capacity,
+	TP_PROTO(int cpu, unsigned long *capacity),
+	TP_ARGS(cpu, capacity), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_misfit_status,
+	TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update),
+	TP_ARGS(p, rq, need_update), 1);
+
+struct cgroup_taskset;
+DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_attach,
+	TP_PROTO(struct cgroup_taskset *tset),
+	TP_ARGS(tset), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_can_attach,
+	TP_PROTO(struct cgroup_taskset *tset, int *retval),
+	TP_ARGS(tset, retval), 1);
+
+struct cgroup_subsys_state;
+DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_online,
+	TP_PROTO(struct cgroup_subsys_state *css),
+	TP_ARGS(css), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork_init,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_ttwu_cond,
+	TP_PROTO(bool *cond),
+	TP_ARGS(cond), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_schedule_bug,
+	TP_PROTO(void *unused),
+	TP_ARGS(unused), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_exec,
+	TP_PROTO(bool *cond),
+	TP_ARGS(cond), 1);
+
+DECLARE_HOOK(android_vh_map_util_freq,
+	TP_PROTO(unsigned long util, unsigned long freq,
+		unsigned long cap, unsigned long *next_freq),
+	TP_ARGS(util, freq, cap, next_freq));
+
+struct em_perf_domain;
+DECLARE_HOOK(android_vh_em_cpu_energy,
+	TP_PROTO(struct em_perf_domain *pd,
+		unsigned long max_util, unsigned long sum_util,
+		unsigned long *energy),
+	TP_ARGS(pd, max_util, sum_util, energy));
+
+DECLARE_HOOK(android_vh_build_sched_domains,
+	TP_PROTO(bool has_asym),
+	TP_ARGS(has_asym));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_check_preempt_tick,
+	TP_PROTO(struct task_struct *p, unsigned long *ideal_runtime, bool *skip_preempt,
+			unsigned long delta_exec, struct cfs_rq *cfs_rq, struct sched_entity *curr,
+			unsigned int granularity),
+	TP_ARGS(p, ideal_runtime, skip_preempt, delta_exec, cfs_rq, curr, granularity), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_check_preempt_wakeup_ignore,
+	TP_PROTO(struct task_struct *p, bool *ignore),
+	TP_ARGS(p, ignore), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_replace_next_task_fair,
+	TP_PROTO(struct rq *rq, struct task_struct **p, struct sched_entity **se, bool *repick,
+			bool simple, struct task_struct *prev),
+	TP_ARGS(rq, p, se, repick, simple, prev), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_balance_rt,
+	TP_PROTO(struct rq *rq, struct task_struct *p, int *done),
+	TP_ARGS(rq, p, done), 1);
+
+struct cfs_rq;
+DECLARE_RESTRICTED_HOOK(android_rvh_pick_next_entity,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *curr,
+		 struct sched_entity **se),
+	TP_ARGS(cfs_rq, curr, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_check_preempt_wakeup,
+	TP_PROTO(struct rq *rq, struct task_struct *p, bool *preempt, bool *nopreempt,
+			int wake_flags, struct sched_entity *se, struct sched_entity *pse,
+			int next_buddy_marked, unsigned int granularity),
+	TP_ARGS(rq, p, preempt, nopreempt, wake_flags, se, pse, next_buddy_marked,
+			granularity), 1);
+
+DECLARE_HOOK(android_vh_free_task,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_after_enqueue_task,
+	TP_PROTO(struct rq *rq, struct task_struct *p),
+	TP_ARGS(rq, p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_after_dequeue_task,
+	TP_PROTO(struct rq *rq, struct task_struct *p),
+	TP_ARGS(rq, p), 1);
+
+struct cfs_rq;
+struct sched_entity;
+struct rq_flags;
+DECLARE_RESTRICTED_HOOK(android_rvh_enqueue_entity,
+	TP_PROTO(struct cfs_rq *cfs, struct sched_entity *se),
+	TP_ARGS(cfs, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_dequeue_entity,
+	TP_PROTO(struct cfs_rq *cfs, struct sched_entity *se),
+	TP_ARGS(cfs, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_entity_tick,
+	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
+	TP_ARGS(cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_enqueue_task_fair,
+	TP_PROTO(struct rq *rq, struct task_struct *p, int flags),
+	TP_ARGS(rq, p, flags), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_dequeue_task_fair,
+	TP_PROTO(struct rq *rq, struct task_struct *p, int flags),
+	TP_ARGS(rq, p, flags), 1);
+
+#endif /* _TRACE_HOOK_SCHED_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
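
To make the hook mechanism concrete: a vendor module attaches to one of the restricted scheduler hooks above by registering a probe whose prototype mirrors the hook's TP_PROTO with a leading void * context argument. A minimal sketch, with an illustrative probe body (the placement policy shown is not meaningful):

#include <linux/module.h>
#include <trace/hooks/sched.h>

/* Probe prototype = void *data, then the hook's TP_PROTO arguments. */
static void example_select_task_rq_fair(void *data, struct task_struct *p,
					int prev_cpu, int sd_flag,
					int wake_flags, int *new_cpu)
{
	/* Writing through new_cpu overrides the scheduler's placement. */
	*new_cpu = prev_cpu;
}

static int __init example_hook_init(void)
{
	/* Restricted hooks cannot be unregistered, so the module that
	 * registers them must never be unloaded.
	 */
	return register_trace_android_rvh_select_task_rq_fair(
			example_select_task_rq_fair, NULL);
}
module_init(example_hook_init);
MODULE_LICENSE("GPL");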
diff --git a/include/trace/hooks/selinux.h b/include/trace/hooks/selinux.h
new file mode 100644
index 0000000..fb0e8a5
--- /dev/null
+++ b/include/trace/hooks/selinux.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM selinux
+
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_SELINUX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SELINUX_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook into and extend functionality
+ */
+struct selinux_state;
+DECLARE_RESTRICTED_HOOK(android_rvh_selinux_is_initialized,
+	TP_PROTO(const struct selinux_state *state),
+	TP_ARGS(state), 1);
+
+#endif /* _TRACE_HOOK_SELINUX_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/sys.h b/include/trace/hooks/sys.h
new file mode 100644
index 0000000..9e5d7a5
--- /dev/null
+++ b/include/trace/hooks/sys.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sys
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_SYS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SYS_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+struct task_struct;
+DECLARE_HOOK(android_vh_syscall_prctl_finished,
+	TP_PROTO(int option, struct task_struct *task),
+	TP_ARGS(option, task));
+#endif /* _TRACE_HOOK_SYS_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/syscall_check.h b/include/trace/hooks/syscall_check.h
new file mode 100644
index 0000000..d39802a
--- /dev/null
+++ b/include/trace/hooks/syscall_check.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM syscall_check
+
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_SYSCALL_CHECK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SYSCALL_CHECK_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook into and extend functionality
+ */
+struct file;
+union bpf_attr;
+DECLARE_HOOK(android_vh_check_mmap_file,
+	TP_PROTO(const struct file *file, unsigned long prot,
+		unsigned long flag, unsigned long ret),
+	TP_ARGS(file, prot, flag, ret));
+
+DECLARE_HOOK(android_vh_check_file_open,
+	TP_PROTO(const struct file *file),
+	TP_ARGS(file));
+
+DECLARE_HOOK(android_vh_check_bpf_syscall,
+	TP_PROTO(int cmd, const union bpf_attr *attr, unsigned int size),
+	TP_ARGS(cmd, attr, size));
+
+#endif /* _TRACE_HOOK_SYSCALL_CHECK_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/sysrqcrash.h b/include/trace/hooks/sysrqcrash.h
new file mode 100644
index 0000000..e109d21
--- /dev/null
+++ b/include/trace/hooks/sysrqcrash.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sysrqcrash
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_SYSRQCRASH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SYSRQCRASH_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook into and extend functionality
+ */
+DECLARE_HOOK(android_vh_sysrq_crash,
+	TP_PROTO(void *data),
+	TP_ARGS(data));
+
+#endif /* _TRACE_HOOK_SYSRQCRASH_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/timer.h b/include/trace/hooks/timer.h
new file mode 100644
index 0000000..174d958
--- /dev/null
+++ b/include/trace/hooks/timer.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM timer
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_TIMER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_TIMER_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_HOOK(android_vh_timer_calc_index,
+	TP_PROTO(unsigned int lvl, unsigned long *expires),
+	TP_ARGS(lvl, expires));
+
+#endif /* _TRACE_HOOK_TIMER_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/topology.h b/include/trace/hooks/topology.h
new file mode 100644
index 0000000..521e07e
--- /dev/null
+++ b/include/trace/hooks/topology.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM topology
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_TOPOLOGY_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_TOPOLOGY_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+#include <linux/cpumask.h>
+
+DECLARE_HOOK(android_vh_arch_set_freq_scale,
+	TP_PROTO(const struct cpumask *cpus, unsigned long freq,
+		 unsigned long max, unsigned long *scale),
+	TP_ARGS(cpus, freq, max, scale));
+
+DECLARE_HOOK(android_vh_update_topology_flags_workfn,
+	TP_PROTO(void *unused),
+	TP_ARGS(unused));
+
+#endif /* _TRACE_HOOK_TOPOLOGY_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/ufshcd.h b/include/trace/hooks/ufshcd.h
new file mode 100644
index 0000000..fb47084
--- /dev/null
+++ b/include/trace/hooks/ufshcd.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ufshcd
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_UFSHCD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_UFSHCD_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook into and extend functionality
+ */
+struct ufs_hba;
+struct request;
+struct ufshcd_lrb;
+
+DECLARE_HOOK(android_vh_ufs_fill_prdt,
+	TP_PROTO(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+		 unsigned int segments, int *err),
+	TP_ARGS(hba, lrbp, segments, err));
+
+DECLARE_HOOK(android_vh_ufs_prepare_command,
+	TP_PROTO(struct ufs_hba *hba, struct request *rq,
+		 struct ufshcd_lrb *lrbp, int *err),
+	TP_ARGS(hba, rq, lrbp, err));
+
+DECLARE_HOOK(android_vh_ufs_update_sysfs,
+	TP_PROTO(struct ufs_hba *hba),
+	TP_ARGS(hba));
+
+DECLARE_HOOK(android_vh_ufs_send_command,
+	TP_PROTO(struct ufs_hba *hba, struct ufshcd_lrb *lrbp),
+	TP_ARGS(hba, lrbp));
+
+DECLARE_HOOK(android_vh_ufs_compl_command,
+	TP_PROTO(struct ufs_hba *hba, struct ufshcd_lrb *lrbp),
+	TP_ARGS(hba, lrbp));
+
+struct uic_command;
+DECLARE_HOOK(android_vh_ufs_send_uic_command,
+	TP_PROTO(struct ufs_hba *hba, struct uic_command *ucmd, int str_t),
+	TP_ARGS(hba, ucmd, str_t));
+
+DECLARE_HOOK(android_vh_ufs_send_tm_command,
+	TP_PROTO(struct ufs_hba *hba, int tag, int str_t),
+	TP_ARGS(hba, tag, str_t));
+
+DECLARE_HOOK(android_vh_ufs_check_int_errors,
+	TP_PROTO(struct ufs_hba *hba, bool queue_eh_work),
+	TP_ARGS(hba, queue_eh_work));
+
+#endif /* _TRACE_HOOK_UFSHCD_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/vendor_hooks.h b/include/trace/hooks/vendor_hooks.h
new file mode 100644
index 0000000..0efbc46
--- /dev/null
+++ b/include/trace/hooks/vendor_hooks.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Note: we intentionally omit include-file ifdef protection.
+ * This is due to the way trace events work. If a file includes two
+ *  trace event headers under one "CREATE_TRACE_POINTS" the first include
+ *  will override the DECLARE_RESTRICTED_HOOK and break the second include.
+ */
+
+#include <linux/tracepoint.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+
+#define DECLARE_HOOK DECLARE_TRACE
+
+int android_rvh_probe_register(struct tracepoint *tp, void *probe, void *data);
+
+#ifdef TRACE_HEADER_MULTI_READ
+
+#define DEFINE_HOOK_FN(_name, _reg, _unreg, proto, args)		\
+	static const char __tpstrtab_##_name[]				\
+	__section("__tracepoints_strings") = #_name;			\
+	extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name);	\
+	int __traceiter_##_name(void *__data, proto);			\
+	struct tracepoint __tracepoint_##_name	__used			\
+	__section("__tracepoints") = {					\
+		.name = __tpstrtab_##_name,				\
+		.key = STATIC_KEY_INIT_FALSE,				\
+		.static_call_key = &STATIC_CALL_KEY(tp_func_##_name),	\
+		.static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \
+		.iterator = &__traceiter_##_name,			\
+		.regfunc = _reg,					\
+		.unregfunc = _unreg,					\
+		.funcs = NULL };					\
+	__TRACEPOINT_ENTRY(_name);					\
+	int __traceiter_##_name(void *__data, proto)			\
+	{								\
+		struct tracepoint_func *it_func_ptr;			\
+		void *it_func;						\
+									\
+		it_func_ptr = (&__tracepoint_##_name)->funcs;		\
+		it_func = (it_func_ptr)->func;				\
+		do {							\
+			__data = (it_func_ptr)->data;			\
+			((void(*)(void *, proto))(it_func))(__data, args); \
+			it_func = READ_ONCE((++it_func_ptr)->func);	\
+		} while (it_func);					\
+		return 0;						\
+	}								\
+	DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
+
+#undef DECLARE_RESTRICTED_HOOK
+#define DECLARE_RESTRICTED_HOOK(name, proto, args, cond) \
+	DEFINE_HOOK_FN(name, NULL, NULL, PARAMS(proto), PARAMS(args))
+
+/* prevent additional recursion */
+#undef TRACE_HEADER_MULTI_READ
+#else /* TRACE_HEADER_MULTI_READ */
+
+#ifdef CONFIG_HAVE_STATIC_CALL
+#define __DO_RESTRICTED_HOOK_CALL(name, args)					\
+	do {								\
+		struct tracepoint_func *it_func_ptr;			\
+		void *__data;						\
+		it_func_ptr = (&__tracepoint_##name)->funcs;		\
+		if (it_func_ptr) {					\
+			__data = (it_func_ptr)->data;			\
+			static_call(tp_func_##name)(__data, args);	\
+		}							\
+	} while (0)
+#else
+#define __DO_RESTRICTED_HOOK_CALL(name, args)	__traceiter_##name(NULL, args)
+#endif
+
+#define DO_RESTRICTED_HOOK(name, args, cond)					\
+	do {								\
+		if (!(cond))						\
+			return;						\
+									\
+		__DO_RESTRICTED_HOOK_CALL(name, TP_ARGS(args));		\
+	} while (0)
+
+#define __DECLARE_RESTRICTED_HOOK(name, proto, args, cond, data_proto)	\
+	extern int __traceiter_##name(data_proto);			\
+	DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name);	\
+	extern struct tracepoint __tracepoint_##name;			\
+	static inline void trace_##name(proto)				\
+	{								\
+		if (static_key_false(&__tracepoint_##name.key))		\
+			DO_RESTRICTED_HOOK(name,			\
+					   TP_ARGS(args),		\
+					   TP_CONDITION(cond));		\
+	}								\
+	static inline bool						\
+	trace_##name##_enabled(void)					\
+	{								\
+		return static_key_false(&__tracepoint_##name.key);	\
+	}								\
+	static inline int						\
+	register_trace_##name(void (*probe)(data_proto), void *data) 	\
+	{								\
+		return android_rvh_probe_register(&__tracepoint_##name,	\
+						  (void *)probe, data);	\
+	}								\
+	/* vendor hooks cannot be unregistered */			\
+
+#undef DECLARE_RESTRICTED_HOOK
+#define DECLARE_RESTRICTED_HOOK(name, proto, args, cond)		\
+	__DECLARE_RESTRICTED_HOOK(name, PARAMS(proto), PARAMS(args),	\
+			cond,						\
+			PARAMS(void *__data, proto))
+
+#endif /* TRACE_HEADER_MULTI_READ */
+
+#else /* !CONFIG_TRACEPOINTS || !CONFIG_ANDROID_VENDOR_HOOKS */
+/* suppress trace hooks */
+#define DECLARE_HOOK DECLARE_EVENT_NOP
+#define DECLARE_RESTRICTED_HOOK(name, proto, args, cond)		\
+	DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args))
+#endif
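
In summary, DECLARE_HOOK is a plain DECLARE_TRACE alias, while DECLARE_RESTRICTED_HOOK expands into a trace_<name>() inline for the core kernel to call plus a register_trace_<name>() that routes through android_rvh_probe_register(); deliberately, no unregister is generated. A sketch of both sides using a hypothetical hook name:

#include <trace/hooks/vendor_hooks.h>

/* Hypothetical hook, normally declared in include/trace/hooks/. */
DECLARE_RESTRICTED_HOOK(android_rvh_example,
	TP_PROTO(int *value),
	TP_ARGS(value), 1);

/* Core-kernel call site: compiles to a patched-out static branch until
 * a vendor probe attaches; the probe may then rewrite *value in place.
 */
static void core_code_path(void)
{
	int value = 0;

	trace_android_rvh_example(&value);
}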
diff --git a/include/trace/hooks/vmscan.h b/include/trace/hooks/vmscan.h
new file mode 100644
index 0000000..0e815b8
--- /dev/null
+++ b/include/trace/hooks/vmscan.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vmscan
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_VMSCAN_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_VMSCAN_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_balance_anon_file_reclaim,
+			TP_PROTO(bool *balance_anon_file_reclaim),
+			TP_ARGS(balance_anon_file_reclaim), 1);
+#endif /* _TRACE_HOOK_VMSCAN_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/wqlockup.h b/include/trace/hooks/wqlockup.h
new file mode 100644
index 0000000..3cb33cd
--- /dev/null
+++ b/include/trace/hooks/wqlockup.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wqlockup
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_WQLOCKUP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_WQLOCKUP_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook into and extend functionality
+ */
+DECLARE_HOOK(android_vh_wq_lockup_pool,
+	TP_PROTO(int cpu, unsigned long pool_ts),
+	TP_ARGS(cpu, pool_ts));
+
+#endif /* _TRACE_HOOK_WQLOCKUP_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/linux/OWNERS b/include/uapi/linux/OWNERS
new file mode 100644
index 0000000..8aed640
--- /dev/null
+++ b/include/uapi/linux/OWNERS
@@ -0,0 +1,3 @@
+per-file f2fs**=file:/fs/f2fs/OWNERS
+per-file fuse**=file:/fs/fuse/OWNERS
+per-file net**=file:/net/OWNERS
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 3246f2c..2d3f015 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -38,11 +38,59 @@
 	BINDER_TYPE_PTR		= B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
 };
 
-enum {
+/**
+ * enum flat_binder_object_shifts - shift values for flat_binder_object_flags
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for the scheduler policy bits.
+ */
+enum flat_binder_object_shifts {
+	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
+};
+
+/**
+ * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
+ */
+enum flat_binder_object_flags {
+	/**
+	 * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
+	 *
+	 * These bits can be used to set the minimum scheduler priority
+	 * at which transactions into this node should run. Valid values
+	 * in these bits depend on the scheduler policy encoded in
+	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
+	 *
+	 * For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19]
+	 * For SCHED_FIFO/SCHED_RR, the value can run between [1..99]
+	 */
 	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+	/**
+	 * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
+	 */
 	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
 
 	/**
+	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
+	 *
+	 * These two bits can be used to set the min scheduling policy at which
+	 * transactions on this node should run. These match the UAPI
+	 * scheduler policy values, e.g.:
+	 * 00b: SCHED_NORMAL
+	 * 01b: SCHED_FIFO
+	 * 10b: SCHED_RR
+	 * 11b: SCHED_BATCH
+	 */
+	FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
+		3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,
+
+	/**
+	 * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
+	 *
+	 * Only when set, calls into this node will inherit a real-time
+	 * scheduling policy from the caller (for synchronous transactions).
+	 */
+	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
+
+	/**
 	 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
 	 *
 	 * Only when set, causes senders to include their security
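
Putting these fields together, a userspace server that wants transactions into a node to run at, say, SCHED_FIFO priority 50 packs both the policy and the priority into flat_binder_object.flags. A sketch (values are illustrative; SCHED_FIFO is the UAPI policy value 1):

#include <sched.h>
#include <linux/android/binder.h>

static __u32 make_node_flags(void)
{
	__u32 flags = 0;

	/* Minimum priority for incoming transactions (1..99 for RT). */
	flags |= 50 & FLAT_BINDER_FLAG_PRIORITY_MASK;
	/* Minimum scheduling policy, packed into bits 9-10. */
	flags |= (SCHED_FIFO << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) &
		 FLAT_BINDER_FLAG_SCHED_POLICY_MASK;
	/* Synchronous calls also inherit the caller's RT policy. */
	flags |= FLAT_BINDER_FLAG_INHERIT_RT;
	return flags;
}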
diff --git a/include/uapi/linux/dm-user.h b/include/uapi/linux/dm-user.h
new file mode 100644
index 0000000..6d8f535b
--- /dev/null
+++ b/include/uapi/linux/dm-user.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: LGPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020 Google, Inc
+ * Copyright (C) 2020 Palmer Dabbelt <palmerdabbelt@google.com>
+ */
+
+#ifndef _LINUX_DM_USER_H
+#define _LINUX_DM_USER_H
+
+#include <linux/types.h>
+
+/*
+ * dm-user proxies device mapper ops between the kernel and userspace.  It's
+ * essentially just an RPC mechanism: all kernel calls create a request,
+ * userspace handles that with a response.  Userspace obtains requests via
+ * read() and provides responses via write().
+ *
+ * See Documentation/block/dm-user.rst for more information.
+ */
+
+#define DM_USER_REQ_MAP_READ 0
+#define DM_USER_REQ_MAP_WRITE 1
+#define DM_USER_REQ_MAP_FLUSH 2
+#define DM_USER_REQ_MAP_DISCARD 3
+#define DM_USER_REQ_MAP_SECURE_ERASE 4
+#define DM_USER_REQ_MAP_WRITE_SAME 5
+#define DM_USER_REQ_MAP_WRITE_ZEROES 6
+#define DM_USER_REQ_MAP_ZONE_OPEN 7
+#define DM_USER_REQ_MAP_ZONE_CLOSE 8
+#define DM_USER_REQ_MAP_ZONE_FINISH 9
+#define DM_USER_REQ_MAP_ZONE_APPEND 10
+#define DM_USER_REQ_MAP_ZONE_RESET 11
+#define DM_USER_REQ_MAP_ZONE_RESET_ALL 12
+
+#define DM_USER_REQ_MAP_FLAG_FAILFAST_DEV 0x00001
+#define DM_USER_REQ_MAP_FLAG_FAILFAST_TRANSPORT 0x00002
+#define DM_USER_REQ_MAP_FLAG_FAILFAST_DRIVER 0x00004
+#define DM_USER_REQ_MAP_FLAG_SYNC 0x00008
+#define DM_USER_REQ_MAP_FLAG_META 0x00010
+#define DM_USER_REQ_MAP_FLAG_PRIO 0x00020
+#define DM_USER_REQ_MAP_FLAG_NOMERGE 0x00040
+#define DM_USER_REQ_MAP_FLAG_IDLE 0x00080
+#define DM_USER_REQ_MAP_FLAG_INTEGRITY 0x00100
+#define DM_USER_REQ_MAP_FLAG_FUA 0x00200
+#define DM_USER_REQ_MAP_FLAG_PREFLUSH 0x00400
+#define DM_USER_REQ_MAP_FLAG_RAHEAD 0x00800
+#define DM_USER_REQ_MAP_FLAG_BACKGROUND 0x01000
+#define DM_USER_REQ_MAP_FLAG_NOWAIT 0x02000
+#define DM_USER_REQ_MAP_FLAG_CGROUP_PUNT 0x04000
+#define DM_USER_REQ_MAP_FLAG_NOUNMAP 0x08000
+#define DM_USER_REQ_MAP_FLAG_HIPRI 0x10000
+#define DM_USER_REQ_MAP_FLAG_DRV 0x20000
+#define DM_USER_REQ_MAP_FLAG_SWAP 0x40000
+
+#define DM_USER_RESP_SUCCESS 0
+#define DM_USER_RESP_ERROR 1
+#define DM_USER_RESP_UNSUPPORTED 2
+
+struct dm_user_message {
+	__u64 seq;
+	__u64 type;
+	__u64 flags;
+	__u64 sector;
+	__u64 len;
+	__u8 buf[];
+};
+
+#endif
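
A daemon built on this header is a plain request/response loop. The sketch below answers every READ with zeroes and acknowledges everything else; the device path, buffer sizing, and reply framing are assumptions here, with Documentation/block/dm-user.rst being authoritative:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/dm-user.h>

int main(void)
{
	static char buf[sizeof(struct dm_user_message) + 65536];
	struct dm_user_message *msg = (struct dm_user_message *)buf;
	int fd = open("/dev/dm-user/example", O_RDWR);	/* name is illustrative */

	if (fd < 0)
		return 1;
	for (;;) {
		if (read(fd, buf, sizeof(buf)) < 0)	/* fetch next request */
			break;
		if (msg->type == DM_USER_REQ_MAP_READ)
			memset(msg->buf, 0, msg->len);	/* serve zeroes */
		msg->type = DM_USER_RESP_SUCCESS;
		if (write(fd, buf, sizeof(*msg) + msg->len) < 0)
			break;
	}
	return 0;
}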
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index 9f4428b..fceafb5 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -124,7 +124,10 @@
 	struct fscrypt_key_specifier key_spec;
 	__u32 raw_size;
 	__u32 key_id;
-	__u32 __reserved[8];
+	__u32 __reserved[7];
+	/* N.B.: "temporary" flag, not reserved upstream */
+#define __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED		0x00000001
+	__u32 __flags;
 	__u8 raw[];
 };
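
A caller opting into the hw-wrapped key path sets the new flag in the repurposed __flags word when issuing FS_IOC_ADD_ENCRYPTION_KEY. A hedged sketch, assuming the wrapped blob travels in raw[] like an ordinary key payload:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/fscrypt.h>

static int add_hw_wrapped_key(int mnt_fd, const __u8 *blob, __u32 blob_size)
{
	static char buf[sizeof(struct fscrypt_add_key_arg) + 128];
	struct fscrypt_add_key_arg *arg = (struct fscrypt_add_key_arg *)buf;

	memset(buf, 0, sizeof(buf));
	arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
	arg->raw_size = blob_size;	/* must fit the 128-byte tail above */
	arg->__flags = __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED;
	memcpy(arg->raw, blob, blob_size);
	return ioctl(mnt_fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
}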
 
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index a1dc3ee..574cbae 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -372,6 +372,7 @@
 #define FUSE_SUBMOUNTS		(1 << 27)
 #define FUSE_HANDLE_KILLPRIV_V2	(1 << 28)
 #define FUSE_SETXATTR_EXT	(1 << 29)
+#define FUSE_PASSTHROUGH	(1 << 31)
 
 /**
  * CUSE INIT request/reply flags
@@ -518,6 +519,7 @@
 	FUSE_SETUPMAPPING	= 48,
 	FUSE_REMOVEMAPPING	= 49,
 	FUSE_SYNCFS		= 50,
+	FUSE_CANONICAL_PATH	= 2016,
 
 	/* CUSE specific operations */
 	CUSE_INIT		= 4096,
@@ -644,7 +646,7 @@
 struct fuse_open_out {
 	uint64_t	fh;
 	uint32_t	open_flags;
-	uint32_t	padding;
+	uint32_t	passthrough_fh;
 };
 
 struct fuse_release_in {
@@ -928,6 +930,9 @@
 /* Device ioctls: */
 #define FUSE_DEV_IOC_MAGIC		229
 #define FUSE_DEV_IOC_CLONE		_IOR(FUSE_DEV_IOC_MAGIC, 0, uint32_t)
+/* 127 is reserved for the V1 interface implementation in Android (deprecated) */
+/* 126 is reserved for the V2 interface implementation in Android */
+#define FUSE_DEV_IOC_PASSTHROUGH_OPEN	_IOW(FUSE_DEV_IOC_MAGIC, 126, __u32)
 
 struct fuse_lseek_in {
 	uint64_t	fh;
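
The flow implied by these changes: while answering FUSE_OPEN, the daemon hands a backing-file descriptor to the device ioctl and places the returned identifier into fuse_open_out.passthrough_fh, after which the kernel can service I/O directly against the backing file. A sketch; that the ioctl's return value is the identifier is our assumption:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fuse.h>

/* fuse_dev_fd is the daemon's /dev/fuse descriptor. */
static int open_passthrough(int fuse_dev_fd, const char *backing_path)
{
	__u32 backing_fd;
	int fd = open(backing_path, O_RDWR);

	if (fd < 0)
		return -1;
	backing_fd = fd;
	/* On success, the result goes into fuse_open_out.passthrough_fh. */
	return ioctl(fuse_dev_fd, FUSE_DEV_IOC_PASSTHROUGH_OPEN, &backing_fd);
}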
diff --git a/include/uapi/linux/incrementalfs.h b/include/uapi/linux/incrementalfs.h
new file mode 100644
index 0000000..f8338af
--- /dev/null
+++ b/include/uapi/linux/incrementalfs.h
@@ -0,0 +1,590 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace interface for Incremental FS.
+ *
+ * Incremental FS is special-purpose Linux virtual file system that allows
+ * execution of a program while its binary and resource files are still being
+ * lazily downloaded over the network, USB etc.
+ *
+ * Copyright 2019 Google LLC
+ */
+#ifndef _UAPI_LINUX_INCREMENTALFS_H
+#define _UAPI_LINUX_INCREMENTALFS_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/xattr.h>
+
+/* ===== constants ===== */
+#define INCFS_NAME "incremental-fs"
+
+/*
+ * Magic number used in the file header and the in-memory superblock.
+ * Note that it is a 5-byte unsigned long constant, so on 32-bit kernels
+ * it is truncated to a 4-byte number.
+ */
+#define INCFS_MAGIC_NUMBER (0x5346434e49ul & ULONG_MAX)
+
+#define INCFS_DATA_FILE_BLOCK_SIZE 4096
+#define INCFS_HEADER_VER 1
+
+/* TODO: This value is assumed in incfs_copy_signature_info_from_user to be the
+ * actual signature length. Set back to 64 when fixed.
+ */
+#define INCFS_MAX_HASH_SIZE 32
+#define INCFS_MAX_FILE_ATTR_SIZE 512
+
+#define INCFS_INDEX_NAME ".index"
+#define INCFS_INCOMPLETE_NAME ".incomplete"
+#define INCFS_PENDING_READS_FILENAME ".pending_reads"
+#define INCFS_LOG_FILENAME ".log"
+#define INCFS_BLOCKS_WRITTEN_FILENAME ".blocks_written"
+#define INCFS_XATTR_ID_NAME (XATTR_USER_PREFIX "incfs.id")
+#define INCFS_XATTR_SIZE_NAME (XATTR_USER_PREFIX "incfs.size")
+#define INCFS_XATTR_METADATA_NAME (XATTR_USER_PREFIX "incfs.metadata")
+#define INCFS_XATTR_VERITY_NAME (XATTR_USER_PREFIX "incfs.verity")
+
+#define INCFS_MAX_SIGNATURE_SIZE 8096
+#define INCFS_SIGNATURE_VERSION 2
+#define INCFS_SIGNATURE_SECTIONS 2
+
+#define INCFS_IOCTL_BASE_CODE 'g'
+
+/* ===== ioctl requests on the command dir ===== */
+
+/*
+ * Create a new file
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_CREATE_FILE \
+	_IOWR(INCFS_IOCTL_BASE_CODE, 30, struct incfs_new_file_args)
+
+/* Read file signature */
+#define INCFS_IOC_READ_FILE_SIGNATURE                                          \
+	_IOR(INCFS_IOCTL_BASE_CODE, 31, struct incfs_get_file_sig_args)
+
+/*
+ * Fill in one or more data blocks. This may only be called on a handle
+ * passed as a parameter to INCFS_IOC_PERMIT_FILL
+ *
+ * Returns the number of blocks filled in, or an error if none were
+ */
+#define INCFS_IOC_FILL_BLOCKS                                                  \
+	_IOR(INCFS_IOCTL_BASE_CODE, 32, struct incfs_fill_blocks)
+
+/*
+ * Permit INCFS_IOC_FILL_BLOCKS on the given file descriptor
+ * May only be called on .pending_reads file
+ *
+ * Returns 0 on success or error
+ */
+#define INCFS_IOC_PERMIT_FILL                                                  \
+	_IOW(INCFS_IOCTL_BASE_CODE, 33, struct incfs_permit_fill)
+
+/*
+ * Fills buffer with ranges of populated blocks
+ *
+ * Returns 0 if all ranges written
+ *	   error otherwise
+ *
+ *	   Either way, range_buffer_size_out is set to the number
+ *	   of bytes written. Should be set to 0 by caller. The ranges
+ *	   filled are valid, but if an error was returned there might
+ *	   be more ranges to come.
+ *
+ *	   Ranges are ranges of filled blocks:
+ *
+ *	   1 2 7 9
+ *
+ *	   means blocks 1, 2, 7, 8, 9 are filled; blocks 0, 3, 4, 5, 6 and
+ *	   10 onwards are not
+ *
+ *	   If hashing is enabled for the file, the hash blocks are simply
+ *	   treated as though they immediately followed the data blocks.
+ */
+#define INCFS_IOC_GET_FILLED_BLOCKS                                            \
+	_IOR(INCFS_IOCTL_BASE_CODE, 34, struct incfs_get_filled_blocks_args)
+
+/*
+ * Creates a new mapped file
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_CREATE_MAPPED_FILE \
+	_IOWR(INCFS_IOCTL_BASE_CODE, 35, struct incfs_create_mapped_file_args)
+
+/*
+ * Get number of blocks, total and filled
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_GET_BLOCK_COUNT \
+	_IOR(INCFS_IOCTL_BASE_CODE, 36, struct incfs_get_block_count_args)
+
+/*
+ * Get per UID read timeouts
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_GET_READ_TIMEOUTS \
+	_IOR(INCFS_IOCTL_BASE_CODE, 37, struct incfs_get_read_timeouts_args)
+
+/*
+ * Set per UID read timeouts
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_SET_READ_TIMEOUTS \
+	_IOW(INCFS_IOCTL_BASE_CODE, 38, struct incfs_set_read_timeouts_args)
+
+/*
+ * Get last read error
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_GET_LAST_READ_ERROR \
+	_IOW(INCFS_IOCTL_BASE_CODE, 39, struct incfs_get_last_read_error_args)
+
+/* ===== sysfs feature flags ===== */
+/*
+ * Each flag is represented by a file in /sys/fs/incremental-fs/features.
+ * If the file exists, the feature is supported, and the file's contents
+ * will be the line "supported".
+ */
+
+/*
+ * Basic flag stating that the core incfs file system is available
+ */
+#define INCFS_FEATURE_FLAG_COREFS "corefs"
+
+/*
+ * zstd compression support
+ */
+#define INCFS_FEATURE_FLAG_ZSTD "zstd"
+
+/*
+ * v2 feature set support. Covers:
+ *   INCFS_IOC_CREATE_MAPPED_FILE
+ *   INCFS_IOC_GET_BLOCK_COUNT
+ *   INCFS_IOC_GET_READ_TIMEOUTS/INCFS_IOC_SET_READ_TIMEOUTS
+ *   .blocks_written status file
+ *   .incomplete folder
+ *   report_uid mount option
+ */
+#define INCFS_FEATURE_FLAG_V2 "v2"
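
Userspace can therefore feature-detect at runtime simply by testing for these files; a small sketch:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/incrementalfs.h>

/* True if the running kernel's incfs advertises the given feature flag. */
static bool incfs_has_feature(const char *flag)
{
	char path[96];

	snprintf(path, sizeof(path), "/sys/fs/%s/features/%s",
		 INCFS_NAME, flag);
	return access(path, F_OK) == 0;
}

/* e.g. incfs_has_feature(INCFS_FEATURE_FLAG_V2) before using v2 ioctls */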
+
+enum incfs_compression_alg {
+	COMPRESSION_NONE = 0,
+	COMPRESSION_LZ4 = 1,
+	COMPRESSION_ZSTD = 2,
+};
+
+enum incfs_block_flags {
+	INCFS_BLOCK_FLAGS_NONE = 0,
+	INCFS_BLOCK_FLAGS_HASH = 1,
+};
+
+typedef struct {
+	__u8 bytes[16];
+} incfs_uuid_t __attribute__((aligned (8)));
+
+/*
+ * Description of a pending read: a read call by a userspace program for
+ * which the filesystem currently doesn't have data.
+ *
+ * Reads from .pending_reads and .log return an array of these structures.
+ */
+struct incfs_pending_read_info {
+	/* Id of a file that is being read from. */
+	incfs_uuid_t file_id;
+
+	/* Number of microseconds from system boot to the read. */
+	__aligned_u64 timestamp_us;
+
+	/* Index of a file block that is being read. */
+	__u32 block_index;
+
+	/* A serial number of this pending read. */
+	__u32 serial_number;
+};
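
A filler daemon typically blocks reading the mount's .pending_reads file and fetches whatever a reader is waiting on. A sketch, assuming read() returns an integral number of records:

#include <unistd.h>
#include <linux/incrementalfs.h>

static void drain_pending_reads(int pending_fd)
{
	struct incfs_pending_read_info reads[64];
	ssize_t n = read(pending_fd, reads, sizeof(reads));
	size_t i;

	if (n <= 0)
		return;
	for (i = 0; i < (size_t)n / sizeof(reads[0]); i++) {
		/* Download block reads[i].block_index of reads[i].file_id,
		 * then supply it via INCFS_IOC_FILL_BLOCKS.
		 */
	}
}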
+
+/*
+ * Description of a pending read: a read call by a userspace program for
+ * which the filesystem currently doesn't have data.
+ *
+ * This version of incfs_pending_read_info is used whenever the file system is
+ * mounted with the report_uid flag
+ */
+struct incfs_pending_read_info2 {
+	/* Id of a file that is being read from. */
+	incfs_uuid_t file_id;
+
+	/* Number of microseconds from system boot to the read. */
+	__aligned_u64 timestamp_us;
+
+	/* Index of a file block that is being read. */
+	__u32 block_index;
+
+	/* A serial number of this pending read. */
+	__u32 serial_number;
+
+	/* The UID of the reading process */
+	__u32 uid;
+
+	__u32 reserved;
+};
+
+/*
+ * Description of a data or hash block to add to a data file.
+ */
+struct incfs_fill_block {
+	/* Index of a data block. */
+	__u32 block_index;
+
+	/* Length of data */
+	__u32 data_len;
+
+	/*
+	 * A pointer to the actual data for the block.
+	 *
+	 * Equivalent to: __u8 *data;
+	 */
+	__aligned_u64 data;
+
+	/*
+	 * Compression algorithm used to compress the data block.
+	 * Values from enum incfs_compression_alg.
+	 */
+	__u8 compression;
+
+	/* Values from enum incfs_block_flags */
+	__u8 flags;
+
+	__u16 reserved1;
+
+	__u32 reserved2;
+
+	__aligned_u64 reserved3;
+};
+
+/*
+ * Description of a number of blocks to add to a data file
+ *
+ * Argument for INCFS_IOC_FILL_BLOCKS
+ */
+struct incfs_fill_blocks {
+	/* Number of blocks */
+	__u64 count;
+
+	/* A pointer to an array of incfs_fill_block structs */
+	__aligned_u64 fill_blocks;
+};
+
+/*
+ * Permit INCFS_IOC_FILL_BLOCKS on the given file descriptor
+ * May only be called on .pending_reads file
+ *
+ * Argument for INCFS_IOC_PERMIT_FILL
+ */
+struct incfs_permit_fill {
+	/* File to permit fills on */
+	__u32 file_descriptor;
+};
+
+enum incfs_hash_tree_algorithm {
+	INCFS_HASH_TREE_NONE = 0,
+	INCFS_HASH_TREE_SHA256 = 1
+};
+
+/*
+ * Create a new file or directory.
+ */
+struct incfs_new_file_args {
+	/* Id of a file to create. */
+	incfs_uuid_t file_id;
+
+	/*
+	 * Total size of the new file. Ignored if S_ISDIR(mode).
+	 */
+	__aligned_u64 size;
+
+	/*
+	 * File mode. Permissions and dir flag.
+	 */
+	__u16 mode;
+
+	__u16 reserved1;
+
+	__u32 reserved2;
+
+	/*
+	 * A pointer to a null-terminated relative path to the file's parent
+	 * dir.
+	 * Max length: PATH_MAX
+	 *
+	 * Equivalent to: char *directory_path;
+	 */
+	__aligned_u64 directory_path;
+
+	/*
+	 * A pointer to a null-terminated file's name.
+	 * Max length: PATH_MAX
+	 *
+	 * Equivalent to: char *file_name;
+	 */
+	__aligned_u64 file_name;
+
+	/*
+	 * A pointer to a file attribute to be set on creation.
+	 *
+	 * Equivalent to: u8 *file_attr;
+	 */
+	__aligned_u64 file_attr;
+
+	/*
+	 * Length of the data buffer specified by file_attr.
+	 * Max value: INCFS_MAX_FILE_ATTR_SIZE
+	 */
+	__u32 file_attr_len;
+
+	__u32 reserved4;
+
+	/*
+	 * Points to an APK V4 Signature data blob
+	 * Signature must have two sections
+	 * Format is:
+	 *	u32 version
+	 *	u32 size_of_hash_info_section
+	 *	u8 hash_info_section[]
+	 *	u32 size_of_signing_info_section
+	 *	u8 signing_info_section[]
+	 *
+	 * Note that incfs does not care about what is in signing_info_section
+	 *
+	 * hash_info_section has following format:
+	 *	u32 hash_algorithm; // Must be SHA256 == 1
+	 *	u8 log2_blocksize;  // Must be 12 for 4096 byte blocks
+	 *	u32 salt_size;
+	 *	u8 salt[];
+	 *	u32 hash_size;
+	 *	u8 root_hash[];
+	 */
+	__aligned_u64 signature_info;
+
+	/* Size of signature_info */
+	__aligned_u64 signature_size;
+
+	__aligned_u64 reserved6;
+};
+
+/*
+ * Request a digital signature blob for a given file.
+ * Argument for INCFS_IOC_READ_FILE_SIGNATURE ioctl
+ */
+struct incfs_get_file_sig_args {
+	/*
+	 * A pointer to the data buffer to save a signature blob to.
+	 *
+	 * Equivalent to: u8 *file_signature;
+	 */
+	__aligned_u64 file_signature;
+
+	/* Size of the buffer at file_signature. */
+	__u32 file_signature_buf_size;
+
+	/*
+	 * Number of bytes saved to the file_signature buffer.
+	 * Set once the ioctl completes.
+	 */
+	__u32 file_signature_len_out;
+};
+
+struct incfs_filled_range {
+	__u32 begin;
+	__u32 end;
+};
+
+/*
+ * Request ranges of filled blocks
+ * Argument for INCFS_IOC_GET_FILLED_BLOCKS
+ */
+struct incfs_get_filled_blocks_args {
+	/*
+	 * A buffer to populate with ranges of filled blocks
+	 *
+	 * Equivalent to struct incfs_filled_ranges *range_buffer
+	 */
+	__aligned_u64 range_buffer;
+
+	/* Size of range_buffer */
+	__u32 range_buffer_size;
+
+	/* Start index to read from */
+	__u32 start_index;
+
+	/*
+	 * End index to read to. 0 means read to end. This is a range,
+	 * so incfs will read from start_index to end_index - 1
+	 */
+	__u32 end_index;
+
+	/* Actual number of blocks in file */
+	__u32 total_blocks_out;
+
+	/* The number of data blocks in the file */
+	__u32 data_blocks_out;
+
+	/* Number of bytes written to range buffer */
+	__u32 range_buffer_size_out;
+
+	/* Sector scanned up to, if the call was interrupted */
+	__u32 index_out;
+};
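
In practice the resume contract described for INCFS_IOC_GET_FILLED_BLOCKS looks like the loop below; treating an error with a full buffer as "call again from index_out" is our reading of the comments above:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/incrementalfs.h>

static int walk_filled_ranges(int file_fd)
{
	struct incfs_filled_range ranges[128];
	struct incfs_get_filled_blocks_args args;
	int err;

	memset(&args, 0, sizeof(args));
	args.range_buffer = (__u64)(uintptr_t)ranges;
	args.range_buffer_size = sizeof(ranges);
	do {
		args.range_buffer_size_out = 0;	/* caller zeroes, per above */
		err = ioctl(file_fd, INCFS_IOC_GET_FILLED_BLOCKS, &args);
		/* ranges[0 .. size_out / sizeof(*ranges) - 1] are valid here
		 * even when err != 0.
		 */
		args.start_index = args.index_out;	/* resume point */
	} while (err && args.range_buffer_size_out == sizeof(ranges));
	return err;
}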
+
+/*
+ * Create a new mapped file
+ * Argument for INCFS_IOC_CREATE_MAPPED_FILE
+ */
+struct incfs_create_mapped_file_args {
+	/*
+	 * Total size of the new file.
+	 */
+	__aligned_u64 size;
+
+	/*
+	 * File mode. Permissions and dir flag.
+	 */
+	__u16 mode;
+
+	__u16 reserved1;
+
+	__u32 reserved2;
+
+	/*
+	 * A pointer to a null-terminated relative path to the incfs mount
+	 * point
+	 * Max length: PATH_MAX
+	 *
+	 * Equivalent to: char *directory_path;
+	 */
+	__aligned_u64 directory_path;
+
+	/*
+	 * A pointer to a null-terminated file name.
+	 * Max length: PATH_MAX
+	 *
+	 * Equivalent to: char *file_name;
+	 */
+	__aligned_u64 file_name;
+
+	/* Id of source file to map. */
+	incfs_uuid_t source_file_id;
+
+	/*
+	 * Offset in source file to start mapping. Must be a multiple of
+	 * INCFS_DATA_FILE_BLOCK_SIZE
+	 */
+	__aligned_u64 source_offset;
+};
+
+/*
+ * Get information about the blocks in this file
+ * Argument for INCFS_IOC_GET_BLOCK_COUNT
+ */
+struct incfs_get_block_count_args {
+	/* Total number of data blocks in the file */
+	__u32 total_data_blocks_out;
+
+	/* Number of filled data blocks in the file */
+	__u32 filled_data_blocks_out;
+
+	/* Total number of hash blocks in the file */
+	__u32 total_hash_blocks_out;
+
+	/* Number of filled hash blocks in the file */
+	__u32 filled_hash_blocks_out;
+};
+
+/* Description of timeouts for one UID */
+struct incfs_per_uid_read_timeouts {
+	/* UID to apply these timeouts to */
+	__u32 uid;
+
+	/*
+	 * Min time in microseconds to read any block. Note that this doesn't
+	 * apply to reads which are satisfied from the page cache.
+	 */
+	__u32 min_time_us;
+
+	/*
+	 * Min time in microseconds to satisfy a pending read. Any pending read
+	 * which is filled before this time will be delayed so that the total
+	 * read time >= this value.
+	 */
+	__u32 min_pending_time_us;
+
+	/*
+	 * Max time in microseconds to satisfy a pending read before the read
+	 * times out. If set to U32_MAX, defaults to mount options
+	 * read_timeout_ms * 1000. Must be >= min_pending_time_us
+	 */
+	__u32 max_pending_time_us;
+};
+
+/*
+ * Get the read timeouts array
+ * Argument for INCFS_IOC_GET_READ_TIMEOUTS
+ */
+struct incfs_get_read_timeouts_args {
+	/*
+	 * A pointer to a buffer to fill with the current timeouts
+	 *
+	 * Equivalent to struct incfs_per_uid_read_timeouts *
+	 */
+	__aligned_u64 timeouts_array;
+
+	/* Size of above buffer in bytes */
+	__u32 timeouts_array_size;
+
+	/* Size used in bytes, or size needed if -ENOMEM returned */
+	__u32 timeouts_array_size_out;
+};
+
+/*
+ * Set the read timeouts array
+ * Arguments for INCFS_IOC_SET_READ_TIMEOUTS
+ */
+struct incfs_set_read_timeouts_args {
+	/*
+	 * A pointer to an array containing the new timeouts
+	 * This will replace any existing timeouts
+	 *
+	 * Equivalent to struct incfs_per_uid_read_timeouts *
+	 */
+	__aligned_u64 timeouts_array;
+
+	/* Size of above array in bytes. Must be < 256 */
+	__u32 timeouts_array_size;
+};
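
Wiring the two structures together, a control process could pin one UID's reads to a minimum latency as follows (values are illustrative; per the ioctl comments, the fd must be the mount's .pending_reads file):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/incrementalfs.h>

static int set_uid_read_floor(int pending_fd, __u32 uid)
{
	struct incfs_per_uid_read_timeouts t;
	struct incfs_set_read_timeouts_args args;

	memset(&t, 0, sizeof(t));
	t.uid = uid;
	t.min_time_us = 10 * 1000;	/* every read takes at least 10 ms */
	t.min_pending_time_us = 10 * 1000;
	t.max_pending_time_us = ~0u;	/* U32_MAX: fall back to read_timeout_ms */

	memset(&args, 0, sizeof(args));
	args.timeouts_array = (__u64)(uintptr_t)&t;
	args.timeouts_array_size = sizeof(t);	/* must be < 256 */
	return ioctl(pending_fd, INCFS_IOC_SET_READ_TIMEOUTS, &args);
}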
+
+/*
+ * Get last read error struct
+ * Arguments for INCFS_IOC_GET_LAST_READ_ERROR
+ */
+struct incfs_get_last_read_error_args {
+	/* File id of last file that had a read error */
+	incfs_uuid_t	file_id_out;
+
+	/* Time of last read error, in us, from CLOCK_MONOTONIC */
+	__u64	time_us_out;
+
+	/* Index of page that was being read at last read error */
+	__u32	page_out;
+
+	/* errno of last read error */
+	__u32	errno_out;
+
+	/* uid of last read error */
+	__u32	uid_out;
+
+	__u32	reserved1;
+	__u64	reserved2;
+};
+
+#endif /* _UAPI_LINUX_INCREMENTALFS_H */
diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h
index 49ddcdc..07ae4e1 100644
--- a/include/uapi/linux/netfilter/xt_IDLETIMER.h
+++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h
@@ -48,7 +48,7 @@
 
 	char label[MAX_IDLETIMER_LABEL_SIZE];
 
-	__u8 send_nl_msg;   /* unused: for compatibility with Android */
+	__u8 send_nl_msg;
 	__u8 timer_type;
 
 	/* for kernel module internal use only */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index bb73e9a..e998764 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -272,4 +272,7 @@
 # define PR_SCHED_CORE_SCOPE_THREAD_GROUP	1
 # define PR_SCHED_CORE_SCOPE_PROCESS_GROUP	2
 
+#define PR_SET_VMA		0x53564d41
+# define PR_SET_VMA_ANON_NAME		0
+
 #endif /* _LINUX_PRCTL_H */
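
PR_SET_VMA with PR_SET_VMA_ANON_NAME labels an anonymous mapping so it shows up by name (as "[anon:<name>]") in /proc/<pid>/maps; a usage sketch:

#include <stddef.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

static void *named_anon_map(size_t len, const char *name)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p != MAP_FAILED)
		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		      (unsigned long)p, len, (unsigned long)name);
	return p;
}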
diff --git a/include/uapi/linux/usb/f_accessory.h b/include/uapi/linux/usb/f_accessory.h
new file mode 100644
index 0000000..0baeb7d
--- /dev/null
+++ b/include/uapi/linux/usb/f_accessory.h
@@ -0,0 +1,146 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_ACCESSORY_H
+#define _UAPI_LINUX_USB_F_ACCESSORY_H
+
+/* Use Google Vendor ID when in accessory mode */
+#define USB_ACCESSORY_VENDOR_ID 0x18D1
+
+
+/* Product ID to use when in accessory mode */
+#define USB_ACCESSORY_PRODUCT_ID 0x2D00
+
+/* Product ID to use when in accessory mode and adb is enabled */
+#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
+
+/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
+#define ACCESSORY_STRING_MANUFACTURER   0
+#define ACCESSORY_STRING_MODEL          1
+#define ACCESSORY_STRING_DESCRIPTION    2
+#define ACCESSORY_STRING_VERSION        3
+#define ACCESSORY_STRING_URI            4
+#define ACCESSORY_STRING_SERIAL         5
+
+/* Control request for retrieving the device's protocol version
+ *
+ *	requestType:    USB_DIR_IN | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_GET_PROTOCOL
+ *	value:          0
+ *	index:          0
+ *	data            version number (16 bits little endian)
+ *                     1 for original accessory support
+ *                     2 adds HID and device to host audio support
+ */
+#define ACCESSORY_GET_PROTOCOL  51
+
+/* Control request for host to send a string to the device
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SEND_STRING
+ *	value:          0
+ *	index:          string ID
+ *	data            zero terminated UTF8 string
+ *
+ *  The device can later retrieve these strings via the
+ *  ACCESSORY_GET_STRING_* ioctls
+ */
+#define ACCESSORY_SEND_STRING   52
+
+/* Control request for starting device in accessory mode.
+ * The host sends this after setting all its strings to the device.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_START
+ *	value:          0
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_START         53
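
Seen from the host, these three requests form the accessory handshake: query the protocol version, push the identifying strings, then ask the device to re-enumerate in accessory mode. A sketch using libusb; error handling is trimmed and the vendor string is a placeholder:

#include <stdint.h>
#include <libusb-1.0/libusb.h>
#include <linux/usb/f_accessory.h>

static int start_accessory(libusb_device_handle *h)
{
	uint16_t ver = 0;

	/* 1. Protocol version, returned as 16 bits little endian. */
	libusb_control_transfer(h,
		LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_VENDOR,
		ACCESSORY_GET_PROTOCOL, 0, 0,
		(unsigned char *)&ver, sizeof(ver), 1000);
	if (ver < 1)
		return -1;
	/* 2. One ACCESSORY_SEND_STRING per string index. */
	libusb_control_transfer(h,
		LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
		ACCESSORY_SEND_STRING, 0, ACCESSORY_STRING_MANUFACTURER,
		(unsigned char *)"Example Corp", sizeof("Example Corp"), 1000);
	/* 3. Switch modes; the device re-enumerates as 18D1:2D00/2D01. */
	return libusb_control_transfer(h,
		LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
		ACCESSORY_START, 0, 0, NULL, 0, 1000);
}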
+
+/* Control request for registering a HID device.
+ * Upon registering, a unique ID is sent by the accessory in the
+ * value parameter. This ID will be used for future commands for
+ * the device
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_REGISTER_HID
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          total length of the HID report descriptor
+ *	data            none
+ */
+#define ACCESSORY_REGISTER_HID         54
+
+/* Control request for unregistering a HID device.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_UNREGISTER_HID
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_UNREGISTER_HID         55
+
+/* Control request for sending the HID report descriptor.
+ * If the HID descriptor is longer than the endpoint zero max packet size,
+ * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
+ * commands. The data for the descriptor must be sent sequentially
+ * if multiple packets are needed.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SET_HID_REPORT_DESC
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          offset of data in descriptor
+ *                      (needed when HID descriptor is too big for one packet)
+ *	data            the HID report descriptor
+ */
+#define ACCESSORY_SET_HID_REPORT_DESC         56
+
+/* Control request for sending HID events.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SEND_HID_EVENT
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          0
+ *	data            the HID report for the event
+ */
+#define ACCESSORY_SEND_HID_EVENT         57
+
+/* Control request for setting the audio mode.
+ *
+ *	requestType:	USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SET_AUDIO_MODE
+ *	value:          0 - no audio
+ *                     1 - device to host, 44100 16-bit stereo PCM
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_SET_AUDIO_MODE         58
+
+/* ioctls for retrieving strings set by the host */
+#define ACCESSORY_GET_STRING_MANUFACTURER   _IOW('M', 1, char[256])
+#define ACCESSORY_GET_STRING_MODEL          _IOW('M', 2, char[256])
+#define ACCESSORY_GET_STRING_DESCRIPTION    _IOW('M', 3, char[256])
+#define ACCESSORY_GET_STRING_VERSION        _IOW('M', 4, char[256])
+#define ACCESSORY_GET_STRING_URI            _IOW('M', 5, char[256])
+#define ACCESSORY_GET_STRING_SERIAL         _IOW('M', 6, char[256])
+/* returns 1 if there is a start request pending */
+#define ACCESSORY_IS_START_REQUESTED        _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE            _IO('M', 8)
+
+#endif /* _UAPI_LINUX_USB_F_ACCESSORY_H */
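
On the device side, gadget userspace reads back what the host sent through the ioctls above; a sketch (/dev/usb_accessory is the node this gadget function conventionally creates):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usb/f_accessory.h>

int main(void)
{
	char model[256];
	int fd = open("/dev/usb_accessory", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, ACCESSORY_GET_STRING_MODEL, model) == 0)
		printf("model: %s\n", model);
	if (ioctl(fd, ACCESSORY_IS_START_REQUESTED) == 1)
		printf("host has requested accessory mode\n");
	close(fd);
	return 0;
}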
diff --git a/init/Kconfig b/init/Kconfig
index 4b7bac1..a67a1e1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -139,7 +139,7 @@
 
 config WERROR
 	bool "Compile the kernel with warnings as errors"
-	default COMPILE_TEST
+	default y
 	help
 	  A kernel build should not cause any compiler warnings, and this
 	  enables the '-Werror' flag to enforce that rule by default.
@@ -1274,6 +1274,17 @@
 	  desktop applications.  Task group autogeneration is currently based
 	  upon task session.
 
+config RT_SOFTINT_OPTIMIZATION
+       bool "Improve RT scheduling during long softint execution"
+       depends on ARM64
+       depends on SMP
+       default n
+       help
+         Enable an optimization which tries to avoid placing RT tasks on CPUs
+	 occupied by nonpreemptible tasks, such as a long softint, or CPUs
+	 which may soon block preemptions, such as a CPU running a ksoftirq
+	 thread which handles slow softints.
+
 config SYSFS_DEPRECATED
 	bool "Enable deprecated sysfs features to support old userspace tools"
 	depends on SYSFS
@@ -2149,6 +2160,20 @@
 	  the version).  With this option, such a "srcversion" field
 	  will be created for all modules.  If unsure, say N.
 
+config MODULE_SCMVERSION
+	bool "SCM version for modules"
+	depends on LOCALVERSION_AUTO
+	help
+	  This enables the module attribute "scmversion" which can be used
+	  by developers to identify the SCM version of a given module, e.g.
+	  git sha1 or hg sha1. The SCM version can be queried by modinfo or
+	  via the sysfs node: /sys/module/MODULENAME/scmversion. This is
+	  useful when the kernel or kernel modules are updated separately
+	  since that causes the vermagic of the kernel and the module to
+	  differ.
+
+	  If unsure, say N.
+
 config MODULE_SIG
 	bool "Module signature verification"
 	select MODULE_SIG_FORMAT
@@ -2383,3 +2408,5 @@
 # <asm/syscall_wrapper.h>.
 config ARCH_HAS_SYSCALL_WRAPPER
 	def_bool n
+
+source "init/Kconfig.gki"
diff --git a/init/Kconfig.gki b/init/Kconfig.gki
new file mode 100644
index 0000000..bdf5db9
--- /dev/null
+++ b/init/Kconfig.gki
@@ -0,0 +1,264 @@
+config GKI_HIDDEN_DRM_CONFIGS
+	bool "Hidden DRM configs needed for GKI"
+	select DRM_KMS_HELPER if (HAS_IOMEM && DRM)
+	select DRM_GEM_SHMEM_HELPER if (DRM)
+	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_MIPI_DSI
+	select DRM_TTM if (HAS_IOMEM && DRM)
+	select VIDEOMODE_HELPERS
+	select WANT_DEV_COREDUMP
+	select INTERVAL_TREE
+	help
+	  Dummy config option used to enable hidden DRM configs.
+	  These are normally selected implicitly when including a
+	  DRM module, but for GKI, the modules are built out-of-tree.
+
+config GKI_HIDDEN_REGMAP_CONFIGS
+	bool "Hidden Regmap configs needed for GKI"
+	select REGMAP_IRQ
+	select REGMAP_MMIO
+	help
+	  Dummy config option used to enable hidden regmap configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_CRYPTO_CONFIGS
+	bool "Hidden CRYPTO configs needed for GKI"
+	select CRYPTO_ENGINE
+	help
+	  Dummy config option used to enable hidden CRYPTO configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_SND_CONFIGS
+	bool "Hidden SND configs needed for GKI"
+	select SND_VMASTER
+	select SND_PCM_ELD
+	select SND_JACK
+	select SND_JACK_INPUT_DEV
+	select SND_INTEL_NHLT if (ACPI)
+	help
+	  Dummy config option used to enable hidden SND configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_SND_SOC_CONFIGS
+	bool "Hidden SND_SOC configs needed for GKI"
+	select SND_SOC_GENERIC_DMAENGINE_PCM if (SND_SOC && SND)
+	select SND_PCM_IEC958
+	select SND_SOC_COMPRESS if (SND_SOC && SND)
+	select SND_SOC_TOPOLOGY if (SND_SOC && SND)
+	select DMADEVICES
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Dummy config option used to enable hidden SND_SOC configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_MMC_CONFIGS
+	bool "Hidden MMC configs needed for GKI"
+	select MMC_SDHCI_IO_ACCESSORS if (MMC_SDHCI)
+	help
+	  Dummy config option used to enable hidden MMC configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_GPIO_CONFIGS
+	bool "Hidden GPIO configs needed for GKI"
+	select PINCTRL_SINGLE if (PINCTRL && OF && HAS_IOMEM)
+	select GPIO_PL061 if (HAS_IOMEM && ARM_AMBA && GPIOLIB)
+	help
+	  Dummy config option used to enable hidden GPIO configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_QCOM_CONFIGS
+	bool "Hidden QCOM configs needed for GKI"
+	select QCOM_SMEM_STATE
+	select QCOM_GDSC if (ARCH_QCOM)
+	select IOMMU_IO_PGTABLE_LPAE if (ARCH_QCOM)
+	help
+	  Dummy config option used to enable hidden QCOM configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_MEDIA_CONFIGS
+	bool "Hidden Media configs needed for GKI"
+	select VIDEOBUF2_CORE
+	select V4L2_MEM2MEM_DEV
+	select MEDIA_CONTROLLER
+	select MEDIA_CONTROLLER_REQUEST_API
+	select MEDIA_SUPPORT
+	select FRAME_VECTOR
+	select CEC_CORE
+	select CEC_NOTIFIER
+	select CEC_PIN
+	select VIDEOBUF2_DMA_CONTIG
+	help
+	  Dummy config option used to enable hidden media configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_VIRTUAL_CONFIGS
+	bool "Hidden Virtual configs needed for GKI"
+	select HVC_DRIVER
+	help
+	  Dummy config option used to enable hidden virtual device configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+# LEGACY_WEXT_ALLCONFIG was discussed upstream and soundly rejected as a
+# unique problem for GKI to solve.  Note that these extensions are in
+# effect deprecated and generally unsupported, and we should pressure the
+# SoC vendors to drop any modules that require them.
+config GKI_LEGACY_WEXT_ALLCONFIG
+	bool "Hidden wireless extension configs needed for GKI"
+	select WIRELESS_EXT
+	select WEXT_CORE
+	select WEXT_PROC
+	select WEXT_SPY
+	select WEXT_PRIV
+	help
+	  Dummy config option used to enable all the hidden legacy wireless
+	  extensions to the core wireless network functionality used by
+	  add-in modules.
+
+	  If you are not building a kernel to be used for a variety of
+	  out-of-kernel built wireless modules, say N here.
+
+config GKI_HIDDEN_USB_CONFIGS
+	bool "Hiddel USB configurations needed for GKI"
+	select USB_PHY
+	help
+	  Dummy config option used to enable all USB related hidden configs.
+	  These configurations are usually only selected by another config
+	  option or a combination of them.
+
+	  If you are not building a kernel to be used for a variety of
+	  out-of-kernel built USB drivers, say N here.
+
+config GKI_HIDDEN_SOC_BUS_CONFIGS
+	bool "Hidden SoC bus configuration needed for GKI"
+	select SOC_BUS
+	help
+	  Dummy config option used to enable the SOC_BUS hidden config.
+	  The configuration is required for SoCs to register themselves with the bus.
+
+	  If you are not building a kernel to be used for a variety of SoCs and
+	  out-of-tree drivers, say N here.
+
+config GKI_HIDDEN_RPMSG_CONFIGS
+	bool "Hidden RPMSG configuration needed for GKI"
+	select RPMSG
+	help
+	  Dummy config option used to enable the hidden RPMSG config.
+	  This configuration is usually only selected by another config
+	  option or a combination of them.
+
+	  If you are not building a kernel to be used with a variety of
+	  out-of-tree RPMSG drivers, say N here.
+
+config GKI_HIDDEN_GPU_CONFIGS
+	bool "Hidden GPU configuration needed for GKI"
+	select TRACE_GPU_MEM
+	help
+	  Dummy config option used to enable the hidden GPU config.
+	  This is normally selected implicitly when a module that
+	  relies on it is configured.
+
+config GKI_HIDDEN_IRQ_CONFIGS
+	bool "Hidden IRQ configuration needed for GKI"
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN_HIERARCHY
+	select IRQ_FASTEOI_HIERARCHY_HANDLERS
+	help
+	  Dummy config option used to enable the GENERIC_IRQ_CHIP hidden
+	  config, required by various SoC platforms. This is usually
+	  selected by ARCH_*.
+
+config GKI_HIDDEN_HYPERVISOR_CONFIGS
+	bool "Hidden hypervisor configuration needed for GKI"
+	select SYS_HYPERVISOR
+	help
+	  Dummy config option used to enable the SYS_HYPERVISOR hidden
+	  config, required by various SoC platforms. This is usually
+	  selected by XEN or S390.
+
+config GKI_HIDDEN_NET_CONFIGS
+	bool "Hidden networking configuration needed for GKI"
+	select PAGE_POOL
+	select NET_PTP_CLASSIFY
+	help
+	  Dummy config option used to enable the hidden networking
+	  configs, required by various SoC platforms.
+
+config GKI_HIDDEN_PHY_CONFIGS
+	bool "Hidden PHY configuration needed for GKI"
+	select GENERIC_PHY_MIPI_DPHY
+	help
+	  Dummy config option used to enable the hidden PHY configs,
+	  required by various SoC platforms.
+
+config GKI_HIDDEN_MM_CONFIGS
+	bool "Hidden MM configuration needed for GKI"
+	select PAGE_REPORTING
+	select BALLOON_COMPACTION
+	select MEMORY_BALLOON
+	help
+	  Dummy config option used to enable hidden MM configs,
+	  currently required for VIRTIO_BALLOON.
+
+config GKI_HIDDEN_ETHERNET_CONFIGS
+	bool "Hidden Ethernet configuration needed for GKI"
+	select PHYLINK
+	help
+	  Dummy config option used to enable the hidden Ethernet PHYLINK
+	  configs, required by various Ethernet devices.
+
+config GKI_HIDDEN_DMA_CONFIGS
+	bool "Hidden DMA configuration needed for GKI"
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	help
+	  Dummy config option used to enable the hidden DMA configs,
+	  required by various SoC platforms.
+
+# Atrocities needed for
+# a) building GKI modules in a separate tree, or
+# b) building drivers that are not modularizable
+#
+# All of these should be reworked into an upstream solution
+# if possible.
+#
+config GKI_HACKS_TO_FIX
+	bool "GKI Dummy config options"
+	select GKI_HIDDEN_CRYPTO_CONFIGS
+	select GKI_HIDDEN_DRM_CONFIGS
+	select GKI_HIDDEN_REGMAP_CONFIGS
+	select GKI_HIDDEN_SND_CONFIGS
+	select GKI_HIDDEN_SND_SOC_CONFIGS
+	select GKI_HIDDEN_MMC_CONFIGS
+	select GKI_HIDDEN_GPIO_CONFIGS
+	select GKI_HIDDEN_QCOM_CONFIGS
+	select GKI_LEGACY_WEXT_ALLCONFIG
+	select GKI_HIDDEN_MEDIA_CONFIGS
+	select GKI_HIDDEN_VIRTUAL_CONFIGS
+	select GKI_HIDDEN_USB_CONFIGS
+	select GKI_HIDDEN_SOC_BUS_CONFIGS
+	select GKI_HIDDEN_RPMSG_CONFIGS
+	select GKI_HIDDEN_GPU_CONFIGS
+	select GKI_HIDDEN_IRQ_CONFIGS
+	select GKI_HIDDEN_HYPERVISOR_CONFIGS
+	select GKI_HIDDEN_NET_CONFIGS
+	select GKI_HIDDEN_PHY_CONFIGS
+	select GKI_HIDDEN_MM_CONFIGS
+	select GKI_HIDDEN_ETHERNET_CONFIGS
+	select GKI_HIDDEN_DMA_CONFIGS
+	help
+	  Dummy config option used to enable core functionality needed
+	  by modules that may not be selectable in this config.
+
+	  Unless you are building a GKI kernel to be used with modules
+	  built from a different config, say N here.
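
The help texts above all point at the same mechanism: in-tree, a driver's own
Kconfig entry pulls in its hidden dependencies via select, so those options
never need to be user-visible. A hypothetical vendor fragment (illustrative
only; the driver name and entry are made up) shows the pattern these
GKI_HIDDEN_* options stand in for:

	config SND_SOC_MYCODEC
		tristate "MyVendor codec driver"
		depends on SND_SOC
		select SND_SOC_GENERIC_DMAENGINE_PCM

In-tree, the select enables the hidden option automatically; under GKI the
driver is built out of tree, so the dummy options above must pre-select its
dependencies instead.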
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 1033ee8..982cba9 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -32,6 +32,8 @@
 #include <linux/rcupdate_trace.h>
 #include <linux/memcontrol.h>
 
+#include <trace/hooks/syscall_check.h>
+
 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
 			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
 			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
@@ -4606,6 +4608,8 @@
 	if (copy_from_bpfptr(&attr, uattr, size) != 0)
 		return -EFAULT;
 
+	trace_android_vh_check_bpf_syscall(cmd, &attr, size);
+
 	err = security_bpf(cmd, &attr, size);
 	if (err < 0)
 		return err;
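
For context, the Android vendor hooks wired up in this and the following hunks
are tracepoint-backed: a vendor module attaches a probe at runtime. Below is a
minimal sketch of such a probe for the hook above, assuming the hook is
declared in include/trace/hooks/syscall_check.h with the usual DECLARE_HOOK()
pattern; the exact prototype may differ, and the module name and probe are
hypothetical:

	#include <linux/module.h>
	#include <trace/hooks/syscall_check.h>

	/* Probe; the first argument is the tracepoint's private data. */
	static void mymod_check_bpf(void *data, int cmd, union bpf_attr *attr,
				    unsigned int size)
	{
		pr_debug("bpf syscall cmd=%d size=%u\n", cmd, size);
	}

	static int __init mymod_init(void)
	{
		/* Registration helper generated from the hook declaration. */
		return register_trace_android_vh_check_bpf_syscall(mymod_check_bpf,
								   NULL);
	}
	module_init(mymod_init);
	MODULE_LICENSE("GPL");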
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index bfbeabc..02f32b1 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -232,7 +232,8 @@
 int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 		       bool threadgroup);
 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
-					     bool *locked)
+					     bool *locked,
+					     struct cgroup *dst_cgrp)
 	__acquires(&cgroup_threadgroup_rwsem);
 void cgroup_procs_write_finish(struct task_struct *task, bool locked)
 	__releases(&cgroup_threadgroup_rwsem);
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 81c9e06..2ece597 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -17,6 +17,7 @@
 #include <linux/fs_parser.h>
 
 #include <trace/events/cgroup.h>
+#include <trace/hooks/cgroup.h>
 
 /*
  * pidlists linger the following amount before being destroyed.  The goal
@@ -498,7 +499,7 @@
 	if (!cgrp)
 		return -ENODEV;
 
-	task = cgroup_procs_write_start(buf, threadgroup, &locked);
+	task = cgroup_procs_write_start(buf, threadgroup, &locked, cgrp);
 	ret = PTR_ERR_OR_ZERO(task);
 	if (ret)
 		goto out_unlock;
@@ -511,13 +512,15 @@
 	tcred = get_task_cred(task);
 	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
 	    !uid_eq(cred->euid, tcred->uid) &&
-	    !uid_eq(cred->euid, tcred->suid))
+	    !uid_eq(cred->euid, tcred->suid) &&
+	    !ns_capable(tcred->user_ns, CAP_SYS_NICE))
 		ret = -EACCES;
 	put_cred(tcred);
 	if (ret)
 		goto out_finish;
 
 	ret = cgroup_attach_task(cgrp, task, threadgroup);
+	trace_android_vh_cgroup_set_task(ret, task);
 
 out_finish:
 	cgroup_procs_write_finish(task, locked);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 919194d..4859641 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -61,6 +61,9 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/cgroup.h>
+#undef CREATE_TRACE_POINTS
+
+#include <trace/hooks/cgroup.h>
 
 #define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
 					 MAX_CFTYPE_NAME + 2)
@@ -2396,6 +2399,7 @@
 
 	return cgroup_taskset_next(tset, dst_cssp);
 }
+EXPORT_SYMBOL_GPL(cgroup_taskset_first);
 
 /**
  * cgroup_taskset_next - iterate to the next task in taskset
@@ -2442,6 +2446,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(cgroup_taskset_next);
 
 /**
  * cgroup_migrate_execute - migrate a taskset
@@ -2512,6 +2517,7 @@
 		do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
 			if (ss->attach) {
 				tset->ssid = ssid;
+				trace_android_vh_cgroup_attach(ss, tset);
 				ss->attach(tset);
 			}
 		} while_each_subsys_mask();
@@ -2812,11 +2818,13 @@
 }
 
 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
-					     bool *locked)
+					     bool *locked,
+					     struct cgroup *dst_cgrp)
 	__acquires(&cgroup_threadgroup_rwsem)
 {
 	struct task_struct *tsk;
 	pid_t pid;
+	bool force_migration = false;
 
 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
 		return ERR_PTR(-EINVAL);
@@ -2851,13 +2859,16 @@
 	if (threadgroup)
 		tsk = tsk->group_leader;
 
+	if (tsk->flags & PF_KTHREAD)
+		trace_android_rvh_cgroup_force_kthread_migration(tsk, dst_cgrp, &force_migration);
+
 	/*
 	 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
 	 * If userland migrates such a kthread to a non-root cgroup, it can
 	 * become trapped in a cpuset, or RT kthread may be born in a
 	 * cgroup with no rt_runtime allocated.  Just say no.
 	 */
-	if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
+	if (!force_migration && (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY))) {
 		tsk = ERR_PTR(-EINVAL);
 		goto out_unlock_threadgroup;
 	}
@@ -4324,6 +4335,7 @@
 		return next;
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(css_next_child);
 
 /**
  * css_next_descendant_pre - find the next descendant for pre-order walk
@@ -4899,7 +4911,7 @@
 	if (!dst_cgrp)
 		return -ENODEV;
 
-	task = cgroup_procs_write_start(buf, threadgroup, &locked);
+	task = cgroup_procs_write_start(buf, threadgroup, &locked, dst_cgrp);
 	ret = PTR_ERR_OR_ZERO(task);
 	if (ret)
 		goto out_unlock;
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index d0e163a..71de414 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -66,6 +66,8 @@
 #include <linux/cgroup.h>
 #include <linux/wait.h>
 
+#include <trace/hooks/sched.h>
+
 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
 
@@ -112,6 +114,7 @@
 
 	/* user-configured CPUs and Memory Nodes allow to tasks */
 	cpumask_var_t cpus_allowed;
+	cpumask_var_t cpus_requested;
 	nodemask_t mems_allowed;
 
 	/* effective CPUs and Memory Nodes allow to tasks */
@@ -356,17 +359,6 @@
  */
 
 DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);
-
-void cpuset_read_lock(void)
-{
-	percpu_down_read(&cpuset_rwsem);
-}
-
-void cpuset_read_unlock(void)
-{
-	percpu_up_read(&cpuset_rwsem);
-}
-
 static DEFINE_SPINLOCK(callback_lock);
 
 static struct workqueue_struct *cpuset_migrate_mm_wq;
@@ -493,7 +485,7 @@
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+	return	cpumask_subset(p->cpus_requested, q->cpus_requested) &&
 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 		is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -530,8 +522,13 @@
 	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
 		goto free_two;
 
+	if (cs && !zalloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+		goto free_three;
+
 	return 0;
 
+free_three:
+	free_cpumask_var(*pmask3);
 free_two:
 	free_cpumask_var(*pmask2);
 free_one:
@@ -548,6 +545,7 @@
 {
 	if (cs) {
 		free_cpumask_var(cs->cpus_allowed);
+		free_cpumask_var(cs->cpus_requested);
 		free_cpumask_var(cs->effective_cpus);
 		free_cpumask_var(cs->subparts_cpus);
 	}
@@ -576,6 +574,7 @@
 	}
 
 	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+	cpumask_copy(trial->cpus_requested, cs->cpus_requested);
 	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
 	return trial;
 }
@@ -644,7 +643,7 @@
 	cpuset_for_each_child(c, css, par) {
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
-		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+		    cpumask_intersects(trial->cpus_requested, c->cpus_requested))
 			goto out;
 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 		    c != cur &&
@@ -1093,6 +1092,18 @@
 	cpus_read_unlock();
 }
 
+static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
+				const struct cpumask *new_mask)
+{
+	int ret = -EINVAL;
+
+	trace_android_rvh_update_cpus_allowed(p, cs->cpus_requested, new_mask, &ret);
+	if (!ret)
+		return ret;
+
+	return set_cpus_allowed_ptr(p, new_mask);
+}
+
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
@@ -1108,7 +1119,7 @@
 
 	css_task_iter_start(&cs->css, 0, &it);
 	while ((task = css_task_iter_next(&it)))
-		set_cpus_allowed_ptr(task, cs->effective_cpus);
+		update_cpus_allowed(cs, task, cs->effective_cpus);
 	css_task_iter_end(&it);
 }
 
@@ -1129,10 +1140,10 @@
 	if (parent->nr_subparts_cpus) {
 		cpumask_or(new_cpus, parent->effective_cpus,
 			   parent->subparts_cpus);
-		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
+		cpumask_and(new_cpus, new_cpus, cs->cpus_requested);
 		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
 	} else {
-		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
+		cpumask_and(new_cpus, cs->cpus_requested, parent->effective_cpus);
 	}
 }
 
@@ -1564,25 +1575,26 @@
 		return -EACCES;
 
 	/*
-	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
+	 * An empty cpus_requested is ok only if the cpuset has no tasks.
 	 * Since cpulist_parse() fails on an empty mask, we special case
 	 * that parsing.  The validate_change() call ensures that cpusets
 	 * with tasks have cpus.
 	 */
 	if (!*buf) {
-		cpumask_clear(trialcs->cpus_allowed);
+		cpumask_clear(trialcs->cpus_requested);
 	} else {
-		retval = cpulist_parse(buf, trialcs->cpus_allowed);
+		retval = cpulist_parse(buf, trialcs->cpus_requested);
 		if (retval < 0)
 			return retval;
-
-		if (!cpumask_subset(trialcs->cpus_allowed,
-				    top_cpuset.cpus_allowed))
-			return -EINVAL;
 	}
 
+	if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
+		return -EINVAL;
+
+	cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
+
 	/* Nothing to do if the cpus didn't change */
-	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+	if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
 		return 0;
 
 	retval = validate_change(cs, trialcs);
@@ -1610,6 +1622,7 @@
 
 	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
 
 	/*
 	 * Make sure that subparts_cpus is a subset of cpus_allowed.
@@ -2273,7 +2286,7 @@
 		 * can_attach beforehand should guarantee that this doesn't
 		 * fail.  TODO: have a better way to handle failure here
 		 */
-		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+		WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach));
 
 		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
 		cpuset_update_task_spread_flag(cs, task);
@@ -2497,7 +2510,7 @@
 
 	switch (type) {
 	case FILE_CPULIST:
-		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
 		break;
 	case FILE_MEMLIST:
 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
@@ -2871,6 +2884,7 @@
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, parent->cpus_requested);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
 	spin_unlock_irq(&callback_lock);
 out_unlock:
@@ -2987,8 +3001,10 @@
 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
 	BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
+	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL));
 
 	cpumask_setall(top_cpuset.cpus_allowed);
+	cpumask_setall(top_cpuset.cpus_requested);
 	nodes_setall(top_cpuset.mems_allowed);
 	cpumask_setall(top_cpuset.effective_cpus);
 	nodes_setall(top_cpuset.effective_mems);
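
The net effect of the cpus_requested split: the mask written by userspace is
preserved verbatim in cpus_requested, while cpus_allowed is recomputed as its
intersection with the CPUs currently active. For example, writing "0-3" while
CPU 3 is offline yields a cpus_allowed of 0-2; when CPU 3 comes back online,
rebuilding from cpus_requested restores 0-3 without userspace rewriting the
mask. An illustrative helper (not part of the patch) showing the derivation:

	/* Sketch: recompute the usable mask from the preserved request. */
	static void recompute_allowed(struct cpuset *cs)
	{
		/* allowed = requested intersected with the active CPUs */
		cpumask_and(cs->cpus_allowed, cs->cpus_requested,
			    cpu_active_mask);
	}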
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 407a256..0b18133 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -34,6 +34,7 @@
 #include <linux/scs.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/cpuset.h>
+#include <uapi/linux/sched/types.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -1319,6 +1320,25 @@
 	complete_ap_thread(st, true);
 }
 
+static int switch_to_rt_policy(void)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	unsigned int policy = current->policy;
+
+	if (policy == SCHED_NORMAL)
+		/* Switch to SCHED_FIFO from SCHED_NORMAL. */
+		return sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	else
+		return 1;
+}
+
+static int switch_to_fair_policy(void)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
+}
+
 /* Requires cpu_add_remove_lock to be held */
 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 {
@@ -1383,6 +1403,7 @@
 static int cpu_up(unsigned int cpu, enum cpuhp_state target)
 {
 	int err = 0;
+	int switch_err;
 
 	if (!cpu_possible(cpu)) {
 		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
@@ -1393,9 +1414,21 @@
 		return -EINVAL;
 	}
 
+	/*
+	 * A CPU hotplug operation consists of many steps, each of which
+	 * calls back into a core kernel subsystem. The hotplug-in
+	 * operation may get preempted by other CFS tasks, delaying the
+	 * whole operation. Switch the current task from SCHED_NORMAL to
+	 * SCHED_FIFO so that the hotplug-in operation completes quickly
+	 * even under heavy load and the new CPU can start handling work
+	 * sooner.
+	 */
+
+	switch_err = switch_to_rt_policy();
+
 	err = try_online_node(cpu_to_node(cpu));
 	if (err)
-		return err;
+		goto switch_out;
 
 	cpu_maps_update_begin();
 
@@ -1411,6 +1444,14 @@
 	err = _cpu_up(cpu, 0, target);
 out:
 	cpu_maps_update_done();
+switch_out:
+	if (!switch_err) {
+		switch_err = switch_to_fair_policy();
+		if (switch_err)
+			pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
+				switch_err, current->comm, current->pid);
+	}
+
 	return err;
 }
 
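
The change above is a plain priority ceiling around a long-running operation:
raise the caller to SCHED_FIFO, do the work, drop back to SCHED_NORMAL. Note
the sentinel: switch_to_rt_policy() returns 1 when the caller was not
SCHED_NORMAL, so the restore path runs only if a switch actually happened. A
condensed sketch of the same pattern (illustrative; do_slow_operation() is
hypothetical):

	#include <linux/sched.h>
	#include <uapi/linux/sched/types.h>

	static int do_slow_operation_boosted(void)
	{
		struct sched_param fifo = { .sched_priority = MAX_RT_PRIO - 1 };
		struct sched_param fair = { .sched_priority = 0 };
		int boosted, err;

		/* sched_setscheduler_nocheck() returns 0 on success. */
		boosted = !sched_setscheduler_nocheck(current, SCHED_FIFO, &fifo);

		err = do_slow_operation();	/* hypothetical workload */

		/* Restore only if the boost actually took effect. */
		if (boosted)
			sched_setscheduler_nocheck(current, SCHED_NORMAL, &fair);
		return err;
	}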
diff --git a/kernel/cred.c b/kernel/cred.c
index 473d17c..dfa48f5 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -17,6 +17,8 @@
 #include <linux/cn_proc.h>
 #include <linux/uidgid.h>
 
+#include <trace/hooks/creds.h>
+
 #if 0
 #define kdebug(FMT, ...)						\
 	printk("[%-5.5s%5u] " FMT "\n",					\
@@ -181,6 +183,7 @@
 	key_put(tsk->cached_requested_key);
 	tsk->cached_requested_key = NULL;
 #endif
+	trace_android_rvh_exit_creds(tsk, cred);
 }
 
 /**
@@ -499,6 +502,7 @@
 		inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
 	rcu_assign_pointer(task->real_cred, new);
 	rcu_assign_pointer(task->cred, new);
+	trace_android_rvh_commit_creds(task, new);
 	if (new->user != old->user || new->user_ns != old->user_ns)
 		dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);
 	alter_cred_subscribers(old, -2);
@@ -576,6 +580,7 @@
 	get_new_cred((struct cred *)new);
 	alter_cred_subscribers(new, 1);
 	rcu_assign_pointer(current->cred, new);
+	trace_android_rvh_override_creds(current, new);
 	alter_cred_subscribers(old, -1);
 
 	kdebug("override_creds() = %p{%d,%d}", old,
@@ -604,6 +609,7 @@
 	validate_creds(override);
 	alter_cred_subscribers(old, 1);
 	rcu_assign_pointer(current->cred, old);
+	trace_android_rvh_revert_creds(current, old);
 	alter_cred_subscribers(override, -1);
 	put_cred(override);
 }
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 3d63d91..7e97771 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -58,6 +58,7 @@
 #endif
 
 struct cma *dma_contiguous_default_area;
+EXPORT_SYMBOL_GPL(dma_contiguous_default_area);
 
 /*
  * Default global CMA area size can be defined in kernel's .config.
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 30d94f6..6f508d8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4487,6 +4487,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(perf_event_read_local);
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
diff --git a/kernel/fork.c b/kernel/fork.c
index 3244cc5..96ec299 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -96,6 +96,7 @@
 #include <linux/scs.h>
 #include <linux/io_uring.h>
 #include <linux/bpf.h>
+#include <linux/cpufreq_times.h>
 
 #include <asm/pgalloc.h>
 #include <linux/uaccess.h>
@@ -108,6 +109,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/task.h>
 
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/sched.h>
 /*
  * Minimum number of threads to boot the kernel
  */
@@ -118,6 +121,8 @@
  */
 #define MAX_THREADS FUTEX_TID_MASK
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(task_newtask);
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
@@ -138,6 +143,7 @@
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
+EXPORT_SYMBOL_GPL(tasklist_lock);
 
 #ifdef CONFIG_PROVE_RCU
 int lockdep_tasklist_lock_is_held(void)
@@ -445,9 +451,11 @@
 
 void free_task(struct task_struct *tsk)
 {
+	cpufreq_task_times_exit(tsk);
 	release_user_cpus_ptr(tsk);
 	scs_release(tsk);
 
+	trace_android_vh_free_task(tsk);
 #ifndef CONFIG_THREAD_INFO_IN_TASK
 	/*
 	 * The task is finally done with both the stack and thread_info,
@@ -2032,6 +2040,8 @@
 		siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
 	}
 
+	cpufreq_task_times_init(p);
+
 	/*
 	 * This _must_ happen before we call free_task(), i.e. before we jump
 	 * to any of the bad_fork_* labels. This is to avoid freeing
@@ -2585,6 +2595,8 @@
 	if (IS_ERR(p))
 		return PTR_ERR(p);
 
+	cpufreq_task_times_alloc(p);
+
 	/*
 	 * Do this prior waking up the new thread - the thread pointer
 	 * might get invalid after that point, if the thread exits quickly.
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f895265..2359703 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -14,6 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/irqdomain.h>
+#include <linux/wakeup_reason.h>
 
 #include <trace/events/irq.h>
 
@@ -510,8 +511,22 @@
 	 * If the interrupt is not in progress and is not an armed
 	 * wakeup interrupt, proceed.
 	 */
-	if (!irqd_has_set(&desc->irq_data, mask))
+	if (!irqd_has_set(&desc->irq_data, mask)) {
+#ifdef CONFIG_PM_SLEEP
+		if (unlikely(desc->no_suspend_depth &&
+			     irqd_is_wakeup_set(&desc->irq_data))) {
+			unsigned int irq = irq_desc_get_irq(desc);
+			const char *name = "(unnamed)";
+
+			if (desc->action && desc->action->name)
+				name = desc->action->name;
+
+			log_abnormal_wakeup_reason("misconfigured IRQ %u %s",
+						   irq, name);
+		}
+#endif
 		return true;
+	}
 
 	/*
 	 * If the interrupt is an armed wakeup source, mark it pending
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 2267e65..3fa64ce 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -352,9 +352,7 @@
 {
 	return radix_tree_lookup(&irq_desc_tree, irq);
 }
-#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
 EXPORT_SYMBOL_GPL(irq_to_desc);
-#endif
 
 static void delete_irq_desc(unsigned int irq)
 {
@@ -885,6 +883,7 @@
 	return desc && desc->kstat_irqs ?
 			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
 }
+EXPORT_SYMBOL_GPL(kstat_irqs_cpu);
 
 static bool irq_is_nmi(struct irq_desc *desc)
 {
@@ -942,3 +941,4 @@
 }
 EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
 #endif
+EXPORT_SYMBOL_GPL(kstat_irqs_usr);
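
With irq_to_desc() and the kstat helpers exported unconditionally, a vendor
module can total per-CPU interrupt counts itself. A minimal sketch (the helper
name is made up; kstat_irqs_cpu() is the accessor exported above):

	#include <linux/kernel_stat.h>
	#include <linux/cpumask.h>

	/* Illustrative: total count of interrupt @irq across all CPUs. */
	static u64 mymod_total_irqs(unsigned int irq)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += kstat_irqs_cpu(irq, cpu);

		return sum;
	}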
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index f7df715..aef17f8 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -170,6 +170,7 @@
 	return true;
 #endif /* CONFIG_SMP */
 }
+EXPORT_SYMBOL_GPL(irq_work_queue_on);
 
 bool irq_work_needs_cpu(void)
 {
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 7113003..d3b4bbe 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -482,6 +482,7 @@
 {
 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 }
+EXPORT_SYMBOL_GPL(kthread_bind_mask);
 
 /**
  * kthread_bind - bind a just-created kthread to a cpu.
diff --git a/kernel/module.c b/kernel/module.c
index 84a9141..76a1b8f 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -63,6 +63,10 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/module.h>
 
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/module.h>
+#include <trace/hooks/memory.h>
+
 #ifndef ARCH_SHF_SMALL
 #define ARCH_SHF_SMALL 0
 #endif
@@ -743,6 +747,7 @@
 
 MODINFO_ATTR(version);
 MODINFO_ATTR(srcversion);
+MODINFO_ATTR(scmversion);
 
 static char last_unloaded_module[MODULE_NAME_LEN+1];
 
@@ -1206,6 +1211,7 @@
 	&module_uevent,
 	&modinfo_version,
 	&modinfo_srcversion,
+	&modinfo_scmversion,
 	&modinfo_initstate,
 	&modinfo_coresize,
 	&modinfo_initsize,
@@ -2200,6 +2206,10 @@
 
 	/* This may be empty, but that's OK */
 	module_arch_freeing_init(mod);
+	trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base,
+		(mod->init_layout.size)>>PAGE_SHIFT);
+	trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base,
+		(mod->init_layout.size)>>PAGE_SHIFT);
 	module_memfree(mod->init_layout.base);
 	kfree(mod->args);
 	percpu_modfree(mod);
@@ -2208,6 +2218,10 @@
 	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
 
 	/* Finally, free the core (containing the module structure) */
+	trace_android_vh_set_memory_rw((unsigned long)mod->core_layout.base,
+		(mod->core_layout.size)>>PAGE_SHIFT);
+	trace_android_vh_set_memory_nx((unsigned long)mod->core_layout.base,
+		(mod->core_layout.size)>>PAGE_SHIFT);
 	module_memfree(mod->core_layout.base);
 }
 
@@ -3624,7 +3638,15 @@
 {
 	percpu_modfree(mod);
 	module_arch_freeing_init(mod);
+	trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base,
+		(mod->init_layout.size)>>PAGE_SHIFT);
+	trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base,
+		(mod->init_layout.size)>>PAGE_SHIFT);
 	module_memfree(mod->init_layout.base);
+	trace_android_vh_set_memory_rw((unsigned long)mod->core_layout.base,
+		(mod->core_layout.size)>>PAGE_SHIFT);
+	trace_android_vh_set_memory_nx((unsigned long)mod->core_layout.base,
+		(mod->core_layout.size)>>PAGE_SHIFT);
 	module_memfree(mod->core_layout.base);
 }
 
@@ -3782,8 +3804,13 @@
 	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
 #endif
 	module_enable_ro(mod, true);
+	trace_android_vh_set_module_permit_after_init(mod);
 	mod_tree_remove_init(mod);
 	module_arch_freeing_init(mod);
+	trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base,
+		(mod->init_layout.size)>>PAGE_SHIFT);
+	trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base,
+		(mod->init_layout.size)>>PAGE_SHIFT);
 	mod->init_layout.base = NULL;
 	mod->init_layout.size = 0;
 	mod->init_layout.ro_size = 0;
@@ -3894,6 +3921,7 @@
 	module_enable_ro(mod, false);
 	module_enable_nx(mod);
 	module_enable_x(mod);
+	trace_android_vh_set_module_permit_before_init(mod);
 
 	/*
 	 * Mark state as coming so strong_try_module_get() ignores us,
diff --git a/kernel/pid.c b/kernel/pid.c
index 2fc0a16..2104665 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -421,6 +421,7 @@
 {
 	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
 }
+EXPORT_SYMBOL_GPL(find_task_by_vpid);
 
 struct task_struct *find_get_task_by_vpid(pid_t nr)
 {
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 5899260..9770575 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -17,4 +17,5 @@
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
 
+obj-$(CONFIG_SUSPEND)		+= wakeup_reason.o
 obj-$(CONFIG_ENERGY_MODEL)	+= energy_model.o
diff --git a/kernel/power/process.c b/kernel/power/process.c
index b7e7798..a204738 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -85,18 +85,21 @@
 	elapsed = ktime_sub(end, start);
 	elapsed_msecs = ktime_to_ms(elapsed);
 
-	if (todo) {
+	if (wakeup) {
 		pr_cont("\n");
-		pr_err("Freezing of tasks %s after %d.%03d seconds "
-		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
-		       wakeup ? "aborted" : "failed",
+		pr_err("Freezing of tasks aborted after %d.%03d seconds",
+		       elapsed_msecs / 1000, elapsed_msecs % 1000);
+	} else if (todo) {
+		pr_cont("\n");
+		pr_err("Freezing of tasks failed after %d.%03d seconds"
+		       " (%d tasks refusing to freeze, wq_busy=%d):\n",
 		       elapsed_msecs / 1000, elapsed_msecs % 1000,
 		       todo - wq_busy, wq_busy);
 
 		if (wq_busy)
 			show_all_workqueues();
 
-		if (!wakeup || pm_debug_messages_on) {
+		if (pm_debug_messages_on) {
 			read_lock(&tasklist_lock);
 			for_each_process_thread(g, p) {
 				if (p != current && !freezer_should_skip(p)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 80cc1f0..c3fd86f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -30,6 +30,7 @@
 #include <trace/events/power.h>
 #include <linux/compiler.h>
 #include <linux/moduleparam.h>
+#include <linux/wakeup_reason.h>
 
 #include "power.h"
 
@@ -137,6 +138,7 @@
 		}
 
 		pm_wakeup_clear(false);
+		clear_wakeup_reasons();
 
 		s2idle_enter();
 	}
@@ -361,6 +363,7 @@
 	if (!error)
 		return 0;
 
+	log_suspend_abort_reason("One or more tasks refusing to freeze");
 	suspend_stats.failed_freeze++;
 	dpm_save_failed_step(SUSPEND_FREEZE);
 	pm_notifier_call_chain(PM_POST_SUSPEND);
@@ -390,7 +393,7 @@
  */
 static int suspend_enter(suspend_state_t state, bool *wakeup)
 {
-	int error;
+	int error, last_dev;
 
 	error = platform_suspend_prepare(state);
 	if (error)
@@ -398,7 +401,11 @@
 
 	error = dpm_suspend_late(PMSG_SUSPEND);
 	if (error) {
+		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+		last_dev %= REC_FAILED_NUM;
 		pr_err("late suspend of devices failed\n");
+		log_suspend_abort_reason("late suspend of %s device failed",
+					 suspend_stats.failed_devs[last_dev]);
 		goto Platform_finish;
 	}
 	error = platform_suspend_prepare_late(state);
@@ -407,7 +414,11 @@
 
 	error = dpm_suspend_noirq(PMSG_SUSPEND);
 	if (error) {
+		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+		last_dev %= REC_FAILED_NUM;
 		pr_err("noirq suspend of devices failed\n");
+		log_suspend_abort_reason("noirq suspend of %s device failed",
+					 suspend_stats.failed_devs[last_dev]);
 		goto Platform_early_resume;
 	}
 	error = platform_suspend_prepare_noirq(state);
@@ -423,8 +434,10 @@
 	}
 
 	error = pm_sleep_disable_secondary_cpus();
-	if (error || suspend_test(TEST_CPUS))
+	if (error || suspend_test(TEST_CPUS)) {
+		log_suspend_abort_reason("Disabling non-boot cpus failed");
 		goto Enable_cpus;
+	}
 
 	arch_suspend_disable_irqs();
 	BUG_ON(!irqs_disabled());
@@ -495,6 +508,8 @@
 	error = dpm_suspend_start(PMSG_SUSPEND);
 	if (error) {
 		pr_err("Some devices failed to suspend, or early wake event detected\n");
+		log_suspend_abort_reason(
+				"Some devices failed to suspend, or early wake event detected");
 		goto Recover_platform;
 	}
 	suspend_test_finish("suspend devices");
diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c
new file mode 100644
index 0000000..8fefaa3
--- /dev/null
+++ b/kernel/power/wakeup_reason.c
@@ -0,0 +1,438 @@
+/*
+ * kernel/power/wakeup_reason.c
+ *
+ * Logs the reasons that caused the kernel to resume from
+ * suspend.
+ *
+ * Copyright (C) 2020 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wakeup_reason.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/slab.h>
+
+/*
+ * struct wakeup_irq_node - stores data and relationships for IRQs logged as
+ * either base or nested wakeup reasons during suspend/resume flow.
+ * @siblings: for membership on leaf or parent IRQ lists
+ * @irq:      the IRQ number
+ * @irq_name: the name associated with the IRQ, or a default if none
+ */
+struct wakeup_irq_node {
+	struct list_head siblings;
+	int irq;
+	const char *irq_name;
+};
+
+enum wakeup_reason_flag {
+	RESUME_NONE = 0,
+	RESUME_IRQ,
+	RESUME_ABORT,
+	RESUME_ABNORMAL,
+};
+
+static DEFINE_SPINLOCK(wakeup_reason_lock);
+
+static LIST_HEAD(leaf_irqs);   /* kept in ascending IRQ sorted order */
+static LIST_HEAD(parent_irqs); /* unordered */
+
+static struct kmem_cache *wakeup_irq_nodes_cache;
+
+static const char *default_irq_name = "(unnamed)";
+
+static struct kobject *kobj;
+
+static bool capture_reasons;
+static int wakeup_reason;
+static char non_irq_wake_reason[MAX_SUSPEND_ABORT_LEN];
+
+static ktime_t last_monotime; /* monotonic time before last suspend */
+static ktime_t curr_monotime; /* monotonic time after last suspend */
+static ktime_t last_stime; /* monotonic boottime offset before last suspend */
+static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
+
+static void init_node(struct wakeup_irq_node *p, int irq)
+{
+	struct irq_desc *desc;
+
+	INIT_LIST_HEAD(&p->siblings);
+
+	p->irq = irq;
+	desc = irq_to_desc(irq);
+	if (desc && desc->action && desc->action->name)
+		p->irq_name = desc->action->name;
+	else
+		p->irq_name = default_irq_name;
+}
+
+static struct wakeup_irq_node *create_node(int irq)
+{
+	struct wakeup_irq_node *result;
+
+	result = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
+	if (unlikely(!result))
+		pr_warn("Failed to log wakeup IRQ %d\n", irq);
+	else
+		init_node(result, irq);
+
+	return result;
+}
+
+static void delete_list(struct list_head *head)
+{
+	struct wakeup_irq_node *n;
+
+	while (!list_empty(head)) {
+		n = list_first_entry(head, struct wakeup_irq_node, siblings);
+		list_del(&n->siblings);
+		kmem_cache_free(wakeup_irq_nodes_cache, n);
+	}
+}
+
+static bool add_sibling_node_sorted(struct list_head *head, int irq)
+{
+	struct wakeup_irq_node *n = NULL;
+	struct list_head *predecessor = head;
+
+	if (unlikely(WARN_ON(!head)))
+		return false;
+
+	if (!list_empty(head))
+		list_for_each_entry(n, head, siblings) {
+			if (n->irq < irq)
+				predecessor = &n->siblings;
+			else if (n->irq == irq)
+				return true;
+			else
+				break;
+		}
+
+	n = create_node(irq);
+	if (n) {
+		list_add(&n->siblings, predecessor);
+		return true;
+	}
+
+	return false;
+}
+
+static struct wakeup_irq_node *find_node_in_list(struct list_head *head,
+						 int irq)
+{
+	struct wakeup_irq_node *n;
+
+	if (unlikely(WARN_ON(!head)))
+		return NULL;
+
+	list_for_each_entry(n, head, siblings)
+		if (n->irq == irq)
+			return n;
+
+	return NULL;
+}
+
+void log_irq_wakeup_reason(int irq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+	if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (!capture_reasons) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (find_node_in_list(&parent_irqs, irq) == NULL)
+		add_sibling_node_sorted(&leaf_irqs, irq);
+
+	wakeup_reason = RESUME_IRQ;
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void log_threaded_irq_wakeup_reason(int irq, int parent_irq)
+{
+	struct wakeup_irq_node *parent;
+	unsigned long flags;
+
+	/*
+	 * Intentionally unsynchronized.  Calls that come in after we have
+	 * resumed should have a fast exit path since there's no work to be
+	 * done, and any coherence issue that could cause a wrong value here is
+	 * both highly improbable - given the set/clear timing - and very low
+	 * impact (parent IRQ gets logged instead of the specific child).
+	 */
+	if (!capture_reasons)
+		return;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (!capture_reasons || (find_node_in_list(&leaf_irqs, irq) != NULL)) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	parent = find_node_in_list(&parent_irqs, parent_irq);
+	if (parent != NULL)
+		add_sibling_node_sorted(&leaf_irqs, irq);
+	else {
+		parent = find_node_in_list(&leaf_irqs, parent_irq);
+		if (parent != NULL) {
+			list_del_init(&parent->siblings);
+			list_add_tail(&parent->siblings, &parent_irqs);
+			add_sibling_node_sorted(&leaf_irqs, irq);
+		}
+	}
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+EXPORT_SYMBOL_GPL(log_threaded_irq_wakeup_reason);
+
+static void __log_abort_or_abnormal_wake(bool abort, const char *fmt,
+					 va_list args)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	/* Suspend abort or abnormal wake reason has already been logged. */
+	if (wakeup_reason != RESUME_NONE) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (abort)
+		wakeup_reason = RESUME_ABORT;
+	else
+		wakeup_reason = RESUME_ABNORMAL;
+
+	vsnprintf(non_irq_wake_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void log_suspend_abort_reason(const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__log_abort_or_abnormal_wake(true, fmt, args);
+	va_end(args);
+}
+EXPORT_SYMBOL_GPL(log_suspend_abort_reason);
+
+void log_abnormal_wakeup_reason(const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__log_abort_or_abnormal_wake(false, fmt, args);
+	va_end(args);
+}
+EXPORT_SYMBOL_GPL(log_abnormal_wakeup_reason);
+
+void clear_wakeup_reasons(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	delete_list(&leaf_irqs);
+	delete_list(&parent_irqs);
+	wakeup_reason = RESUME_NONE;
+	capture_reasons = true;
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+static void print_wakeup_sources(void)
+{
+	struct wakeup_irq_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	capture_reasons = false;
+
+	if (wakeup_reason == RESUME_ABORT) {
+		pr_info("Abort: %s\n", non_irq_wake_reason);
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
+		list_for_each_entry(n, &leaf_irqs, siblings)
+			pr_info("Resume caused by IRQ %d, %s\n", n->irq,
+				n->irq_name);
+	else if (wakeup_reason == RESUME_ABNORMAL)
+		pr_info("Resume caused by %s\n", non_irq_wake_reason);
+	else
+		pr_info("Resume cause unknown\n");
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+static ssize_t last_resume_reason_show(struct kobject *kobj,
+				       struct kobj_attribute *attr, char *buf)
+{
+	ssize_t buf_offset = 0;
+	struct wakeup_irq_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	if (wakeup_reason == RESUME_ABORT) {
+		buf_offset = scnprintf(buf, PAGE_SIZE, "Abort: %s",
+				       non_irq_wake_reason);
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return buf_offset;
+	}
+
+	if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
+		list_for_each_entry(n, &leaf_irqs, siblings)
+			buf_offset += scnprintf(buf + buf_offset,
+						PAGE_SIZE - buf_offset,
+						"%d %s\n", n->irq, n->irq_name);
+	else if (wakeup_reason == RESUME_ABNORMAL)
+		buf_offset = scnprintf(buf, PAGE_SIZE, "-1 %s",
+				       non_irq_wake_reason);
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+
+	return buf_offset;
+}
+
+static ssize_t last_suspend_time_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	struct timespec64 sleep_time;
+	struct timespec64 total_time;
+	struct timespec64 suspend_resume_time;
+
+	/*
+	 * total_time is calculated from monotonic boottime offsets because,
+	 * unlike CLOCK_MONOTONIC, they include the time spent in suspend.
+	 */
+	total_time = ktime_to_timespec64(ktime_sub(curr_stime, last_stime));
+
+	/*
+	 * suspend_resume_time is calculated as monotonic (CLOCK_MONOTONIC)
+	 * time interval before entering suspend and post suspend.
+	 */
+	suspend_resume_time =
+		ktime_to_timespec64(ktime_sub(curr_monotime, last_monotime));
+
+	/* sleep_time = total_time - suspend_resume_time */
+	sleep_time = timespec64_sub(total_time, suspend_resume_time);
+
+	/* Export suspend_resume_time and sleep_time in pair here. */
+	return sprintf(buf, "%llu.%09lu %llu.%09lu\n",
+		       (unsigned long long)suspend_resume_time.tv_sec,
+		       suspend_resume_time.tv_nsec,
+		       (unsigned long long)sleep_time.tv_sec,
+		       sleep_time.tv_nsec);
+}
+
+static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
+static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);
+
+static struct attribute *attrs[] = {
+	&resume_reason.attr,
+	&suspend_time.attr,
+	NULL,
+};
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+/* Detects a suspend and clears all previous wakeup reasons. */
+static int wakeup_reason_pm_event(struct notifier_block *notifier,
+		unsigned long pm_event, void *unused)
+{
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		/* monotonic time since boot */
+		last_monotime = ktime_get();
+		/* monotonic time since boot including the time spent in suspend */
+		last_stime = ktime_get_boottime();
+		clear_wakeup_reasons();
+		break;
+	case PM_POST_SUSPEND:
+		/* monotonic time since boot */
+		curr_monotime = ktime_get();
+		/* monotonic time since boot including the time spent in suspend */
+		curr_stime = ktime_get_boottime();
+		print_wakeup_sources();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block wakeup_reason_pm_notifier_block = {
+	.notifier_call = wakeup_reason_pm_event,
+};
+
+static int __init wakeup_reason_init(void)
+{
+	if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
+		pr_warn("[%s] failed to register PM notifier\n", __func__);
+		goto fail;
+	}
+
+	kobj = kobject_create_and_add("wakeup_reasons", kernel_kobj);
+	if (!kobj) {
+		pr_warn("[%s] failed to create a sysfs kobject\n", __func__);
+		goto fail_unregister_pm_notifier;
+	}
+
+	if (sysfs_create_group(kobj, &attr_group)) {
+		pr_warn("[%s] failed to create a sysfs group\n", __func__);
+		goto fail_kobject_put;
+	}
+
+	wakeup_irq_nodes_cache =
+		kmem_cache_create("wakeup_irq_node_cache",
+				  sizeof(struct wakeup_irq_node), 0, 0, NULL);
+	if (!wakeup_irq_nodes_cache)
+		goto fail_remove_group;
+
+	return 0;
+
+fail_remove_group:
+	sysfs_remove_group(kobj, &attr_group);
+fail_kobject_put:
+	kobject_put(kobj);
+fail_unregister_pm_notifier:
+	unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
+fail:
+	return 1;
+}
+
+late_initcall(wakeup_reason_init);
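
A driver aborts suspend by returning an error from its suspend callback;
pairing that with log_suspend_abort_reason() is what surfaces the reason in
/sys/kernel/wakeup_reasons/last_resume_reason. A hypothetical driver callback
(the device structure and names are made up):

	#include <linux/pm.h>
	#include <linux/wakeup_reason.h>

	struct mydrv { bool transfer_in_flight; };

	static int mydrv_suspend(struct device *dev)
	{
		struct mydrv *drv = dev_get_drvdata(dev);

		if (drv->transfer_in_flight) {
			/* Only the first reason per suspend cycle is kept. */
			log_suspend_abort_reason("mydrv: transfer in flight");
			return -EBUSY;
		}
		return 0;
	}

	static const struct dev_pm_ops mydrv_pm_ops = {
		.suspend = mydrv_suspend,
	};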
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 57b132b6..b2b791e 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -54,6 +54,8 @@
 #include <trace/events/initcall.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/printk.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/printk.h>
 
 #include "printk_ringbuffer.h"
 #include "console_cmdline.h"
@@ -2501,6 +2503,12 @@
  */
 static int console_cpu_notify(unsigned int cpu)
 {
+	int flag = 0;
+
+	trace_android_vh_printk_hotplug(&flag);
+	if (flag)
+		return 0;
+
 	if (!cpuhp_tasks_frozen) {
 		/* If trylock fails, someone else is doing the printing */
 		if (console_trylock())
@@ -3279,6 +3287,7 @@
 
 	return r;
 }
+EXPORT_SYMBOL_GPL(_printk_deferred);
 
 /*
  * printk rate limiting, lifted from the networking subsystem.
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 6bcc5d6..d1c642d 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -35,6 +35,7 @@
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
 EXPORT_SYMBOL_GPL(reboot_mode);
 enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
+EXPORT_SYMBOL_GPL(panic_reboot_mode);
 
 /*
  * This variable is used privately to keep track of whether or not
diff --git a/kernel/sched/OWNERS b/kernel/sched/OWNERS
new file mode 100644
index 0000000..09a9f8e
--- /dev/null
+++ b/kernel/sched/OWNERS
@@ -0,0 +1,4 @@
+connoro@google.com
+elavila@google.com
+qperret@google.com
+tkjos@google.com
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7756310..bf2ede4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -27,6 +27,9 @@
 #include "pelt.h"
 #include "smp.h"
 
+#include <trace/hooks/sched.h>
+#include <trace/hooks/dtask.h>
+
 /*
  * Export tracepoints that act as a bare tracehook (ie: have no trace event
  * associated with them) to allow external modules to probe them.
@@ -41,8 +44,10 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_switch);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+EXPORT_SYMBOL_GPL(runqueues);
 
 #ifdef CONFIG_SCHED_DEBUG
 /*
@@ -57,6 +62,7 @@
 const_debug unsigned int sysctl_sched_features =
 #include "features.h"
 	0;
+EXPORT_SYMBOL_GPL(sysctl_sched_features);
 #undef SCHED_FEAT
 
 /*
@@ -492,6 +498,7 @@
 		raw_spin_unlock(lock);
 	}
 }
+EXPORT_SYMBOL_GPL(raw_spin_rq_lock_nested);
 
 bool raw_spin_rq_trylock(struct rq *rq)
 {
@@ -521,6 +528,7 @@
 {
 	raw_spin_unlock(rq_lockp(rq));
 }
+EXPORT_SYMBOL_GPL(raw_spin_rq_unlock);
 
 #ifdef CONFIG_SMP
 /*
@@ -539,6 +547,7 @@
 
 	raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
 }
+EXPORT_SYMBOL_GPL(double_rq_lock);
 #endif
 
 /*
@@ -564,6 +573,7 @@
 			cpu_relax();
 	}
 }
+EXPORT_SYMBOL_GPL(__task_rq_lock);
 
 /*
  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
@@ -686,6 +696,7 @@
 	rq->clock += delta;
 	update_rq_clock_task(rq, delta);
 }
+EXPORT_SYMBOL_GPL(update_rq_clock);
 
 #ifdef CONFIG_SCHED_HRTICK
 /*
@@ -890,6 +901,7 @@
 	 */
 	*head->lastp = node;
 	head->lastp = &node->next;
+	head->count++;
 	return true;
 }
 
@@ -945,12 +957,14 @@
 		/* Task can safely be re-inserted now: */
 		node = node->next;
 		task->wake_q.next = NULL;
+		task->wake_q_count = head->count;
 
 		/*
 		 * wake_up_process() executes a full barrier, which pairs with
 		 * the queueing in wake_q_add() so as not to miss wakeups.
 		 */
 		wake_up_process(task);
+		task->wake_q_count = 0;
 		put_task_struct(task);
 	}
 }
@@ -985,6 +999,7 @@
 	else
 		trace_sched_wake_idle_without_ipi(cpu);
 }
+EXPORT_SYMBOL_GPL(resched_curr);
 
 void resched_cpu(int cpu)
 {
@@ -1287,6 +1302,7 @@
  *   * An admin modifying the cgroup cpu.uclamp.{min, max}
  */
 DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
+EXPORT_SYMBOL_GPL(sched_uclamp_used);
 
 /* Integer rounded range for each bucket */
 #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
@@ -1480,6 +1496,7 @@
 
 	return (unsigned long)uc_eff.value;
 }
+EXPORT_SYMBOL_GPL(uclamp_eff_value);
 
 /*
  * When a task is enqueued on a rq, the clamp bucket currently defined by the
@@ -1996,7 +2013,9 @@
 	}
 
 	uclamp_rq_inc(rq, p);
+	trace_android_rvh_enqueue_task(rq, p, flags);
 	p->sched_class->enqueue_task(rq, p, flags);
+	trace_android_rvh_after_enqueue_task(rq, p);
 
 	if (sched_core_enabled(rq))
 		sched_core_enqueue(rq, p);
@@ -2016,7 +2035,9 @@
 	}
 
 	uclamp_rq_dec(rq, p);
+	trace_android_rvh_dequeue_task(rq, p, flags);
 	p->sched_class->dequeue_task(rq, p, flags);
+	trace_android_rvh_after_dequeue_task(rq, p);
 }
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -2025,6 +2046,7 @@
 
 	p->on_rq = TASK_ON_RQ_QUEUED;
 }
+EXPORT_SYMBOL_GPL(activate_task);
 
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
@@ -2032,6 +2054,7 @@
 
 	dequeue_task(rq, p, flags);
 }
+EXPORT_SYMBOL_GPL(deactivate_task);
 
 static inline int __normal_prio(int policy, int rt_prio, int nice)
 {
@@ -2124,6 +2147,7 @@
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
 		rq_clock_skip_update(rq);
 }
+EXPORT_SYMBOL_GPL(check_preempt_curr);
 
 #ifdef CONFIG_SMP
 
@@ -2249,12 +2273,24 @@
 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
 				   struct task_struct *p, int new_cpu)
 {
+	int detached = 0;
+
 	lockdep_assert_rq_held(rq);
 
+	/*
+	 * The vendor hook may drop the lock temporarily, so
+	 * pass the rq flags to unpin the lock. We expect the
+	 * rq lock to be held again on return.
+	 */
+	trace_android_rvh_migrate_queued_task(rq, rf, p, new_cpu, &detached);
+	if (detached)
+		goto attach;
+
 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
 	set_task_cpu(p, new_cpu);
-	rq_unlock(rq, rf);
 
+attach:
+	rq_unlock(rq, rf);
 	rq = cpu_rq(new_cpu);
 
 	rq_lock(rq, rf);
@@ -3059,12 +3095,13 @@
 		p->se.nr_migrations++;
 		rseq_migrate(p);
 		perf_event_task_migrate(p);
+		trace_android_rvh_set_task_cpu(p, new_cpu);
 	}
 
 	__set_task_cpu(p, new_cpu);
 }
+EXPORT_SYMBOL_GPL(set_task_cpu);
 
-#ifdef CONFIG_NUMA_BALANCING
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
 	if (task_on_rq_queued(p)) {
@@ -3179,7 +3216,7 @@
 out:
 	return ret;
 }
-#endif /* CONFIG_NUMA_BALANCING */
+EXPORT_SYMBOL_GPL(migrate_swap);
 
 /*
  * wait_task_inactive - wait for a thread to unschedule.
@@ -3341,7 +3378,11 @@
 	int nid = cpu_to_node(cpu);
 	const struct cpumask *nodemask = NULL;
 	enum { cpuset, possible, fail } state = cpuset;
-	int dest_cpu;
+	int dest_cpu = -1;
+
+	trace_android_rvh_select_fallback_rq(cpu, p, &dest_cpu);
+	if (dest_cpu >= 0)
+		return dest_cpu;
 
 	/*
 	 * If the node that the CPU is on has been offlined, cpu_to_node()
@@ -3723,6 +3764,7 @@
 out:
 	rcu_read_unlock();
 }
+EXPORT_SYMBOL_GPL(wake_up_if_idle);
 
 bool cpus_share_cache(int this_cpu, int that_cpu)
 {
@@ -3762,7 +3804,12 @@
 
 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
 {
-	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
+	bool cond = false;
+
+	trace_android_rvh_ttwu_cond(&cond);
+
+	if ((sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) ||
+			cond) {
 		if (WARN_ON_ONCE(cpu == smp_processor_id()))
 			return false;
 
@@ -4033,6 +4080,9 @@
 	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
 		goto unlock;
 
+	if (READ_ONCE(p->__state) & TASK_UNINTERRUPTIBLE)
+		trace_sched_blocked_reason(p);
+
 #ifdef CONFIG_SMP
 	/*
 	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
@@ -4101,6 +4151,8 @@
 	 */
 	smp_cond_load_acquire(&p->on_cpu, !VAL);
 
+	trace_android_rvh_try_to_wake_up(p);
+
 	cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
 	if (task_cpu(p) != cpu) {
 		if (p->in_iowait) {
@@ -4120,8 +4172,10 @@
 unlock:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 out:
-	if (success)
+	if (success) {
+		trace_android_rvh_try_to_wake_up_success(p);
 		ttwu_stat(p, task_cpu(p), wake_flags);
+	}
 	preempt_enable();
 
 	return success;
@@ -4231,6 +4285,8 @@
 	p->se.cfs_rq			= NULL;
 #endif
 
+	trace_android_rvh_sched_fork_init(p);
+
 #ifdef CONFIG_SCHEDSTATS
 	/* Even if schedstat is disabled, there should not be garbage */
 	memset(&p->stats, 0, sizeof(p->stats));
@@ -4365,6 +4421,8 @@
  */
 int sched_fork(unsigned long clone_flags, struct task_struct *p)
 {
+	trace_android_rvh_sched_fork(p);
+
 	__sched_fork(clone_flags, p);
 	/*
 	 * We mark the process as NEW here. This guarantees that
@@ -4377,6 +4435,7 @@
 	 * Make sure we do not leak PI boosting priority to the child.
 	 */
 	p->prio = current->normal_prio;
+	trace_android_rvh_prepare_prio_fork(p);
 
 	uclamp_fork(p);
 
@@ -4409,6 +4468,7 @@
 		p->sched_class = &fair_sched_class;
 
 	init_entity_runnable_average(&p->se);
+	trace_android_rvh_finish_prio_fork(p);
 
 #ifdef CONFIG_SCHED_INFO
 	if (likely(sched_info_on()))
@@ -4479,6 +4539,8 @@
 	struct rq_flags rf;
 	struct rq *rq;
 
+	trace_android_rvh_wake_up_new_task(p);
+
 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 	WRITE_ONCE(p->__state, TASK_RUNNING);
 #ifdef CONFIG_SMP
@@ -4497,6 +4559,7 @@
 	rq = __task_rq_lock(p, &rf);
 	update_rq_clock(rq);
 	post_init_entity_util_avg(p);
+	trace_android_rvh_new_task_stats(p);
 
 	activate_task(rq, p, ENQUEUE_NOCLOCK);
 	trace_sched_wakeup_new(p);
@@ -4658,6 +4721,7 @@
 	.next = NULL,
 	.func = (void (*)(struct callback_head *))balance_push,
 };
+EXPORT_SYMBOL_GPL(balance_push_callback);
 
 static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
 {
@@ -4880,6 +4944,8 @@
 		if (prev->sched_class->task_dead)
 			prev->sched_class->task_dead(prev);
 
+		trace_android_rvh_flush_task(prev);
+
 		/* Task is done with its stack. */
 		put_task_stack(prev);
 
@@ -5084,6 +5150,11 @@
 	struct task_struct *p = current;
 	unsigned long flags;
 	int dest_cpu;
+	bool cond = false;
+
+	trace_android_rvh_sched_exec(&cond);
+	if (cond)
+		return;
 
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
@@ -5237,6 +5308,7 @@
 
 	rq_lock(rq, &rf);
 
+	trace_android_rvh_tick_entry(rq);
 	update_rq_clock(rq);
 	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
 	update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
@@ -5256,6 +5328,8 @@
 	rq->idle_balance = idle_cpu(cpu);
 	trigger_load_balance(rq);
 #endif
+
+	trace_android_vh_scheduler_tick(rq);
 }
 
 #ifdef CONFIG_NO_HZ_FULL
@@ -5512,6 +5586,8 @@
 	if (panic_on_warn)
 		panic("scheduling while atomic\n");
 
+	trace_android_rvh_schedule_bug(prev);
+
 	dump_stack();
 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 }
@@ -6221,6 +6297,7 @@
 	rq->last_seen_need_resched_ns = 0;
 #endif
 
+	trace_android_rvh_schedule(prev, next, rq);
 	if (likely(prev != next)) {
 		rq->nr_switches++;
 		/*
@@ -6729,6 +6806,7 @@
 	struct rq_flags rf;
 	struct rq *rq;
 
+	trace_android_rvh_rtmutex_prepare_setprio(p, pi_task);
 	/* XXX used to be waiter->prio, not waiter->task->prio */
 	prio = __rt_effective_prio(pi_task, p->normal_prio);
 
@@ -6847,12 +6925,13 @@
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-	bool queued, running;
+	bool queued, running, allowed = false;
 	int old_prio;
 	struct rq_flags rf;
 	struct rq *rq;
 
-	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
+	trace_android_rvh_set_user_nice(p, &nice, &allowed);
+	if ((task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) && !allowed)
 		return;
 	/*
 	 * We have to be careful, if called from sys_setpriority(),
@@ -7005,6 +7084,7 @@
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(available_idle_cpu);
 
 /**
  * idle_task - return the idle task for a given CPU.
@@ -7279,6 +7359,10 @@
 		/* Normal users shall not reset the sched_reset_on_fork flag: */
 		if (p->sched_reset_on_fork && !reset_on_fork)
 			return -EPERM;
+
+		/* Can't change util-clamps */
+		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
+			return -EPERM;
 	}
 
 	if (user) {
@@ -7297,9 +7381,6 @@
 			return retval;
 	}
 
-	if (pi)
-		cpuset_read_lock();
-
 	/*
 	 * Make sure no PI-waiters arrive (or leave) while we are
 	 * changing the priority of the task:
@@ -7374,8 +7455,6 @@
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
 		task_rq_unlock(rq, p, &rf);
-		if (pi)
-			cpuset_read_unlock();
 		goto recheck;
 	}
 
@@ -7441,10 +7520,8 @@
 	head = splice_balance_callbacks(rq);
 	task_rq_unlock(rq, p, &rf);
 
-	if (pi) {
-		cpuset_read_unlock();
+	if (pi)
 		rt_mutex_adjust_pi(p);
-	}
 
 	/* Run balance callbacks after we've adjusted the PI chain: */
 	balance_callbacks(rq, head);
@@ -7454,8 +7531,6 @@
 
 unlock:
 	task_rq_unlock(rq, p, &rf);
-	if (pi)
-		cpuset_read_unlock();
 	return retval;
 }
 
@@ -7494,11 +7569,13 @@
 {
 	return _sched_setscheduler(p, policy, param, true);
 }
+EXPORT_SYMBOL_GPL(sched_setscheduler);
 
 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
 {
 	return __sched_setscheduler(p, attr, true, true);
 }
+EXPORT_SYMBOL_GPL(sched_setattr);
 
 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
 {
@@ -7524,6 +7601,7 @@
 {
 	return _sched_setscheduler(p, policy, param, false);
 }
+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
 
 /*
  * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
@@ -7585,14 +7663,9 @@
 	rcu_read_lock();
 	retval = -ESRCH;
 	p = find_process_by_pid(pid);
-	if (likely(p))
-		get_task_struct(p);
-	rcu_read_unlock();
-
-	if (likely(p)) {
+	if (p != NULL)
 		retval = sched_setscheduler(p, policy, &lparam);
-		put_task_struct(p);
-	}
+	rcu_read_unlock();
 
 	return retval;
 }
@@ -7990,6 +8063,8 @@
 		goto out_put_task;
 
 	retval = __sched_setaffinity(p, in_mask);
+	trace_android_rvh_sched_setaffinity(p, in_mask, &retval);
+
 out_put_task:
 	put_task_struct(p);
 	return retval;
@@ -8524,6 +8599,7 @@
 
 	print_worker_info(KERN_INFO, p);
 	print_stop_info(KERN_INFO, p);
+	trace_android_vh_sched_show_task(p);
 	show_stack(p, NULL, KERN_INFO);
 	put_task_stack(p);
 }
@@ -9117,6 +9193,7 @@
 	sched_core_cpu_starting(cpu);
 	sched_rq_cpu_starting(cpu);
 	sched_tick_start(cpu);
+	trace_android_rvh_sched_cpu_starting(cpu);
 	return 0;
 }
 
@@ -9190,6 +9267,8 @@
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
+	trace_android_rvh_sched_cpu_dying(cpu);
+
 	calc_load_migrate(rq);
 	update_max_interval();
 	hrtick_clear(rq);
@@ -9250,7 +9329,9 @@
  * Every task in system belongs to this group at bootup.
  */
 struct task_group root_task_group;
+EXPORT_SYMBOL_GPL(root_task_group);
 LIST_HEAD(task_groups);
+EXPORT_SYMBOL_GPL(task_groups);
 
 /* Cacheline aligned slab cache for task_group */
 static struct kmem_cache *task_group_cache __read_mostly;
@@ -9535,6 +9616,8 @@
 	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
 				 preempt_disable_ip);
 
+	trace_android_rvh_schedule_bug(NULL);
+
 	dump_stack();
 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 }
@@ -9914,6 +9997,7 @@
 	mutex_unlock(&uclamp_mutex);
 #endif
 
+	trace_android_rvh_cpu_cgroup_online(css);
 	return 0;
 }
 
@@ -9979,6 +10063,9 @@
 		if (ret)
 			break;
 	}
+
+	trace_android_rvh_cpu_cgroup_can_attach(tset, &ret);
+
 	return ret;
 }
 
@@ -9989,6 +10076,8 @@
 
 	cgroup_taskset_for_each(task, css, tset)
 		sched_move_task(task);
+
+	trace_android_rvh_cpu_cgroup_attach(tset);
 }
 
 #ifdef CONFIG_UCLAMP_TASK_GROUP
@@ -10166,6 +10255,27 @@
 	cpu_uclamp_print(sf, UCLAMP_MAX);
 	return 0;
 }
+
+static int cpu_uclamp_ls_write_u64(struct cgroup_subsys_state *css,
+				   struct cftype *cftype, u64 ls)
+{
+	struct task_group *tg;
+
+	if (ls > 1)
+		return -EINVAL;
+	tg = css_tg(css);
+	tg->latency_sensitive = (unsigned int) ls;
+
+	return 0;
+}
+
+static u64 cpu_uclamp_ls_read_u64(struct cgroup_subsys_state *css,
+				  struct cftype *cft)
+{
+	struct task_group *tg = css_tg(css);
+
+	return (u64) tg->latency_sensitive;
+}
 #endif /* CONFIG_UCLAMP_TASK_GROUP */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -10608,6 +10718,12 @@
 		.seq_show = cpu_uclamp_max_show,
 		.write = cpu_uclamp_max_write,
 	},
+	{
+		.name = "uclamp.latency_sensitive",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.read_u64 = cpu_uclamp_ls_read_u64,
+		.write_u64 = cpu_uclamp_ls_write_u64,
+	},
 #endif
 	{ }	/* Terminate */
 };
@@ -10806,6 +10922,12 @@
 		.seq_show = cpu_uclamp_max_show,
 		.write = cpu_uclamp_max_write,
 	},
+	{
+		.name = "uclamp.latency_sensitive",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.read_u64 = cpu_uclamp_ls_read_u64,
+		.write_u64 = cpu_uclamp_ls_write_u64,
+	},
 #endif
 	{ }	/* terminate */
 };
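
The cpu.uclamp.latency_sensitive attribute registered in both cftype tables above is a plain 0/1 knob backed by cpu_uclamp_ls_read_u64()/cpu_uclamp_ls_write_u64(). A minimal userspace sketch of flipping it, assuming a cgroup v2 hierarchy mounted at /sys/fs/cgroup and an existing group named example.slice (both paths are assumptions for illustration):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Hypothetical path; depends on the cgroup mount point and group name. */
	#define LS_FILE "/sys/fs/cgroup/example.slice/cpu.uclamp.latency_sensitive"

	int main(void)
	{
		int fd = open(LS_FILE, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* cpu_uclamp_ls_write_u64() rejects anything other than 0 or 1. */
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}
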
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 7c2fe50..3d5f5a8 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -75,3 +75,4 @@
 		(policy->dvfs_possible_from_any_cpu &&
 		 rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)));
 }
+EXPORT_SYMBOL_GPL(cpufreq_this_cpu_can_update);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index e7af188..045638e 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -12,6 +12,7 @@
 
 #include <linux/sched/cpufreq.h>
 #include <trace/events/power.h>
+#include <trace/hooks/sched.h>
 
 #define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)
 
@@ -103,11 +104,17 @@
 static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
 				   unsigned int next_freq)
 {
+	bool should_update = true;
+
 	if (sg_policy->need_freq_update)
 		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
 	else if (sg_policy->next_freq == next_freq)
 		return false;
 
+	trace_android_rvh_set_sugov_update(sg_policy, next_freq, &should_update);
+	if (!should_update)
+		return false;
+
 	sg_policy->next_freq = next_freq;
 	sg_policy->last_freq_update_time = time;
 
@@ -150,9 +157,14 @@
 	struct cpufreq_policy *policy = sg_policy->policy;
 	unsigned int freq = arch_scale_freq_invariant() ?
 				policy->cpuinfo.max_freq : policy->cur;
+	unsigned long next_freq = 0;
 
 	util = map_util_perf(util);
-	freq = map_util_freq(util, freq, max);
+	trace_android_vh_map_util_freq(util, freq, max, &next_freq);
+	if (next_freq)
+		freq = next_freq;
+	else
+		freq = map_util_freq(util, freq, max);
 
 	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
 		return sg_policy->next_freq;
@@ -596,6 +608,7 @@
 	if (policy->fast_switch_enabled)
 		return 0;
 
+	trace_android_vh_set_sugov_sched_attr(&attr);
 	kthread_init_work(&sg_policy->work, sugov_work);
 	kthread_init_worker(&sg_policy->worker);
 	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
@@ -835,29 +848,3 @@
 #endif
 
 cpufreq_governor_init(schedutil_gov);
-
-#ifdef CONFIG_ENERGY_MODEL
-static void rebuild_sd_workfn(struct work_struct *work)
-{
-	rebuild_sched_domains_energy();
-}
-static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
-
-/*
- * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
- * on governor changes to make sure the scheduler knows about it.
- */
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-				  struct cpufreq_governor *old_gov)
-{
-	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
-		/*
-		 * When called from the cpufreq_register_driver() path, the
-		 * cpu_hotplug_lock is already held, so use a work item to
-		 * avoid nested locking in rebuild_sched_domains().
-		 */
-		schedule_work(&rebuild_sd_work);
-	}
-
-}
-#endif
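
The two hooks added above let a vendor module veto a frequency update or replace the util-to-frequency map entirely; when a probe leaves *next_freq at zero, the default map_util_freq() path is used. A sketch of a module overriding the mapping, assuming the register_trace_android_vh_map_util_freq() wrapper conventionally generated by the vendor-hook macros (not shown in this patch):

	#include <linux/module.h>
	#include <trace/hooks/sched.h>

	/* Probe signature mirrors the trace_android_vh_map_util_freq() call site. */
	static void vendor_map_util_freq(void *data, unsigned long util,
					 unsigned long freq, unsigned long cap,
					 unsigned long *next_freq)
	{
		/* Example policy: linear map with no headroom over raw utilization. */
		*next_freq = freq * util / cap;
	}

	static int __init vendor_freq_init(void)
	{
		return register_trace_android_vh_map_util_freq(vendor_map_util_freq,
							       NULL);
	}
	module_init(vendor_freq_init);
	MODULE_LICENSE("GPL");
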
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index d583f2a..6f5fd1d 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -65,8 +65,29 @@
 	return cpupri;
 }
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/**
+ * drop_nopreempt_cpus - remove likely non-preemptible CPUs from the mask
+ * @lowest_mask: mask with selected CPUs (non-NULL)
+ */
+static void
+drop_nopreempt_cpus(struct cpumask *lowest_mask)
+{
+	unsigned int cpu = cpumask_first(lowest_mask);
+
+	while (cpu < nr_cpu_ids) {
+		/* unlocked access */
+		struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);
+
+		if (task_may_not_preempt(task, cpu))
+			cpumask_clear_cpu(cpu, lowest_mask);
+		cpu = cpumask_next(cpu, lowest_mask);
+	}
+}
+#endif
+
 static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
-				struct cpumask *lowest_mask, int idx)
+				struct cpumask *lowest_mask, int idx,
+				bool drop_nopreempts)
 {
 	struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
 	int skip = 0;
@@ -103,6 +124,11 @@
 	if (lowest_mask) {
 		cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+		if (drop_nopreempts)
+			drop_nopreempt_cpus(lowest_mask);
+#endif
+
 		/*
 		 * We have to ensure that we have at least one bit
 		 * still set in the array, since the map could have
@@ -147,12 +173,16 @@
 {
 	int task_pri = convert_prio(p->prio);
 	int idx, cpu;
+	bool drop_nopreempts = task_pri <= MAX_RT_PRIO;
 
 	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+retry:
+#endif
 	for (idx = 0; idx < task_pri; idx++) {
 
-		if (!__cpupri_find(cp, p, lowest_mask, idx))
+		if (!__cpupri_find(cp, p, lowest_mask, idx, drop_nopreempts))
 			continue;
 
 		if (!lowest_mask || !fitness_fn)
@@ -175,6 +205,17 @@
 	}
 
 	/*
+	 * If no preemptible CPU was found, retry with the non-preemptible
+	 * CPUs included so we can still find the lowest-priority target
+	 * and avoid priority inversion.
+	 */
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+	if (drop_nopreempts) {
+		drop_nopreempts = false;
+		goto retry;
+	}
+#endif
+
+	/*
 	 * If we failed to find a fitting lowest_mask, kick off a new search
 	 * but without taking into account any fitness criteria this time.
 	 *
@@ -196,6 +237,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpupri_find_fitness);
 
 /**
  * cpupri_set - update the CPU priority setting
@@ -314,3 +356,16 @@
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
+
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/*
+ * cpupri_check_rt - check if the current CPU is running an RT task
+ * Must be called from an RCU-sched read-side section.
+ */
+bool cpupri_check_rt(void)
+{
+	int cpu = raw_smp_processor_id();
+
+	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
+#endif
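
The retry in cpupri_find_fitness() is easiest to see as two passes over the same candidate mask: first with the likely non-preemptible CPUs dropped, then, if that empties the mask, with them restored so a push target still exists. A userspace model of the mask arithmetic (the helpers are simplified stand-ins, not kernel API):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t find_candidates(uint32_t lowest, uint32_t nopreempt,
					int drop_nopreempts)
	{
		return drop_nopreempts ? lowest & ~nopreempt : lowest;
	}

	int main(void)
	{
		uint32_t lowest = 0x0f;		/* CPUs 0-3 hold lower-prio tasks */
		uint32_t nopreempt = 0x0f;	/* all busy with long softirqs */
		uint32_t mask = find_candidates(lowest, nopreempt, 1);

		if (!mask)	/* first pass failed: retry without dropping */
			mask = find_candidates(lowest, nopreempt, 0);
		printf("push target mask: %#x\n", mask);
		return 0;
	}
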
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 9392aea..5ced2cf 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -2,7 +2,9 @@
 /*
  * Simple CPU accounting cgroup controller
  */
+#include <linux/cpufreq_times.h>
 #include "sched.h"
+#include <trace/hooks/sched.h>
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 
@@ -18,6 +20,7 @@
  * compromise in place of having locks on each irq in account_system_time.
  */
 DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_irqtime);
 
 static int sched_clock_irqtime;
 
@@ -72,6 +75,8 @@
 		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
 	else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
 		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
+
+	trace_android_rvh_account_irq(curr, cpu, delta);
 }
 
 static u64 irqtime_tick_accounted(u64 maxtime)
@@ -130,6 +135,9 @@
 
 	/* Account for user time used */
 	acct_account_cputime(p);
+
+	/* Account power usage for user time */
+	cpufreq_acct_update_power(p, cputime);
 }
 
 /*
@@ -174,6 +182,9 @@
 
 	/* Account for system time used */
 	acct_account_cputime(p);
+
+	/* Account power usage for system time */
+	cpufreq_acct_update_power(p, cputime);
 }
 
 /*
@@ -458,6 +469,7 @@
 	*ut = cputime.utime;
 	*st = cputime.stime;
 }
+EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
 
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
 
@@ -628,6 +640,8 @@
 	thread_group_cputime(p, &cputime);
 	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
 }
+EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
+
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 7dcbaa3..d2bbc65 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -48,9 +48,10 @@
 #define SCHED_FEAT(name, enabled)	\
 	#name ,
 
-static const char * const sched_feat_names[] = {
+const char * const sched_feat_names[] = {
 #include "features.h"
 };
+EXPORT_SYMBOL_GPL(sched_feat_names);
 
 #undef SCHED_FEAT
 
@@ -79,6 +80,7 @@
 struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 #include "features.h"
 };
+EXPORT_SYMBOL_GPL(sched_feat_keys);
 
 #undef SCHED_FEAT
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6e476f6..17bf4caa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -22,6 +22,8 @@
  */
 #include "sched.h"
 
+#include <trace/hooks/sched.h>
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  *
@@ -36,6 +38,7 @@
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
  */
 unsigned int sysctl_sched_latency			= 6000000ULL;
+EXPORT_SYMBOL_GPL(sysctl_sched_latency);
 static unsigned int normalized_sysctl_sched_latency	= 6000000ULL;
 
 /*
@@ -585,11 +588,13 @@
  */
 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	trace_android_rvh_enqueue_entity(cfs_rq, se);
 	rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less);
 }
 
 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	trace_android_rvh_dequeue_entity(cfs_rq, se);
 	rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
@@ -4077,7 +4082,10 @@
 
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 {
-	if (!static_branch_unlikely(&sched_asym_cpucapacity))
+	bool need_update = true;
+
+	trace_android_rvh_update_misfit_status(p, rq, &need_update);
+	if (!static_branch_unlikely(&sched_asym_cpucapacity) || !need_update)
 		return;
 
 	if (!p || p->nr_cpus_allowed == 1) {
@@ -4186,6 +4194,7 @@
 
 	/* ensure we never gain time by being placed backwards. */
 	se->vruntime = max_vruntime(se->vruntime, vruntime);
+	trace_android_rvh_place_entity(cfs_rq, se, initial, &vruntime);
 }
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -4389,9 +4398,14 @@
 	unsigned long ideal_runtime, delta_exec;
 	struct sched_entity *se;
 	s64 delta;
+	bool skip_preempt = false;
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+	trace_android_rvh_check_preempt_tick(current, &ideal_runtime, &skip_preempt,
+			delta_exec, cfs_rq, curr, sysctl_sched_min_granularity);
+	if (skip_preempt)
+		return;
 	if (delta_exec > ideal_runtime) {
 		resched_curr(rq_of(cfs_rq));
 		/*
@@ -4420,8 +4434,7 @@
 		resched_curr(rq_of(cfs_rq));
 }
 
-static void
-set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	clear_buddies(cfs_rq, se);
 
@@ -4457,6 +4470,7 @@
 
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
+EXPORT_SYMBOL_GPL(set_next_entity);
 
 static int
 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
@@ -4472,7 +4486,11 @@
 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 	struct sched_entity *left = __pick_first_entity(cfs_rq);
-	struct sched_entity *se;
+	struct sched_entity *se = NULL;
+
+	trace_android_rvh_pick_next_entity(cfs_rq, curr, &se);
+	if (se)
+		goto done;
 
 	/*
 	 * If curr is set we have to see if its left of the leftmost entity
@@ -4514,6 +4532,7 @@
 		se = cfs_rq->last;
 	}
 
+done:
 	return se;
 }
 
@@ -4576,6 +4595,7 @@
 
 	if (cfs_rq->nr_running > 1)
 		check_preempt_tick(cfs_rq, curr);
+	trace_android_rvh_entity_tick(cfs_rq, curr);
 }
 
 
@@ -5564,6 +5584,7 @@
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
 	int task_new = !(flags & ENQUEUE_WAKEUP);
+	int should_iowait_boost;
 
 	/*
 	 * The code below (indirectly) updates schedutil which looks at
@@ -5578,7 +5599,9 @@
 	 * utilization updates, so do it here explicitly with the IOWAIT flag
 	 * passed.
 	 */
-	if (p->in_iowait)
+	should_iowait_boost = p->in_iowait;
+	trace_android_rvh_set_iowait(p, rq, &should_iowait_boost);
+	if (should_iowait_boost)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
 	for_each_sched_entity(se) {
@@ -5600,6 +5623,7 @@
 		flags = ENQUEUE_WAKEUP;
 	}
 
+	trace_android_rvh_enqueue_task_fair(rq, p, flags);
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 
@@ -5712,6 +5736,7 @@
 		flags |= DEQUEUE_SLEEP;
 	}
 
+	trace_android_rvh_dequeue_task_fair(rq, p, flags);
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 
@@ -6649,6 +6674,7 @@
 	unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask));
 	unsigned long max_util = 0, sum_util = 0;
 	unsigned long _cpu_cap = cpu_cap;
+	unsigned long energy = 0;
 	int cpu;
 
 	_cpu_cap -= arch_scale_thermal_pressure(cpumask_first(pd_mask));
@@ -6705,7 +6731,11 @@
 		max_util = max(max_util, min(cpu_util, _cpu_cap));
 	}
 
-	return em_cpu_energy(pd->em_pd, max_util, sum_util, _cpu_cap);
+	trace_android_vh_em_cpu_energy(pd->em_pd, max_util, sum_util, &energy);
+	if (!energy)
+		energy = em_cpu_energy(pd->em_pd, max_util, sum_util, _cpu_cap);
+
+	return energy;
 }
 
 /*
@@ -6747,20 +6777,39 @@
  * other use-cases too. So, until someone finds a better way to solve this,
  * let's keep things simple by re-using the existing slow path.
  */
-static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
 {
 	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+	int max_spare_cap_cpu_ls = prev_cpu, best_idle_cpu = -1;
 	int cpu, best_energy_cpu = prev_cpu, target = -1;
+	unsigned long max_spare_cap_ls = 0, target_cap;
 	unsigned long cpu_cap, util, base_energy = 0;
+	bool boosted, latency_sensitive = false;
+	unsigned int min_exit_lat = UINT_MAX;
+	struct cpuidle_state *idle;
 	struct sched_domain *sd;
 	struct perf_domain *pd;
+	int new_cpu = INT_MAX;
+
+	sync_entity_load_avg(&p->se);
+	trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, &new_cpu);
+	if (new_cpu != INT_MAX)
+		return new_cpu;
 
 	rcu_read_lock();
 	pd = rcu_dereference(rd->pd);
 	if (!pd || READ_ONCE(rd->overutilized))
 		goto unlock;
 
+	cpu = smp_processor_id();
+	if (sync && cpu_rq(cpu)->nr_running == 1 &&
+	    cpumask_test_cpu(cpu, p->cpus_ptr) &&
+	    task_fits_capacity(p, capacity_of(cpu))) {
+		rcu_read_unlock();
+		return cpu;
+	}
+
 	/*
 	 * Energy-aware wake-up happens on the lowest sched_domain starting
 	 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
@@ -6773,10 +6822,13 @@
 
 	target = prev_cpu;
 
-	sync_entity_load_avg(&p->se);
 	if (!task_util_est(p))
 		goto unlock;
 
+	latency_sensitive = uclamp_latency_sensitive(p);
+	boosted = uclamp_boosted(p);
+	target_cap = boosted ? 0 : ULONG_MAX;
+
 	for (; pd; pd = pd->next) {
 		unsigned long cur_delta, spare_cap, max_spare_cap = 0;
 		bool compute_prev_delta = false;
@@ -6803,7 +6855,7 @@
 			if (!fits_capacity(util, cpu_cap))
 				continue;
 
-			if (cpu == prev_cpu) {
+			if (!latency_sensitive && cpu == prev_cpu) {
 				/* Always use prev_cpu as a candidate. */
 				compute_prev_delta = true;
 			} else if (spare_cap > max_spare_cap) {
@@ -6814,9 +6866,32 @@
 				max_spare_cap = spare_cap;
 				max_spare_cap_cpu = cpu;
 			}
+
+			if (!latency_sensitive)
+				continue;
+
+			if (idle_cpu(cpu)) {
+				cpu_cap = capacity_orig_of(cpu);
+				if (boosted && cpu_cap < target_cap)
+					continue;
+				if (!boosted && cpu_cap > target_cap)
+					continue;
+				idle = idle_get_state(cpu_rq(cpu));
+				if (idle && idle->exit_latency > min_exit_lat &&
+						cpu_cap == target_cap)
+					continue;
+
+				if (idle)
+					min_exit_lat = idle->exit_latency;
+				target_cap = cpu_cap;
+				best_idle_cpu = cpu;
+			} else if (spare_cap > max_spare_cap_ls) {
+				max_spare_cap_ls = spare_cap;
+				max_spare_cap_cpu_ls = cpu;
+			}
 		}
 
-		if (max_spare_cap_cpu < 0 && !compute_prev_delta)
+		if (!latency_sensitive && max_spare_cap_cpu < 0 && !compute_prev_delta)
 			continue;
 
 		/* Compute the 'base' energy of the pd, without @p */
@@ -6846,6 +6921,9 @@
 	}
 	rcu_read_unlock();
 
+	if (latency_sensitive)
+		return best_idle_cpu >= 0 ? best_idle_cpu : max_spare_cap_cpu_ls;
+
 	/*
 	 * Pick the best CPU if prev_cpu cannot be used, or if it saves at
 	 * least 6% of the energy used by prev_cpu.
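
For reference, mainline 5.16 implements the "at least 6%" test mentioned above as (prev_delta - best_delta) > ((prev_delta + base_energy) >> 4), i.e. a 6.25% threshold. A worked example with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned long base_energy = 1000, prev_delta = 200, best_delta = 100;
		unsigned long threshold = (prev_delta + base_energy) >> 4; /* 75 */

		/* saving of 100 exceeds the 75 threshold, so migration wins */
		printf("saving=%lu threshold=%lu -> %s\n",
		       prev_delta - best_delta, threshold,
		       (prev_delta - best_delta) > threshold ? "migrate" : "stay");
		return 0;
	}
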
@@ -6880,9 +6958,18 @@
 	int cpu = smp_processor_id();
 	int new_cpu = prev_cpu;
 	int want_affine = 0;
+	int target_cpu = -1;
 	/* SD_flags and WF_flags share the first nibble */
 	int sd_flag = wake_flags & 0xF;
 
+	if (trace_android_rvh_select_task_rq_fair_enabled() &&
+	    !(sd_flag & SD_BALANCE_FORK))
+		sync_entity_load_avg(&p->se);
+	trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag,
+			wake_flags, &target_cpu);
+	if (target_cpu >= 0)
+		return target_cpu;
+
 	/*
 	 * required for stable ->cpus_allowed
 	 */
@@ -6891,7 +6978,7 @@
 		record_wakee(p);
 
 		if (sched_energy_enabled()) {
-			new_cpu = find_energy_efficient_cpu(p, prev_cpu);
+			new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
 			if (new_cpu >= 0)
 				return new_cpu;
 			new_cpu = prev_cpu;
@@ -7100,9 +7187,14 @@
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 	int next_buddy_marked = 0;
 	int cse_is_idle, pse_is_idle;
+	bool ignore = false;
+	bool preempt = false;
 
 	if (unlikely(se == pse))
 		return;
+	trace_android_rvh_check_preempt_wakeup_ignore(curr, &ignore);
+	if (ignore)
+		return;
 
 	/*
 	 * This is possible from callers such as attach_tasks(), in which we
@@ -7159,6 +7251,13 @@
 		return;
 
 	update_curr(cfs_rq_of(se));
+	trace_android_rvh_check_preempt_wakeup(rq, p, &preempt, &ignore,
+			wake_flags, se, pse, next_buddy_marked, sysctl_sched_wakeup_granularity);
+	if (preempt)
+		goto preempt;
+	if (ignore)
+		return;
+
 	if (wakeup_preempt_entity(se, pse) == 1) {
 		/*
 		 * Bias pick_next to pick the sched entity that is
@@ -7226,9 +7325,10 @@
 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct cfs_rq *cfs_rq = &rq->cfs;
-	struct sched_entity *se;
-	struct task_struct *p;
+	struct sched_entity *se = NULL;
+	struct task_struct *p = NULL;
 	int new_tasks;
+	bool repick = false;
 
 again:
 	if (!sched_fair_runnable(rq))
@@ -7282,7 +7382,7 @@
 	} while (cfs_rq);
 
 	p = task_of(se);
-
+	trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, false, prev);
 	/*
 	 * Since we haven't yet done put_prev_entity and if the selected task
 	 * is a different task than we started out with, try and touch the
@@ -7315,6 +7415,10 @@
 	if (prev)
 		put_prev_task(rq, prev);
 
+	trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, true, prev);
+	if (repick)
+		goto done;
+
 	do {
 		se = pick_next_entity(cfs_rq, NULL);
 		set_next_entity(cfs_rq, se);
@@ -7556,7 +7660,8 @@
  *      rewrite all of this once again.]
  */
 
-static unsigned long __read_mostly max_load_balance_interval = HZ/10;
+unsigned long __read_mostly max_load_balance_interval = HZ/10;
+EXPORT_SYMBOL_GPL(max_load_balance_interval);
 
 enum fbq_type { regular, remote, all };
 
@@ -7636,6 +7741,7 @@
 	enum fbq_type		fbq_type;
 	enum migration_type	migration_type;
 	struct list_head	tasks;
+	struct rq_flags		*src_rq_rf;
 };
 
 /*
@@ -7750,9 +7856,14 @@
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
 	int tsk_cache_hot;
+	int can_migrate = 1;
 
 	lockdep_assert_rq_held(env->src_rq);
 
+	trace_android_rvh_can_migrate_task(p, env->dst_cpu, &can_migrate);
+	if (!can_migrate)
+		return 0;
+
 	/*
 	 * We do not migrate tasks that are:
 	 * 1) throttled_lb_pair, or
@@ -7840,8 +7951,20 @@
  */
 static void detach_task(struct task_struct *p, struct lb_env *env)
 {
+	int detached = 0;
+
 	lockdep_assert_rq_held(env->src_rq);
 
+	/*
+	 * The vendor hook may drop the lock temporarily, so
+	 * pass the rq flags so it can unpin the lock. The rq
+	 * lock must be held again when the hook returns.
+	 */
+	trace_android_rvh_migrate_queued_task(env->src_rq, env->src_rq_rf, p,
+					      env->dst_cpu, &detached);
+	if (detached)
+		return;
+
 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
 	set_task_cpu(p, env->dst_cpu);
 }
@@ -8367,6 +8490,7 @@
 	if (!capacity)
 		capacity = 1;
 
+	trace_android_rvh_update_cpu_capacity(cpu, &capacity);
 	cpu_rq(cpu)->cpu_capacity = capacity;
 	trace_sched_cpu_capacity_tp(cpu_rq(cpu));
 
@@ -9491,8 +9615,12 @@
 
 	if (sched_energy_enabled()) {
 		struct root_domain *rd = env->dst_rq->rd;
+		int out_balance = 1;
 
-		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
+		trace_android_rvh_find_busiest_group(sds.busiest, env->dst_rq,
+					&out_balance);
+		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)
+					&& out_balance)
 			goto out_balanced;
 	}
 
@@ -9611,7 +9739,12 @@
 	struct rq *busiest = NULL, *rq;
 	unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
 	unsigned int busiest_nr = 0;
-	int i;
+	int i, done = 0;
+
+	trace_android_rvh_find_busiest_queue(env->dst_cpu, group, env->cpus,
+					     &busiest, &done);
+	if (done)
+		return busiest;
 
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		unsigned long capacity, load, util;
@@ -9908,6 +10041,7 @@
 
 more_balance:
 		rq_lock_irqsave(busiest, &rf);
+		env.src_rq_rf = &rf;
 		update_rq_clock(busiest);
 
 		/*
@@ -10201,6 +10335,7 @@
 			.src_rq		= busiest_rq,
 			.idle		= CPU_IDLE,
 			.flags		= LBF_ACTIVE_LB,
+			.src_rq_rf	= &rf,
 		};
 
 		schedstat_inc(sd->alb_count);
@@ -10432,6 +10567,7 @@
 	struct sched_domain *sd;
 	int nr_busy, i, cpu = rq->cpu;
 	unsigned int flags = 0;
+	int done = 0;
 
 	if (unlikely(rq->idle_balance))
 		return;
@@ -10456,6 +10592,10 @@
 	if (time_before(now, nohz.next_balance))
 		goto out;
 
+	trace_android_rvh_sched_nohz_balancer_kick(rq, &flags, &done);
+	if (done)
+		goto out;
+
 	if (rq->nr_running >= 2) {
 		flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 		goto out;
@@ -10862,6 +11002,11 @@
 	u64 t0, t1, curr_cost = 0;
 	struct sched_domain *sd;
 	int pulled_task = 0;
+	int done = 0;
+
+	trace_android_rvh_sched_newidle_balance(this_rq, rf, &pulled_task, &done);
+	if (done)
+		return pulled_task;
 
 	update_misfit_status(NULL, this_rq);
 
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index a554e3b..3734bc6 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -306,6 +306,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__update_load_avg_blocked_se);
 
 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index b48baab..5ea227d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -7,6 +7,8 @@
 
 #include "pelt.h"
 
+#include <trace/hooks/sched.h>
+
 int sched_rr_timeslice = RR_TIMESLICE;
 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
 /* More than 4 hours if BW_SHIFT equals 20. */
@@ -978,6 +980,13 @@
 		if (likely(rt_b->rt_runtime)) {
 			rt_rq->rt_throttled = 1;
 			printk_deferred_once("sched: RT throttling activated\n");
+
+			trace_android_vh_dump_throttled_rt_tasks(
+				raw_smp_processor_id(),
+				rq_clock(rq_of_rt_rq(rt_rq)),
+				sched_rt_period(rt_rq),
+				runtime,
+				hrtimer_get_expires_ns(&rt_b->rt_period_timer));
 		} else {
 			/*
 			 * In case we did anyway, make it go away,
@@ -1550,12 +1559,40 @@
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/*
+ * Return whether the task on the given cpu is currently non-preemptible
+ * while handling a potentially long softirq, or if the task is likely
+ * to block preemption soon because it is the ksoftirqd thread that is
+ * handling slow softirqs.
+ */
+bool
+task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	__u32 softirqs = per_cpu(active_softirqs, cpu) |
+			 local_softirq_pending();
+	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
+
+	return ((softirqs & LONG_SOFTIRQ_MASK) &&
+		(task == cpu_ksoftirqd ||
+		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
+}
+EXPORT_SYMBOL_GPL(task_may_not_preempt);
+#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
+
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
 	bool test;
+	int target_cpu = -1;
+	bool may_not_preempt;
+
+	trace_android_rvh_select_task_rq_rt(p, cpu, flags & 0xF,
+					flags, &target_cpu);
+	if (target_cpu >= 0)
+		return target_cpu;
 
 	/* For anything but wake ups, just return the task_cpu */
 	if (!(flags & (WF_TTWU | WF_FORK)))
@@ -1567,7 +1604,12 @@
 	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
-	 * If the current task on @p's runqueue is an RT task, then
+	 * If the current task on @p's runqueue is a softirq task,
+	 * it may run without preemption for a time that is
+	 * ill-suited for a waiting RT task. Therefore, try to
+	 * wake this RT task on another runqueue.
+	 *
+	 * Also, if the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -1592,9 +1634,10 @@
 	 * requirement of the task - which is only important on heterogeneous
 	 * systems like big.LITTLE.
 	 */
-	test = curr &&
-	       unlikely(rt_task(curr)) &&
-	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
+	may_not_preempt = task_may_not_preempt(curr, cpu);
+	test = (curr && (may_not_preempt ||
+			 (unlikely(rt_task(curr)) &&
+			  (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));
 
 	if (test || !rt_task_fits_capacity(p, cpu)) {
 		int target = find_lowest_rq(p);
@@ -1607,11 +1650,14 @@
 			goto out_unlock;
 
 		/*
-		 * Don't bother moving it if the destination CPU is
+		 * If this CPU is non-preemptible, prefer the remote CPU
+		 * even if it is running a higher-priority task.
+		 * Otherwise, don't bother moving it if the destination CPU is
 		 * not running a lower priority task.
 		 */
 		if (target != -1 &&
-		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
+		    (may_not_preempt ||
+		     p->prio < cpu_rq(target)->rt.highest_prio.curr))
 			cpu = target;
 	}
 
@@ -1652,6 +1698,8 @@
 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 {
 	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
+		int done = 0;
+
 		/*
 		 * This is OK, because current is on_cpu, which avoids it being
 		 * picked for load-balance and preemption/IRQs are still
@@ -1659,7 +1707,9 @@
 		 * not yet started the picking loop.
 		 */
 		rq_unpin_lock(rq, rf);
-		pull_rt_task(rq);
+		trace_android_rvh_sched_balance_rt(rq, p, &done);
+		if (!done)
+			pull_rt_task(rq);
 		rq_repin_lock(rq, rf);
 	}
 
@@ -1812,7 +1862,7 @@
  * Return the highest pushable rq's task, which is suitable to be executed
  * on the CPU, NULL otherwise
  */
-static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
+struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
 {
 	struct plist_head *head = &rq->rt.pushable_tasks;
 	struct task_struct *p;
@@ -1827,6 +1877,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(pick_highest_pushable_task);
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
@@ -1835,7 +1886,7 @@
 	struct sched_domain *sd;
 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
 	int this_cpu = smp_processor_id();
-	int cpu      = task_cpu(task);
+	int cpu      = -1;
 	int ret;
 
 	/* Make sure the mask is initialized first */
@@ -1860,9 +1911,15 @@
 				  task, lowest_mask);
 	}
 
+	trace_android_rvh_find_lowest_rq(task, lowest_mask, ret, &cpu);
+	if (cpu >= 0)
+		return cpu;
+
 	if (!ret)
 		return -1; /* No targets found */
 
+	cpu = task_cpu(task);
+
 	/*
 	 * At this point we have built a mask of CPUs representing the
 	 * lowest priority tasks in the system.  Now we want to elect
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0e66749..c441761 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -66,6 +66,7 @@
 #include <linux/syscalls.h>
 #include <linux/task_work.h>
 #include <linux/tsacct_kern.h>
+#include <linux/android_vendor.h>
 
 #include <asm/tlb.h>
 
@@ -438,6 +439,10 @@
 	struct uclamp_se	uclamp_req[UCLAMP_CNT];
 	/* Effective clamp values used for a task group */
 	struct uclamp_se	uclamp[UCLAMP_CNT];
+	/* Latency-sensitive flag used for a task group */
+	unsigned int		latency_sensitive;
+
+	ANDROID_VENDOR_DATA_ARRAY(1, 4);
 #endif
 
 };
@@ -859,6 +864,8 @@
 	 * CPUs of the rd. Protected by RCU.
 	 */
 	struct perf_domain __rcu *pd;
+
+	ANDROID_VENDOR_DATA_ARRAY(1, 4);
 };
 
 extern void init_defrootdomain(void);
@@ -870,6 +877,7 @@
 #ifdef HAVE_RT_PUSH_IPI
 extern void rto_push_irq_work_func(struct irq_work *work);
 #endif
+extern struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu);
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_UCLAMP_TASK
@@ -1114,6 +1122,8 @@
 	unsigned char		core_forceidle;
 	unsigned int		core_forceidle_seq;
 #endif
+
+	ANDROID_VENDOR_DATA_ARRAY(1, 96);
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1684,8 +1694,6 @@
 };
 extern void sched_setnuma(struct task_struct *p, int node);
 extern int migrate_task_to(struct task_struct *p, int cpu);
-extern int migrate_swap(struct task_struct *p, struct task_struct *t,
-			int cpu, int scpu);
 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
 #else
 static inline void
@@ -1696,6 +1704,9 @@
 
 #ifdef CONFIG_SMP
 
+extern int migrate_swap(struct task_struct *p, struct task_struct *t,
+			int cpu, int scpu);
+
 static inline void
 queue_balance_callback(struct rq *rq,
 		       struct callback_head *head,
@@ -1957,6 +1968,8 @@
 #undef SCHED_FEAT
 
 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
+extern const char * const sched_feat_names[__SCHED_FEAT_NR];
+
 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
 
 #else /* !CONFIG_JUMP_LABEL */
@@ -2258,6 +2271,7 @@
 
 extern int push_cpu_stop(void *arg);
 
+extern unsigned long __read_mostly max_load_balance_interval;
 #endif
 
 #ifdef CONFIG_CPU_IDLE
@@ -2869,6 +2883,11 @@
 	return clamp(util, min_util, max_util);
 }
 
+static inline bool uclamp_boosted(struct task_struct *p)
+{
+	return uclamp_eff_value(p, UCLAMP_MIN) > 0;
+}
+
 /*
  * When uclamp is compiled in, the aggregation at rq level is 'turned off'
  * by default in the fast path and only gets turned on once userspace performs
@@ -2889,12 +2908,36 @@
 	return util;
 }
 
+static inline bool uclamp_boosted(struct task_struct *p)
+{
+	return false;
+}
+
 static inline bool uclamp_is_used(void)
 {
 	return false;
 }
 #endif /* CONFIG_UCLAMP_TASK */
 
+#ifdef CONFIG_UCLAMP_TASK_GROUP
+static inline bool uclamp_latency_sensitive(struct task_struct *p)
+{
+	struct cgroup_subsys_state *css = task_css(p, cpu_cgrp_id);
+	struct task_group *tg;
+
+	if (!css)
+		return false;
+	tg = container_of(css, struct task_group, css);
+
+	return tg->latency_sensitive;
+}
+#else
+static inline bool uclamp_latency_sensitive(struct task_struct *p)
+{
+	return false;
+}
+#endif /* CONFIG_UCLAMP_TASK_GROUP */
+
 #ifdef arch_scale_freq_capacity
 # ifndef arch_scale_freq_invariant
 #  define arch_scale_freq_invariant()	true
@@ -3054,3 +3097,14 @@
 extern void sched_dynamic_update(int mode);
 #endif
 
+/*
+ * task_may_not_preempt - check whether a task may not be preemptible soon
+ */
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+extern bool task_may_not_preempt(struct task_struct *task, int cpu);
+#else
+static inline bool task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	return false;
+}
+#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
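
uclamp_boosted() above is true whenever the task's effective UCLAMP_MIN is non-zero, which userspace can arrange with sched_setattr(). A hedged sketch: struct sched_attr is declared locally because glibc does not export it, and the 0x20 flag value is taken from the 5.16 uapi headers:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	struct sched_attr {
		uint32_t size;
		uint32_t sched_policy;
		uint64_t sched_flags;
		int32_t  sched_nice;
		uint32_t sched_priority;
		uint64_t sched_runtime;
		uint64_t sched_deadline;
		uint64_t sched_period;
		uint32_t sched_util_min;
		uint32_t sched_util_max;
	};

	#define SCHED_FLAG_UTIL_CLAMP_MIN 0x20

	int main(void)
	{
		struct sched_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.sched_flags = SCHED_FLAG_UTIL_CLAMP_MIN;
		attr.sched_util_min = 256;	/* 25% of SCHED_CAPACITY_SCALE */

		/* boost the calling task; uclamp_boosted() now returns true */
		if (syscall(SYS_sched_setattr, 0, &attr, 0))
			perror("sched_setattr");
		return 0;
	}
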
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index d201a70..72a52e2 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -4,6 +4,8 @@
  */
 #include "sched.h"
 
+#include <trace/hooks/sched.h>
+
 DEFINE_MUTEX(sched_domains_mutex);
 
 /* Protected by sched_domains_mutex: */
@@ -327,8 +329,7 @@
  *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy.
  *    3. no SMT is detected.
  *    4. the EM complexity is low enough to keep scheduling overheads low;
- *    5. schedutil is driving the frequency of all CPUs of the rd;
- *    6. frequency invariance support is present;
+ *    5. frequency invariance support is present;
  *
  * The complexity of the Energy Model is defined as:
  *
@@ -348,21 +349,23 @@
  */
 #define EM_MAX_COMPLEXITY 2048
 
-extern struct cpufreq_governor schedutil_gov;
 static bool build_perf_domains(const struct cpumask *cpu_map)
 {
 	int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
 	struct perf_domain *pd = NULL, *tmp;
 	int cpu = cpumask_first(cpu_map);
 	struct root_domain *rd = cpu_rq(cpu)->rd;
-	struct cpufreq_policy *policy;
-	struct cpufreq_governor *gov;
+	bool eas_check = false;
 
 	if (!sysctl_sched_energy_aware)
 		goto free;
 
-	/* EAS is enabled for asymmetric CPU capacity topologies. */
-	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
+	/*
+	 * EAS is enabled for asymmetric CPU capacity topologies.
+	 * Allow vendor to override if desired.
+	 */
+	trace_android_rvh_build_perf_domains(&eas_check);
+	if (!per_cpu(sd_asym_cpucapacity, cpu) && !eas_check) {
 		if (sched_debug()) {
 			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
 					cpumask_pr_args(cpu_map));
@@ -390,19 +393,6 @@
 		if (find_pd(pd, i))
 			continue;
 
-		/* Do not attempt EAS if schedutil is not being used. */
-		policy = cpufreq_cpu_get(i);
-		if (!policy)
-			goto free;
-		gov = policy->governor;
-		cpufreq_cpu_put(policy);
-		if (gov != &schedutil_gov) {
-			if (rd->pd)
-				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
-						cpumask_pr_args(cpu_map));
-			goto free;
-		}
-
 		/* Create the new pd and add it to the local list. */
 		tmp = pd_init(i);
 		if (!tmp)
@@ -2274,6 +2264,7 @@
 		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
 	}
+	trace_android_vh_build_sched_domains(has_asym);
 
 	ret = 0;
 error:
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 41f4709..5b49d16 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -33,6 +33,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_entry);
+
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -59,6 +61,14 @@
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);
+
+/*
+ * active_softirqs -- per-CPU mask of the softirqs currently being handled.
+ * Approximate answers are acceptable here, so the mask is accessed with no
+ * synchronization.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
 
 const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
@@ -80,21 +90,6 @@
 		wake_up_process(tsk);
 }
 
-/*
- * If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness,
- * unless we're doing some of the synchronous softirqs.
- */
-#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
-static bool ksoftirqd_running(unsigned long pending)
-{
-	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
-
-	if (pending & SOFTIRQ_NOW_MASK)
-		return false;
-	return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
-}
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 DEFINE_PER_CPU(int, hardirqs_enabled);
 DEFINE_PER_CPU(int, hardirq_context);
@@ -236,7 +231,7 @@
 		goto out;
 
 	pending = local_softirq_pending();
-	if (!pending || ksoftirqd_running(pending))
+	if (!pending)
 		goto out;
 
 	/*
@@ -419,9 +414,6 @@
 
 static inline void invoke_softirq(void)
 {
-	if (ksoftirqd_running(local_softirq_pending()))
-		return;
-
 	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
@@ -455,7 +447,7 @@
 
 	pending = local_softirq_pending();
 
-	if (pending && !ksoftirqd_running(pending))
+	if (pending)
 		do_softirq_own_stack();
 
 	local_irq_restore(flags);
@@ -512,6 +504,17 @@
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
+static inline __u32 softirq_deferred_for_rt(__u32 *pending)
+{
+	__u32 deferred = 0;
+
+	if (cpupri_check_rt()) {
+		deferred = *pending & LONG_SOFTIRQ_MASK;
+		*pending &= ~LONG_SOFTIRQ_MASK;
+	}
+	return deferred;
+}
+
 asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -519,6 +522,7 @@
 	int max_restart = MAX_SOFTIRQ_RESTART;
 	struct softirq_action *h;
 	bool in_hardirq;
+	__u32 deferred;
 	__u32 pending;
 	int softirq_bit;
 
@@ -531,13 +535,15 @@
 
 	pending = local_softirq_pending();
 
+	deferred = softirq_deferred_for_rt(&pending);
 	softirq_handle_begin();
 	in_hardirq = lockdep_softirq_start();
 	account_softirq_enter(current);
 
 restart:
 	/* Reset the pending bitmask before enabling irqs */
-	set_softirq_pending(0);
+	set_softirq_pending(deferred);
+	__this_cpu_write(active_softirqs, pending);
 
 	local_irq_enable();
 
@@ -567,6 +573,7 @@
 		pending >>= softirq_bit;
 	}
 
+	__this_cpu_write(active_softirqs, 0);
 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
 	    __this_cpu_read(ksoftirqd) == current)
 		rcu_softirq_qs();
@@ -574,14 +581,17 @@
 	local_irq_disable();
 
 	pending = local_softirq_pending();
+	deferred = softirq_deferred_for_rt(&pending);
+
 	if (pending) {
 		if (time_before(jiffies, end) && !need_resched() &&
 		    --max_restart)
 			goto restart;
-
-		wakeup_softirqd();
 	}
 
+	if (pending | deferred)
+		wakeup_softirqd();
+
 	account_softirq_exit(current);
 	lockdep_softirq_end(in_hardirq);
 	softirq_handle_end();
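
The deferral above works purely on the pending bitmask: when an RT task owns the CPU, the LONG_SOFTIRQ_MASK bits are carved out of the pending set, written back via set_softirq_pending(deferred), and ksoftirqd is woken for them. A userspace model of that arithmetic (the mask value is illustrative; the real LONG_SOFTIRQ_MASK is defined elsewhere in this series):

	#include <stdint.h>
	#include <stdio.h>

	#define LONG_SOFTIRQ_MASK 0x0208	/* made-up bits for the demo */

	/* mirrors softirq_deferred_for_rt(); rt_running stands in for
	 * cpupri_check_rt() */
	static uint32_t defer_for_rt(uint32_t *pending, int rt_running)
	{
		uint32_t deferred = 0;

		if (rt_running) {
			deferred = *pending & LONG_SOFTIRQ_MASK;
			*pending &= ~LONG_SOFTIRQ_MASK;
		}
		return deferred;
	}

	int main(void)
	{
		uint32_t pending = 0x030c;
		uint32_t deferred = defer_for_rt(&pending, 1);

		printf("handle now: %#x, wake ksoftirqd for: %#x\n",
		       pending, deferred);
		return 0;
	}
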
@@ -780,10 +790,15 @@
 		if (tasklet_trylock(t)) {
 			if (!atomic_read(&t->count)) {
 				if (tasklet_clear_sched(t)) {
-					if (t->use_callback)
+					if (t->use_callback) {
+						trace_tasklet_entry(t->callback);
 						t->callback(t);
-					else
+						trace_tasklet_exit(t->callback);
+					} else {
+						trace_tasklet_entry(t->func);
 						t->func(t->data);
+						trace_tasklet_exit(t->func);
+					}
 				}
 				tasklet_unlock(t);
 				continue;
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index cbc3027..5e0e2c3 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -387,6 +387,7 @@
 	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
 	return cpu_stop_queue_work(cpu, work_buf);
 }
+EXPORT_SYMBOL_GPL(stop_one_cpu_nowait);
 
 static bool queue_stop_cpus_work(const struct cpumask *cpumask,
 				 cpu_stop_fn_t fn, void *arg,
diff --git a/kernel/sys.c b/kernel/sys.c
index 8fdac0d..9dd3aa3 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -42,6 +42,8 @@
 #include <linux/version.h>
 #include <linux/ctype.h>
 #include <linux/syscall_user_dispatch.h>
+#include <linux/mm.h>
+#include <linux/mempolicy.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -74,6 +76,8 @@
 
 #include "uid16.h"
 
+#include <trace/hooks/sys.h>
+
 #ifndef SET_UNALIGN_CTL
 # define SET_UNALIGN_CTL(a, b)	(-EINVAL)
 #endif
@@ -2259,6 +2263,153 @@
 	return -EINVAL;
 }
 
+#ifdef CONFIG_MMU
+static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
+		struct vm_area_struct **prev,
+		unsigned long start, unsigned long end,
+		const char __user *name_addr)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int error = 0;
+	pgoff_t pgoff;
+
+	if (name_addr == vma_get_anon_name(vma)) {
+		*prev = vma;
+		goto out;
+	}
+
+	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+	*prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
+				vma->vm_file, pgoff, vma_policy(vma),
+				vma->vm_userfaultfd_ctx, name_addr);
+	if (*prev) {
+		vma = *prev;
+		goto success;
+	}
+
+	*prev = vma;
+
+	if (start != vma->vm_start) {
+		error = split_vma(mm, vma, start, 1);
+		if (error)
+			goto out;
+	}
+
+	if (end != vma->vm_end) {
+		error = split_vma(mm, vma, end, 0);
+		if (error)
+			goto out;
+	}
+
+success:
+	if (!vma->vm_file)
+		vma->anon_name = name_addr;
+
+out:
+	if (error == -ENOMEM)
+		error = -EAGAIN;
+	return error;
+}
+
+static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
+			unsigned long arg)
+{
+	unsigned long tmp;
+	struct vm_area_struct *vma, *prev;
+	int unmapped_error = 0;
+	int error = -EINVAL;
+
+	/*
+	 * If the interval [start,end) covers some unmapped address
+	 * ranges, just ignore them, but return -ENOMEM at the end.
+	 * - this matches the handling in madvise.
+	 */
+	vma = find_vma_prev(current->mm, start, &prev);
+	if (vma && start > vma->vm_start)
+		prev = vma;
+
+	for (;;) {
+		/* Still start < end. */
+		error = -ENOMEM;
+		if (!vma)
+			return error;
+
+		/* Here start < (end|vma->vm_end). */
+		if (start < vma->vm_start) {
+			unmapped_error = -ENOMEM;
+			start = vma->vm_start;
+			if (start >= end)
+				return error;
+		}
+
+		/* Here vma->vm_start <= start < (end|vma->vm_end) */
+		tmp = vma->vm_end;
+		if (end < tmp)
+			tmp = end;
+
+		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
+		error = prctl_update_vma_anon_name(vma, &prev, start, tmp,
+				(const char __user *)arg);
+		if (error)
+			return error;
+		start = tmp;
+		if (prev && start < prev->vm_end)
+			start = prev->vm_end;
+		error = unmapped_error;
+		if (start >= end)
+			return error;
+		if (prev)
+			vma = prev->vm_next;
+		else	/* defensive: mirrors the madvise() loop */
+			vma = find_vma(current->mm, start);
+	}
+}
+
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+		unsigned long len_in, unsigned long arg)
+{
+	struct mm_struct *mm = current->mm;
+	int error;
+	unsigned long len;
+	unsigned long end;
+
+	if (start & ~PAGE_MASK)
+		return -EINVAL;
+	len = (len_in + ~PAGE_MASK) & PAGE_MASK;
+
+	/* Check whether len was rounded up from a small negative value to zero */
+	if (len_in && !len)
+		return -EINVAL;
+
+	end = start + len;
+	if (end < start)
+		return -EINVAL;
+
+	if (end == start)
+		return 0;
+
+	mmap_write_lock(mm);
+
+	switch (opt) {
+	case PR_SET_VMA_ANON_NAME:
+		error = prctl_set_vma_anon_name(start, end, arg);
+		break;
+	default:
+		error = -EINVAL;
+	}
+
+	mmap_write_unlock(mm);
+
+	return error;
+}
+#else /* CONFIG_MMU */
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+		unsigned long len_in, unsigned long arg)
+{
+	return -EINVAL;
+}
+#endif
+
 #define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
 
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
@@ -2473,6 +2624,9 @@
 			return -EINVAL;
 		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
 		break;
+	case PR_SET_VMA:
+		error = prctl_set_vma(arg2, arg3, arg4, arg5);
+		break;
 	case PR_PAC_RESET_KEYS:
 		if (arg3 || arg4 || arg5)
 			return -EINVAL;
@@ -2534,6 +2688,7 @@
 		error = -EINVAL;
 		break;
 	}
+	trace_android_vh_syscall_prctl_finished(option, me);
 	return error;
 }
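
A userspace sketch of naming an anonymous mapping through the new prctl. The PR_SET_VMA constants match the values later merged upstream and are defined locally in case the toolchain headers predate them; note that this patch stores the user pointer itself (vma->anon_name = name_addr), so the string must stay valid for the life of the mapping:

	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_VMA
	#define PR_SET_VMA		0x53564d41
	#define PR_SET_VMA_ANON_NAME	0
	#endif

	/* static storage: the kernel keeps this pointer, not a copy */
	static const char name[] = "demo buffer";

	int main(void)
	{
		size_t len = 4096;
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		/* on success the region shows as [anon:demo buffer] in maps */
		if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
			  (unsigned long)p, len, name))
			perror("prctl");
		return 0;
	}
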
 
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index b1b9b12..d5ab3fb 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -16,6 +16,7 @@
 #include <linux/sched_clock.h>
 #include <linux/seqlock.h>
 #include <linux/bitops.h>
+#include <trace/hooks/epoch.h>
 
 #include "timekeeping.h"
 
@@ -270,6 +271,7 @@
 	update_sched_clock();
 	hrtimer_cancel(&sched_clock_timer);
 	rd->read_sched_clock = suspended_sched_clock_read;
+	trace_android_vh_show_suspend_epoch_val(rd->epoch_ns, rd->epoch_cyc);
 
 	return 0;
 }
@@ -281,6 +283,7 @@
 	rd->epoch_cyc = cd.actual_read_sched_clock();
 	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
 	rd->read_sched_clock = cd.actual_read_sched_clock;
+	trace_android_vh_show_resume_epoch_val(rd->epoch_cyc);
 }
 
 static struct syscore_ops sched_clock_ops = {
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 4678935..5c47cc5 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -17,6 +17,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <trace/events/power.h>
+#include <trace/hooks/sched.h>
 
 #include <asm/irq_regs.h>
 
@@ -95,6 +96,7 @@
 		write_seqcount_end(&jiffies_seq);
 		raw_spin_unlock(&jiffies_lock);
 		update_wall_time();
+		trace_android_vh_jiffies_update(NULL);
 	}
 
 	update_process_times(user_mode(get_irq_regs()));
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 17a283c..18866cd 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -26,6 +26,7 @@
 #include <linux/posix-timers.h>
 #include <linux/context_tracking.h>
 #include <linux/mm.h>
+#include <trace/hooks/sched.h>
 
 #include <asm/irq_regs.h>
 
@@ -193,8 +194,10 @@
 #endif
 
 	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
+	if (tick_do_timer_cpu == cpu) {
 		tick_do_update_jiffies64(now);
+		trace_android_vh_jiffies_update(NULL);
+	}
 
 	if (ts->inidle)
 		ts->got_idle_tick = 1;
@@ -1202,6 +1205,7 @@
 
 	return ktime_sub(next_event, now);
 }
+EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length);
 
 /**
  * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
@@ -1215,6 +1219,7 @@
 
 	return ts->idle_calls;
 }
+EXPORT_SYMBOL_GPL(tick_nohz_get_idle_calls_cpu);
 
 /**
  * tick_nohz_get_idle_calls - return the current idle calls counter value
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 29923b2..ecc59b45f 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -686,6 +686,7 @@
 	return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
 #endif
 }
+EXPORT_SYMBOL_GPL(nsec_to_clock_t);
 
 u64 jiffies64_to_nsecs(u64 j)
 {
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 85f1021..4c54b37 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -55,6 +55,8 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/timer.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/timer.h>
 
 __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
 
@@ -503,6 +505,7 @@
 	 * Round up with level granularity to prevent this.
 	 */
 	expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
+	trace_android_vh_timer_calc_index(lvl, &expires);
 	*bucket_expiry = expires << LVL_SHIFT(lvl);
 	return LVL_OFFS(lvl) + (expires & LVL_MASK);
 }
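
The hook above fires after the round-up that keeps a timer from expiring early. A worked example of that rounding, assuming mainline's LVL_CLK_SHIFT of 3 (so level-1 buckets are 8 jiffies wide):

	#include <stdio.h>

	#define LVL_CLK_SHIFT	3
	#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
	#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

	int main(void)
	{
		unsigned long expires = 100, lvl = 1;
		/* (100 + 8) >> 3 = 13; the bucket boundary is 13 << 3 = 104 */
		unsigned long idx = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);

		printf("bucket_expiry = %lu\n", idx << LVL_SHIFT(lvl));
		return 0;
	}
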
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index 21bb161..3ca551c 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -18,4 +18,6 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency);
 EXPORT_TRACEPOINT_SYMBOL_GPL(powernv_throttle);
-
+EXPORT_TRACEPOINT_SYMBOL_GPL(device_pm_callback_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(device_pm_callback_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(clock_set_rate);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 88de94da..695eafa 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -49,6 +49,7 @@
 #include <linux/fsnotify.h>
 #include <linux/irq_work.h>
 #include <linux/workqueue.h>
+#include <trace/hooks/ftrace_dump.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -9713,8 +9714,17 @@
 static int trace_panic_handler(struct notifier_block *this,
 			       unsigned long event, void *unused)
 {
+	bool ftrace_check = false;
+
+	trace_android_vh_ftrace_oops_enter(&ftrace_check);
+
+	if (ftrace_check)
+		return NOTIFY_OK;
+
 	if (ftrace_dump_on_oops)
 		ftrace_dump(ftrace_dump_on_oops);
+
+	trace_android_vh_ftrace_oops_exit(&ftrace_check);
 	return NOTIFY_OK;
 }
 
@@ -9728,6 +9738,13 @@
 			     unsigned long val,
 			     void *data)
 {
+	bool ftrace_check = false;
+
+	trace_android_vh_ftrace_oops_enter(&ftrace_check);
+
+	if (ftrace_check)
+		return NOTIFY_OK;
+
 	switch (val) {
 	case DIE_OOPS:
 		if (ftrace_dump_on_oops)
@@ -9736,6 +9753,8 @@
 	default:
 		break;
 	}
+
+	trace_android_vh_ftrace_oops_exit(&ftrace_check);
 	return NOTIFY_OK;
 }
 
@@ -9760,6 +9779,8 @@
 void
 trace_printk_seq(struct trace_seq *s)
 {
+	bool dump_printk = true;
+
 	/* Probably should print a warning here. */
 	if (s->seq.len >= TRACE_MAX_PRINT)
 		s->seq.len = TRACE_MAX_PRINT;
@@ -9775,7 +9796,9 @@
 	/* should be zero ended, but we are paranoid. */
 	s->buffer[s->seq.len] = 0;
 
-	printk(KERN_TRACE "%s", s->buffer);
+	trace_android_vh_ftrace_dump_buffer(s, &dump_printk);
+	if (dump_printk)
+		printk(KERN_TRACE "%s", s->buffer);
 
 	trace_seq_init(s);
 }
@@ -9808,6 +9831,8 @@
 	unsigned int old_userobj;
 	unsigned long flags;
 	int cnt = 0, cpu;
+	bool ftrace_check = false;
+	unsigned long size;
 
 	/* Only allow one dump user at a time. */
 	if (atomic_inc_return(&dump_running) != 1) {
@@ -9837,6 +9862,8 @@
 
 	for_each_tracing_cpu(cpu) {
 		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
+		size = ring_buffer_size(iter.array_buffer->buffer, cpu);
+		trace_android_vh_ftrace_size_check(size, &ftrace_check);
 	}
 
 	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -9844,6 +9871,9 @@
 	/* don't look at user memory in panic mode */
 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
+	if (ftrace_check)
+		goto out_enable;
+
 	switch (oops_dump_mode) {
 	case DUMP_ALL:
 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
@@ -9874,6 +9904,7 @@
 	 */
 
 	while (!trace_empty(&iter)) {
+		ftrace_check = true;
 
 		if (!cnt)
 			printk(KERN_TRACE "---------------------------------\n");
@@ -9881,7 +9912,9 @@
 		cnt++;
 
 		trace_iterator_reset(&iter);
-		iter.iter_flags |= TRACE_FILE_LAT_FMT;
+		trace_android_vh_ftrace_format_check(&ftrace_check);
+		if (ftrace_check)
+			iter.iter_flags |= TRACE_FILE_LAT_FMT;
 
 		if (trace_find_next_entry_inc(&iter) != NULL) {
 			int ret;
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index f493804..c881e5a 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -14,6 +14,8 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/preemptirq.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/preemptirq.h>
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 /* Per-cpu variable to prevent redundant calls when IRQs already off */
@@ -28,8 +30,11 @@
 void trace_hardirqs_on_prepare(void)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_enable(CALLER_ADDR0,
+						      CALLER_ADDR1);
+		}
 		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
@@ -40,8 +45,11 @@
 void trace_hardirqs_on(void)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_enable(CALLER_ADDR0,
+						      CALLER_ADDR1);
+		}
 		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
@@ -63,8 +71,11 @@
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
 		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_disable(CALLER_ADDR0,
+						       CALLER_ADDR1);
+		}
 	}
 
 }
@@ -78,8 +89,11 @@
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
 		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_disable(CALLER_ADDR0,
+						       CALLER_ADDR1);
+		}
 	}
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
@@ -88,8 +102,11 @@
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+			trace_android_rvh_irqs_enable(CALLER_ADDR0,
+						      caller_addr);
+		}
 		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
@@ -107,8 +124,11 @@
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
 		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+			trace_android_rvh_irqs_disable(CALLER_ADDR0,
+						      caller_addr);
+		}
 	}
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
@@ -119,15 +139,19 @@
 
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	if (!in_nmi())
+	if (!in_nmi()) {
 		trace_preempt_enable_rcuidle(a0, a1);
+		trace_android_rvh_preempt_enable(a0, a1);
+	}
 	tracer_preempt_on(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	if (!in_nmi())
+	if (!in_nmi()) {
 		trace_preempt_disable_rcuidle(a0, a1);
+		trace_android_rvh_preempt_disable(a0, a1);
+	}
 	tracer_preempt_off(a0, a1);
 }
 #endif
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 64ea283f..d05628d 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -786,3 +786,82 @@
 	}
 }
 #endif
+
+#ifdef CONFIG_ANDROID_VENDOR_HOOKS
+
+static void *rvh_zalloc_funcs(int count)
+{
+	return kzalloc(sizeof(struct tracepoint_func) * count, GFP_KERNEL);
+}
+
+#define ANDROID_RVH_NR_PROBES_MAX	2
+static int rvh_func_add(struct tracepoint *tp, struct tracepoint_func *func)
+{
+	int i;
+
+	if (!static_key_enabled(&tp->key)) {
+		/* '+ 1' for the last NULL element */
+		tp->funcs = rvh_zalloc_funcs(ANDROID_RVH_NR_PROBES_MAX + 1);
+		if (!tp->funcs)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < ANDROID_RVH_NR_PROBES_MAX; i++) {
+		if (!tp->funcs[i].func) {
+			if (!static_key_enabled(&tp->key))
+				tp->funcs[i].data = func->data;
+			WRITE_ONCE(tp->funcs[i].func, func->func);
+
+			return 0;
+		}
+	}
+
+	return -EBUSY;
+}
+
+static int android_rvh_add_func(struct tracepoint *tp, struct tracepoint_func *func)
+{
+	int ret;
+
+	if (tp->regfunc && !static_key_enabled(&tp->key)) {
+		ret = tp->regfunc();
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = rvh_func_add(tp, func);
+	if (ret)
+		return ret;
+	tracepoint_update_call(tp, tp->funcs);
+	static_key_enable(&tp->key);
+
+	return 0;
+}
+
+int android_rvh_probe_register(struct tracepoint *tp, void *probe, void *data)
+{
+	struct tracepoint_func tp_func;
+	int ret;
+
+	/*
+	 * Once the static key has been flipped, the array may be read
+	 * concurrently. Although __traceiter_*() always checks .func first,
+	 * it doesn't enforce read->read dependencies, and we can't strongly
+	 * guarantee it will see the correct .data for the second element
+	 * without adding smp_load_acquire() in the fast path. But this is a
+	 * corner case which is unlikely to be needed by anybody in practice,
+	 * so let's just forbid it and keep the fast path clean.
+	 */
+	if (WARN_ON(static_key_enabled(&tp->key) && data))
+		return -EINVAL;
+
+	mutex_lock(&tracepoints_mutex);
+	tp_func.func = probe;
+	tp_func.data = data;
+	ret = android_rvh_add_func(tp, &tp_func);
+	mutex_unlock(&tracepoints_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(android_rvh_probe_register);
+#endif
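
A sketch of the consumer side of android_rvh_probe_register(): a vendor module attaching to the restricted set_user_nice() hook seen earlier. The probe signature mirrors the trace_android_rvh_set_user_nice(p, &nice, &allowed) call site, and register_trace_android_rvh_set_user_nice() is the macro-generated wrapper name assumed here. Restricted hooks have no unregister path, so such a module must never unload:

	#include <linux/module.h>
	#include <linux/sched.h>
	#include <trace/hooks/sched.h>

	static void vendor_set_user_nice(void *unused, struct task_struct *p,
					 long *nice, bool *allowed)
	{
		/* Example policy: never let callers go below nice -10. */
		if (*nice < -10)
			*nice = -10;	/* the normal range checks still apply */
	}

	static int __init vendor_nice_init(void)
	{
		return register_trace_android_rvh_set_user_nice(vendor_set_user_nice,
								NULL);
	}
	module_init(vendor_nice_init);
	MODULE_LICENSE("GPL");
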
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 613917b..c346adb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -54,6 +54,10 @@
 
 #include "workqueue_internal.h"
 
+#include <trace/hooks/wqlockup.h>
+/* events/workqueue.h uses default TRACE_INCLUDE_PATH */
+#undef TRACE_INCLUDE_PATH
+
 enum {
 	/*
 	 * worker_pool flags
@@ -5890,6 +5894,7 @@
 			pr_cont_pool_info(pool);
 			pr_cont(" stuck for %us!\n",
 				jiffies_to_msecs(now - pool_ts) / 1000);
+			trace_android_vh_wq_lockup_pool(pool->cpu, pool_ts);
 		}
 	}
 
diff --git a/mm/OWNERS b/mm/OWNERS
new file mode 100644
index 0000000..02326c9
--- /dev/null
+++ b/mm/OWNERS
@@ -0,0 +1,4 @@
+hridya@google.com
+kaleshsingh@google.com
+surenb@google.com
+minchan@google.com
diff --git a/mm/cma.c b/mm/cma.c
index bc9ca8f..cf730c0 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -24,6 +24,7 @@
 #include <linux/memblock.h>
 #include <linux/err.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/log2.h>
@@ -31,6 +32,8 @@
 #include <linux/highmem.h>
 #include <linux/io.h>
 #include <linux/kmemleak.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
 #include <trace/events/cma.h>
 
 #include "cma.h"
@@ -52,6 +55,7 @@
 {
 	return cma->name;
 }
+EXPORT_SYMBOL_GPL(cma_get_name);
 
 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
 					     unsigned int align_order)
@@ -433,6 +437,8 @@
 	unsigned long i;
 	struct page *page = NULL;
 	int ret = -ENOMEM;
+	int num_attempts = 0;
+	int max_retries = 5;
 
 	if (!cma || !cma->count || !cma->bitmap)
 		goto out;
@@ -459,8 +465,28 @@
 				bitmap_maxno, start, bitmap_count, mask,
 				offset);
 		if (bitmap_no >= bitmap_maxno) {
-			spin_unlock_irq(&cma->lock);
-			break;
+			if ((num_attempts < max_retries) && (ret == -EBUSY)) {
+				spin_unlock_irq(&cma->lock);
+
+				if (fatal_signal_pending(current))
+					break;
+
+				/*
+				 * Page may be momentarily pinned by some other
+				 * process which has been scheduled out, e.g.
+				 * in exit path, during unmap call, or process
+				 * fork and so cannot be freed there. Sleep
+				 * for 100ms and retry the allocation.
+				 */
+				start = 0;
+				ret = -ENOMEM;
+				schedule_timeout_killable(msecs_to_jiffies(100));
+				num_attempts++;
+				continue;
+			} else {
+				spin_unlock_irq(&cma->lock);
+				break;
+			}
 		}
 		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
 		/*
@@ -523,6 +549,7 @@
 
 	return page;
 }
+EXPORT_SYMBOL_GPL(cma_alloc);
 
 bool cma_pages_valid(struct cma *cma, const struct page *pages,
 		     unsigned long count)
@@ -573,6 +600,7 @@
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(cma_release);
 
 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 {
@@ -587,3 +615,4 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cma_for_each_area);
diff --git a/mm/madvise.c b/mm/madvise.c
index 8c927202..e0b57fb 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -138,7 +138,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
 			  vma->vm_file, pgoff, vma_policy(vma),
-			  vma->vm_userfaultfd_ctx);
+			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff --git a/mm/memblock.c b/mm/memblock.c
index 1018e50..26bceff 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1648,6 +1648,7 @@
 
 	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
 }
+EXPORT_SYMBOL_GPL(memblock_end_of_DRAM);
 
 static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
 {
diff --git a/mm/memory.c b/mm/memory.c
index 8f1de81..056ee6d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -73,6 +73,7 @@
 #include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/vmalloc.h>
+#include <trace/hooks/mm.h>
 
 #include <trace/events/kmem.h>
 
@@ -171,6 +172,7 @@
 {
 	trace_rss_stat(mm, member, count);
 }
+EXPORT_SYMBOL_GPL(mm_trace_rss_stat);
 
 #if defined(SPLIT_RSS_COUNTING)
 
@@ -3515,8 +3517,10 @@
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
 		    __swap_count(entry) == 1) {
 			/* skip swapcache */
-			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
-							vmf->address);
+			gfp_t flags = GFP_HIGHUSER_MOVABLE;
+
+			trace_android_rvh_set_skip_swapcache_flags(&flags);
+			page = alloc_page_vma(flags, vma, vmf->address);
 			if (page) {
 				__SetPageLocked(page);
 				__SetPageSwapBacked(page);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f6248af..bc526e08 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -810,7 +810,8 @@
 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 				 vma->anon_vma, vma->vm_file, pgoff,
-				 new_pol, vma->vm_userfaultfd_ctx);
+				 new_pol, vma->vm_userfaultfd_ctx,
+				 vma_get_anon_name(vma));
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
diff --git a/mm/mlock.c b/mm/mlock.c
index e263d62..c755bbc 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -512,7 +512,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
 			  vma->vm_file, pgoff, vma_policy(vma),
-			  vma->vm_userfaultfd_ctx);
+			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff --git a/mm/mmap.c b/mm/mmap.c
index bfb0ea1..8325a19 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1029,7 +1029,8 @@
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 				struct file *file, unsigned long vm_flags,
-				struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+				struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+				const char __user *anon_name)
 {
 	/*
 	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -1047,6 +1048,8 @@
 		return 0;
 	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
 		return 0;
+	if (vma_get_anon_name(vma) != anon_name)
+		return 0;
 	return 1;
 }
 
@@ -1079,9 +1082,10 @@
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
 		     struct anon_vma *anon_vma, struct file *file,
 		     pgoff_t vm_pgoff,
-		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+		     const char __user *anon_name)
 {
-	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		if (vma->vm_pgoff == vm_pgoff)
 			return 1;
@@ -1100,9 +1104,10 @@
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 		    struct anon_vma *anon_vma, struct file *file,
 		    pgoff_t vm_pgoff,
-		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+		    const char __user *anon_name)
 {
-	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		pgoff_t vm_pglen;
 		vm_pglen = vma_pages(vma);
@@ -1113,9 +1118,9 @@
 }
 
 /*
- * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
- * whether that can be merged with its predecessor or its successor.
- * Or both (it neatly fills a hole).
+ * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
+ * figure out whether that can be merged with its predecessor or its
+ * successor.  Or both (it neatly fills a hole).
  *
  * In most cases - when called for mmap, brk or mremap - [addr,end) is
  * certain not to be mapped by the time vma_merge is called; but when
@@ -1160,7 +1165,8 @@
 			unsigned long end, unsigned long vm_flags,
 			struct anon_vma *anon_vma, struct file *file,
 			pgoff_t pgoff, struct mempolicy *policy,
-			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+			struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+			const char __user *anon_name)
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 	struct vm_area_struct *area, *next;
@@ -1190,7 +1196,8 @@
 			mpol_equal(vma_policy(prev), policy) &&
 			can_vma_merge_after(prev, vm_flags,
 					    anon_vma, file, pgoff,
-					    vm_userfaultfd_ctx)) {
+					    vm_userfaultfd_ctx,
+					    anon_name)) {
 		/*
 		 * OK, it can.  Can we now merge in the successor as well?
 		 */
@@ -1199,7 +1206,8 @@
 				can_vma_merge_before(next, vm_flags,
 						     anon_vma, file,
 						     pgoff+pglen,
-						     vm_userfaultfd_ctx) &&
+						     vm_userfaultfd_ctx,
+						     anon_name) &&
 				is_mergeable_anon_vma(prev->anon_vma,
 						      next->anon_vma, NULL)) {
 							/* cases 1, 6 */
@@ -1222,7 +1230,8 @@
 			mpol_equal(policy, vma_policy(next)) &&
 			can_vma_merge_before(next, vm_flags,
 					     anon_vma, file, pgoff+pglen,
-					     vm_userfaultfd_ctx)) {
+					     vm_userfaultfd_ctx,
+					     anon_name)) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = __vma_adjust(prev, prev->vm_start,
 					 addr, prev->vm_pgoff, NULL, next);
@@ -1754,7 +1763,7 @@
 	 * Can we just expand an old mapping?
 	 */
 	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
-			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
+			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
 	if (vma)
 		goto out;
 
@@ -1803,7 +1812,8 @@
 		 */
 		if (unlikely(vm_flags != vma->vm_flags && prev)) {
 			merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
-				NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX);
+				NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX,
+				vma_get_anon_name(vma));
 			if (merge) {
 				/* ->mmap() can change vma->vm_file and fput the original file. So
 				 * fput the vma->vm_file here or we would add an extra fput for file
@@ -3056,7 +3066,7 @@
 
 	/* Can we just expand an old private anonymous mapping? */
 	vma = vma_merge(mm, prev, addr, addr + len, flags,
-			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
+			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
 	if (vma)
 		goto out;
 
@@ -3249,7 +3259,7 @@
 		return NULL;	/* should never get here */
 	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
 			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-			    vma->vm_userfaultfd_ctx);
+			    vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (new_vma) {
 		/*
 		 * Source vma may have been merged into new_vma
diff --git a/mm/mmzone.c b/mm/mmzone.c
index eb89d6e..b02fb2c 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -9,6 +9,7 @@
 #include <linux/stddef.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
+#include <trace/hooks/mm.h>
 
 struct pglist_data *first_online_pgdat(void)
 {
@@ -100,3 +101,19 @@
 	return last_cpupid;
 }
 #endif
+
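+/*
+ * Out-of-line replacement for the gfp_zone() helper from
+ * include/linux/gfp.h: a restricted vendor hook may rewrite the zone bits
+ * of @flags before the GFP_ZONE_TABLE lookup.
+ */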
+enum zone_type gfp_zone(gfp_t flags)
+{
+	enum zone_type z;
+	gfp_t local_flags = flags;
+	int bit;
+
+	trace_android_rvh_set_gfp_zone_flags(&local_flags);
+
+	bit = (__force int) ((local_flags) & GFP_ZONEMASK);
+
+	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
+					 ((1 << GFP_ZONES_SHIFT) - 1);
+	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+	return z;
+}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e552f5e..def2189 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -464,7 +464,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*pprev = vma_merge(mm, *pprev, start, end, newflags,
 			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-			   vma->vm_userfaultfd_ctx);
+			   vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*pprev) {
 		vma = *pprev;
 		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
diff --git a/mm/percpu.c b/mm/percpu.c
index f5b2c2e..537f2c0 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2419,6 +2419,7 @@
 		return page_to_phys(pcpu_addr_to_page(addr)) +
 		       offset_in_page(addr);
 }
+EXPORT_SYMBOL_GPL(per_cpu_ptr_to_phys);
 
 /**
  * pcpu_alloc_alloc_info - allocate percpu allocation info
diff --git a/mm/readahead.c b/mm/readahead.c
index 6ae5693..729fd5c 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -22,6 +22,7 @@
 #include <linux/blk-cgroup.h>
 #include <linux/fadvise.h>
 #include <linux/sched/mm.h>
+#include <trace/hooks/mm.h>
 
 #include "internal.h"
 
@@ -113,6 +114,15 @@
 
 EXPORT_SYMBOL(read_cache_pages);
 
+gfp_t readahead_gfp_mask(struct address_space *x)
+{
+	gfp_t mask = mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
+
+	trace_android_rvh_set_readahead_gfp_mask(&mask);
+	return mask;
+}
+EXPORT_SYMBOL_GPL(readahead_gfp_mask);
+
 static void read_pages(struct readahead_control *rac, struct list_head *pages,
 		bool skip_page)
 {
diff --git a/mm/util.c b/mm/util.c
index 741ba32..4eb081e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -27,6 +27,9 @@
 #include <linux/uaccess.h>
 
 #include "internal.h"
+#ifndef __GENSYMS__
+#include <trace/hooks/syscall_check.h>
+#endif
 
 /**
  * kfree_const - conditionally free memory
@@ -523,6 +526,7 @@
 		if (populate)
 			mm_populate(ret, populate);
 	}
+	trace_android_vh_check_mmap_file(file, prot, flag, ret);
 	return ret;
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 700434d..ad23270 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -62,6 +62,9 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/vmscan.h>
+
 struct scan_control {
 	/* How many pages shrink_list() should reclaim */
 	unsigned long nr_to_reclaim;
@@ -2737,6 +2740,7 @@
 	enum scan_balance scan_balance;
 	unsigned long ap, fp;
 	enum lru_list lru;
+	bool balance_anon_file_reclaim = false;
 
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
@@ -2774,11 +2778,15 @@
 		goto out;
 	}
 
+	trace_android_rvh_set_balance_anon_file_reclaim(&balance_anon_file_reclaim);
+
 	/*
 	 * If there is enough inactive page cache, we do not reclaim
-	 * anything from the anonymous working right now.
+	 * anything from the anonymous working set right now. But when
+	 * balancing anon and page cache files for reclaim, allow swapping of
+	 * anon pages even if there are a number of inactive file cache pages.
 	 */
-	if (sc->cache_trim_mode) {
+	if (!balance_anon_file_reclaim && sc->cache_trim_mode) {
 		scan_balance = SCAN_FILE;
 		goto out;
 	}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index d701c33..9528973 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1181,9 +1181,7 @@
 	"nr_zone_write_pending",
 	"nr_mlock",
 	"nr_bounce",
-#if IS_ENABLED(CONFIG_ZSMALLOC)
 	"nr_zspages",
-#endif
 	"nr_free_cma",
 
 	/* enum numa_stat_item counters */
diff --git a/net/OWNERS b/net/OWNERS
new file mode 100644
index 0000000..cbbfa70
--- /dev/null
+++ b/net/OWNERS
@@ -0,0 +1,2 @@
+lorenzo@google.com
+maze@google.com
diff --git a/net/TEST_MAPPING b/net/TEST_MAPPING
new file mode 100644
index 0000000..1eb8d43
--- /dev/null
+++ b/net/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+  "presubmit": [
+    {
+      "name": "CtsNetTestCases",
+      "options": [
+        {
+          "exclude-annotation": "com.android.testutils.SkipPresubmit"
+        }
+      ]
+    }
+  ]
+}
diff --git a/net/core/dev.c b/net/core/dev.c
index c4708e2..a2bc2d5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -150,6 +150,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/prandom.h>
 #include <linux/once_lite.h>
+#include <trace/hooks/net.h>
 
 #include "net-sysfs.h"
 
@@ -520,6 +521,12 @@
 
 static inline struct list_head *ptype_head(const struct packet_type *pt)
 {
+	struct list_head vendor_pt = { .next  = NULL, };
+
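+	/* Let a vendor hook supply its own list head for this packet type */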
+	trace_android_vh_ptype_head(pt, &vendor_pt);
+	if (vendor_pt.next)
+		return vendor_pt.next;
+
 	if (pt->type == htons(ETH_P_ALL))
 		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
 	else
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index c40cd8dd..5e07acb 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -35,13 +35,11 @@
 #include <trace/events/tcp.h>
 #include <trace/events/fib.h>
 #include <trace/events/qdisc.h>
-#if IS_ENABLED(CONFIG_BRIDGE)
 #include <trace/events/bridge.h>
 EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_add);
 EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_external_learn_add);
 EXPORT_TRACEPOINT_SYMBOL_GPL(fdb_delete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_update);
-#endif
 
 #if IS_ENABLED(CONFIG_PAGE_POOL)
 #include <trace/events/page_pool.h>
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 909db87..ef6d046 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -79,6 +79,7 @@
 #include <linux/capability.h>
 #include <linux/user_namespace.h>
 #include <linux/indirect_call_wrapper.h>
+#include <trace/hooks/net.h>
 
 #include "datagram.h"
 #include "sock_destructor.h"
@@ -770,6 +771,7 @@
 	if (!skb_unref(skb))
 		return;
 
+	trace_android_vh_kfree_skb(skb);
 	trace_kfree_skb(skb, __builtin_return_address(0));
 	__kfree_skb(skb);
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0ce4684..cd93749 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -80,6 +80,7 @@
 #include <linux/jump_label_ratelimit.h>
 #include <net/busy_poll.h>
 #include <net/mptcp.h>
+#include <trace/hooks/net.h>
 
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 
@@ -4684,6 +4685,7 @@
 
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
+	trace_android_vh_kfree_skb(skb);
 	sk_drops_add(sk, skb);
 	__kfree_skb(skb);
 }
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3445f80..c43eedf 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -218,6 +218,7 @@
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+	.accept_ra_rt_table	= 0,
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
@@ -278,6 +279,7 @@
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+	.accept_ra_rt_table	= 0,
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
@@ -2389,6 +2391,26 @@
 		goto regen;
 }
 
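+/*
+ * Choose the routing table for routes learned from router advertisements,
+ * based on the per-device accept_ra_rt_table sysctl:
+ *   0  - use @default_table (the unchanged upstream behaviour);
+ *   >0 - use that table;
+ *   <0 - use table (ifindex - accept_ra_rt_table), e.g. a value of -1000
+ *        on ifindex 3 selects table 1003.
+ */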
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table)
+{
+	struct inet6_dev *idev = in6_dev_get(dev);
+	int sysctl;
+	u32 table;
+
+	if (!idev)
+		return default_table;
+	sysctl = idev->cnf.accept_ra_rt_table;
+	if (sysctl == 0) {
+		table = default_table;
+	} else if (sysctl > 0) {
+		table = (u32) sysctl;
+	} else {
+		table = (unsigned) dev->ifindex + (-sysctl);
+	}
+	in6_dev_put(idev);
+	return table;
+}
+
 /*
  *	Add prefix route.
  */
@@ -2399,7 +2421,7 @@
 		      u32 flags, gfp_t gfp_flags)
 {
 	struct fib6_config cfg = {
-		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
+		.fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX),
 		.fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
 		.fc_ifindex = dev->ifindex,
 		.fc_expires = expires,
@@ -2434,7 +2456,7 @@
 	struct fib6_node *fn;
 	struct fib6_info *rt = NULL;
 	struct fib6_table *table;
-	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
+	u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX);
 
 	table = fib6_get_table(dev_net(dev), tb_id);
 	if (!table)
@@ -6781,6 +6803,13 @@
 #endif
 #endif
 	{
+		.procname	= "accept_ra_rt_table",
+		.data		= &ipv6_devconf.accept_ra_rt_table,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "proxy_ndp",
 		.data		= &ipv6_devconf.proxy_ndp,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 42d60c7..6189c53 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4275,7 +4275,7 @@
 					   const struct in6_addr *gwaddr,
 					   struct net_device *dev)
 {
-	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
+	u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
 	int ifindex = dev->ifindex;
 	struct fib6_node *fn;
 	struct fib6_info *rt = NULL;
@@ -4329,7 +4329,7 @@
 		.fc_nlinfo.nl_net = net,
 	};
 
-	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
+	cfg.fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
 	cfg.fc_dst = *prefix;
 	cfg.fc_gateway = *gwaddr;
 
@@ -4347,7 +4347,7 @@
 				     const struct in6_addr *addr,
 				     struct net_device *dev)
 {
-	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
+	u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_DFLT);
 	struct fib6_info *rt;
 	struct fib6_table *table;
 
@@ -4382,7 +4382,7 @@
 				     u32 defrtr_usr_metric)
 {
 	struct fib6_config cfg = {
-		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
+		.fc_table	= l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_DFLT),
 		.fc_metric	= defrtr_usr_metric,
 		.fc_ifindex	= dev->ifindex,
 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
@@ -4407,47 +4407,24 @@
 	return rt6_get_dflt_router(net, gwaddr, dev);
 }
 
-static void __rt6_purge_dflt_routers(struct net *net,
-				     struct fib6_table *table)
+static int rt6_addrconf_purge(struct fib6_info *rt, void *arg)
 {
-	struct fib6_info *rt;
+	struct net_device *dev = fib6_info_nh_dev(rt);
+	struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
 
-restart:
-	rcu_read_lock();
-	for_each_fib6_node_rt_rcu(&table->tb6_root) {
-		struct net_device *dev = fib6_info_nh_dev(rt);
-		struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
-
-		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
-		    (!idev || idev->cnf.accept_ra != 2) &&
-		    fib6_info_hold_safe(rt)) {
-			rcu_read_unlock();
-			ip6_del_rt(net, rt, false);
-			goto restart;
-		}
+	if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+	    (!idev || idev->cnf.accept_ra != 2)) {
+		/* Delete this route. See fib6_clean_tree() */
+		return -1;
 	}
-	rcu_read_unlock();
 
-	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
+	/* Continue walking */
+	return 0;
 }
 
 void rt6_purge_dflt_routers(struct net *net)
 {
-	struct fib6_table *table;
-	struct hlist_head *head;
-	unsigned int h;
-
-	rcu_read_lock();
-
-	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
-		head = &net->ipv6.fib_table_hash[h];
-		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
-			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
-				__rt6_purge_dflt_routers(net, table);
-		}
-	}
-
-	rcu_read_unlock();
+	fib6_clean_all(net, rt6_addrconf_purge, NULL);
 }
 
 static void rtmsg_to_fib6_config(struct net *net,
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 3646fc1..b40b94a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1522,6 +1522,29 @@
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/kbuild/modules.rst>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_QUOTA2
+	tristate '"quota2" match support'
+	depends on NETFILTER_ADVANCED
+	help
+	  This option adds a `quota2' match, which matches against a named
+	  byte (or packet) counter that is shared globally instead of being
+	  kept per CPU, so quotas are enforced correctly. Counters with the
+	  same name are shared between rules.
+	  This is based on http://xtables-addons.git.sourceforge.net
+
+	  If you want to compile it as a module, say M here and read
+	  <file:Documentation/kbuild/modules.rst>.  If unsure, say `N'.
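+
+	  Example rule (option names as provided by the xtables-addons
+	  userspace, which this match is taken from; adjust to your
+	  iptables build):
+	    iptables -A OUTPUT -m quota2 --name q1 --quota 5242880 -j ACCEPT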
+
+config NETFILTER_XT_MATCH_QUOTA2_LOG
+	bool '"quota2" Netfilter LOG support'
+	depends on NETFILTER_XT_MATCH_QUOTA2
+	default n
+	help
+	  This option allows `quota2' to log once when a quota limit
+	  is crossed. The message is sent over netlink using the
+	  NETLINK_NFLOG family, in the same format ipt_ULOG would use,
+	  but without the packet payload.
+
+	  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_RATEEST
 	tristate '"rateest" match support'
 	depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index aab20e5..3f2d184 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -195,6 +195,7 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 0f8bb0b..30ba8d7 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -28,6 +28,11 @@
 #include <linux/kobject.h>
 #include <linux/workqueue.h>
 #include <linux/sysfs.h>
+#include <linux/suspend.h>
+#include <net/sock.h>
+#include <net/inet_sock.h>
+
+#define NLMSG_MAX_SIZE 64
 
 struct idletimer_tg {
 	struct list_head entry;
@@ -38,15 +43,112 @@
 	struct kobject *kobj;
 	struct device_attribute attr;
 
+	struct timespec64 delayed_timer_trigger;
+	struct timespec64 last_modified_timer;
+	struct timespec64 last_suspend_time;
+	struct notifier_block pm_nb;
+
+	int timeout;
 	unsigned int refcnt;
 	u8 timer_type;
+
+	bool work_pending;
+	bool send_nl_msg;
+	bool active;
+	uid_t uid;
+	bool suspend_time_valid;
 };
 
 static LIST_HEAD(idletimer_tg_list);
 static DEFINE_MUTEX(list_mutex);
+static DEFINE_SPINLOCK(timestamp_lock);
 
 static struct kobject *idletimer_tg_kobj;
 
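+/*
+ * Work out which state a netlink uevent should report. If a delayed
+ * trigger was recorded, or more than @timer->timeout seconds have passed
+ * since the timer was last modified, report "inactive" and rewrite @ts to
+ * the time the timer actually expired; otherwise report the timer's
+ * current state.
+ */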
+static bool check_for_delayed_trigger(struct idletimer_tg *timer,
+				      struct timespec64 *ts)
+{
+	bool state;
+	struct timespec64 temp;
+	spin_lock_bh(&timestamp_lock);
+	timer->work_pending = false;
+	if ((ts->tv_sec - timer->last_modified_timer.tv_sec) > timer->timeout ||
+	    timer->delayed_timer_trigger.tv_sec != 0) {
+		state = false;
+		temp.tv_sec = timer->timeout;
+		temp.tv_nsec = 0;
+		if (timer->delayed_timer_trigger.tv_sec != 0) {
+			temp = timespec64_add(timer->delayed_timer_trigger,
+					      temp);
+			ts->tv_sec = temp.tv_sec;
+			ts->tv_nsec = temp.tv_nsec;
+			timer->delayed_timer_trigger.tv_sec = 0;
+			timer->work_pending = true;
+			schedule_work(&timer->work);
+		} else {
+			temp = timespec64_add(timer->last_modified_timer, temp);
+			ts->tv_sec = temp.tv_sec;
+			ts->tv_nsec = temp.tv_nsec;
+		}
+	} else {
+		state = timer->active;
+	}
+	spin_unlock_bh(&timestamp_lock);
+	return state;
+}
+
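+/*
+ * Report an idle/active transition to userspace as a kobject uevent
+ * carrying INTERFACE=, STATE=, TIME_NS= and UID= environment variables.
+ */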
+static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
+{
+	char iface_msg[NLMSG_MAX_SIZE];
+	char state_msg[NLMSG_MAX_SIZE];
+	char timestamp_msg[NLMSG_MAX_SIZE];
+	char uid_msg[NLMSG_MAX_SIZE];
+	char *envp[] = { iface_msg, state_msg, timestamp_msg, uid_msg, NULL };
+	int res;
+	struct timespec64 ts;
+	u64 time_ns;
+	bool state;
+
+	res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+		       iface);
+	if (NLMSG_MAX_SIZE <= res) {
+		pr_err("message too long (%d)\n", res);
+		return;
+	}
+
+	ts = ktime_to_timespec64(ktime_get_boottime());
+	state = check_for_delayed_trigger(timer, &ts);
+	res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+		       state ? "active" : "inactive");
+
+	if (NLMSG_MAX_SIZE <= res) {
+		pr_err("message too long (%d)\n", res);
+		return;
+	}
+
+	if (state) {
+		res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=%u", timer->uid);
+		if (NLMSG_MAX_SIZE <= res)
+			pr_err("message too long (%d)\n", res);
+	} else {
+		res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=");
+		if (NLMSG_MAX_SIZE <= res)
+			pr_err("message too long (%d)\n", res);
+	}
+
+	time_ns = timespec64_to_ns(&ts);
+	res = snprintf(timestamp_msg, NLMSG_MAX_SIZE, "TIME_NS=%llu", time_ns);
+	if (NLMSG_MAX_SIZE <= res) {
+		timestamp_msg[0] = '\0';
+		pr_err("message too long (%d)\n", res);
+	}
+
+	pr_debug("putting nlmsg: <%s> <%s> <%s> <%s>\n", iface_msg, state_msg,
+		 timestamp_msg, uid_msg);
+	kobject_uevent_env(idletimer_tg_kobj, KOBJ_CHANGE, envp);
+	return;
+}
+
 static
 struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
 {
@@ -67,6 +169,7 @@
 	unsigned long expires = 0;
 	struct timespec64 ktimespec = {};
 	long time_diff = 0;
+	unsigned long now = jiffies;
 
 	mutex_lock(&list_mutex);
 
@@ -78,15 +181,19 @@
 			time_diff = ktimespec.tv_sec;
 		} else {
 			expires = timer->timer.expires;
-			time_diff = jiffies_to_msecs(expires - jiffies) / 1000;
+			time_diff = jiffies_to_msecs(expires - now) / 1000;
 		}
 	}
 
 	mutex_unlock(&list_mutex);
 
-	if (time_after(expires, jiffies) || ktimespec.tv_sec > 0)
+	if (time_after(expires, now) || ktimespec.tv_sec > 0)
 		return sysfs_emit(buf, "%ld\n", time_diff);
 
+	if (timer->send_nl_msg)
+		return sysfs_emit(buf, "0 %d\n",
+				  jiffies_to_msecs(now - expires) / 1000);
+
 	return sysfs_emit(buf, "0\n");
 }
 
@@ -96,6 +203,9 @@
 						  work);
 
 	sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+	if (timer->send_nl_msg)
+		notify_netlink_uevent(timer->attr.attr.name, timer);
 }
 
 static void idletimer_tg_expired(struct timer_list *t)
@@ -104,7 +214,62 @@
 
 	pr_debug("timer %s expired\n", timer->attr.attr.name);
 
+	spin_lock_bh(&timestamp_lock);
+	timer->active = false;
+	timer->work_pending = true;
 	schedule_work(&timer->work);
+	spin_unlock_bh(&timestamp_lock);
+}
+
+static int idletimer_resume(struct notifier_block *notifier,
+			    unsigned long pm_event, void *unused)
+{
+	struct timespec64 ts;
+	unsigned long time_diff, now = jiffies;
+	struct idletimer_tg *timer = container_of(notifier,
+						  struct idletimer_tg, pm_nb);
+	if (!timer)
+		return NOTIFY_DONE;
+
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		timer->last_suspend_time =
+			ktime_to_timespec64(ktime_get_boottime());
+		timer->suspend_time_valid = true;
+		break;
+	case PM_POST_SUSPEND:
+		if (!timer->suspend_time_valid)
+			break;
+		timer->suspend_time_valid = false;
+
+		spin_lock_bh(&timestamp_lock);
+		if (!timer->active) {
+			spin_unlock_bh(&timestamp_lock);
+			break;
+		}
+		/*
+		 * Jiffies are not updated while the system is suspended, so
+		 * "now" still holds the time at which the suspend happened.
+		 */
+		if (time_after(timer->timer.expires, now)) {
+			ts = ktime_to_timespec64(ktime_get_boottime());
+			ts = timespec64_sub(ts, timer->last_suspend_time);
+			time_diff = timespec64_to_jiffies(&ts);
+			if (timer->timer.expires > (time_diff + now)) {
+				mod_timer_pending(&timer->timer,
+						  (timer->timer.expires - time_diff));
+			} else {
+				del_timer(&timer->timer);
+				timer->timer.expires = 0;
+				timer->active = false;
+				timer->work_pending = true;
+				schedule_work(&timer->work);
+			}
+		}
+		spin_unlock_bh(&timestamp_lock);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
 }
 
 static enum alarmtimer_restart idletimer_tg_alarmproc(struct alarm *alarm,
@@ -158,17 +323,34 @@
 
 	ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
 	if (ret < 0) {
-		pr_debug("couldn't add file to sysfs");
+		pr_debug("couldn't add file to sysfs\n");
 		goto out_free_attr;
 	}
 
 	list_add(&info->timer->entry, &idletimer_tg_list);
-
-	timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
+	pr_debug("timer type value is 0.\n");
+	info->timer->timer_type = 0;
 	info->timer->refcnt = 1;
+	info->timer->send_nl_msg = false;
+	info->timer->active = true;
+	info->timer->timeout = info->timeout;
+
+	info->timer->delayed_timer_trigger.tv_sec = 0;
+	info->timer->delayed_timer_trigger.tv_nsec = 0;
+	info->timer->work_pending = false;
+	info->timer->uid = 0;
+	info->timer->last_modified_timer =
+		ktime_to_timespec64(ktime_get_boottime());
+
+	info->timer->pm_nb.notifier_call = idletimer_resume;
+	ret = register_pm_notifier(&info->timer->pm_nb);
+	if (ret)
+		printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n",
+		       __func__, ret);
 
 	INIT_WORK(&info->timer->work, idletimer_tg_work);
 
+	timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
 	mod_timer(&info->timer->timer,
 		  msecs_to_jiffies(info->timeout * 1000) + jiffies);
 
@@ -186,7 +368,7 @@
 {
 	int ret;
 
-	info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+	info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
 	if (!info->timer) {
 		ret = -ENOMEM;
 		goto out;
@@ -207,7 +389,7 @@
 
 	ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
 	if (ret < 0) {
-		pr_debug("couldn't add file to sysfs");
+		pr_debug("couldn't add file to sysfs\n");
 		goto out_free_attr;
 	}
 
@@ -215,9 +397,25 @@
 	kobject_uevent(idletimer_tg_kobj,KOBJ_ADD);
 
 	list_add(&info->timer->entry, &idletimer_tg_list);
-	pr_debug("timer type value is %u", info->timer_type);
+	pr_debug("timer type value is %u\n", info->timer_type);
 	info->timer->timer_type = info->timer_type;
 	info->timer->refcnt = 1;
+	info->timer->send_nl_msg = (info->send_nl_msg != 0);
+	info->timer->active = true;
+	info->timer->timeout = info->timeout;
+
+	info->timer->delayed_timer_trigger.tv_sec = 0;
+	info->timer->delayed_timer_trigger.tv_nsec = 0;
+	info->timer->work_pending = false;
+	info->timer->uid = 0;
+	info->timer->last_modified_timer =
+		ktime_to_timespec64(ktime_get_boottime());
+
+	info->timer->pm_nb.notifier_call = idletimer_resume;
+	ret = register_pm_notifier(&info->timer->pm_nb);
+	if (ret)
+		printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n",
+		       __func__, ret);
 
 	INIT_WORK(&info->timer->work, idletimer_tg_work);
 
@@ -231,7 +429,7 @@
 	} else {
 		timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
 		mod_timer(&info->timer->timer,
-				msecs_to_jiffies(info->timeout * 1000) + jiffies);
+			  msecs_to_jiffies(info->timeout * 1000) + jiffies);
 	}
 
 	return 0;
@@ -244,6 +442,41 @@
 	return ret;
 }
 
+static void reset_timer(struct idletimer_tg * const info_timer,
+			const __u32 info_timeout,
+			struct sk_buff *skb)
+{
+	unsigned long now = jiffies;
+	bool timer_prev;
+
+	spin_lock_bh(&timestamp_lock);
+	timer_prev = info_timer->active;
+	info_timer->active = true;
+	/* timer_prev guards against jiffies wraparound in time_before() */
+	if (!timer_prev || time_before(info_timer->timer.expires, now)) {
+		pr_debug("Starting Checkentry timer (Expired, Jiffies): %lu, %lu\n",
+			 info_timer->timer.expires, now);
+
+		/* Store the UID responsible for waking up the radio */
+		if (skb && (skb->sk)) {
+			info_timer->uid = from_kuid_munged(current_user_ns(),
+							   sock_i_uid(skb_to_full_sk(skb)));
+		}
+
+		/* Check whether an inactive notification is still pending */
+		if (info_timer->work_pending)
+			info_timer->delayed_timer_trigger = info_timer->last_modified_timer;
+		else {
+			info_timer->work_pending = true;
+			schedule_work(&info_timer->work);
+		}
+	}
+
+	info_timer->last_modified_timer = ktime_to_timespec64(ktime_get_boottime());
+	mod_timer(&info_timer->timer, msecs_to_jiffies(info_timeout * 1000) + now);
+	spin_unlock_bh(&timestamp_lock);
+}
+
 /*
  * The actual xt_tables plugin.
  */
@@ -251,12 +484,21 @@
 					 const struct xt_action_param *par)
 {
 	const struct idletimer_tg_info *info = par->targinfo;
+	unsigned long now = jiffies;
 
 	pr_debug("resetting timer %s, timeout period %u\n",
 		 info->label, info->timeout);
 
-	mod_timer(&info->timer->timer,
-		  msecs_to_jiffies(info->timeout * 1000) + jiffies);
+	info->timer->active = true;
+
+	if (time_before(info->timer->timer.expires, now)) {
+		schedule_work(&info->timer->work);
+		pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
+			 info->label, info->timer->timer.expires, now);
+	}
+
+	/* TODO: Avoid modifying timers on each packet */
+	reset_timer(info->timer, info->timeout, skb);
 
 	return XT_CONTINUE;
 }
@@ -268,6 +510,7 @@
 					 const struct xt_action_param *par)
 {
 	const struct idletimer_tg_info_v1 *info = par->targinfo;
+	unsigned long now = jiffies;
 
 	pr_debug("resetting timer %s, timeout period %u\n",
 		 info->label, info->timeout);
@@ -276,8 +519,16 @@
 		ktime_t tout = ktime_set(info->timeout, 0);
 		alarm_start_relative(&info->timer->alarm, tout);
 	} else {
-		mod_timer(&info->timer->timer,
-				msecs_to_jiffies(info->timeout * 1000) + jiffies);
+		info->timer->active = true;
+
+		if (time_before(info->timer->timer.expires, now)) {
+			schedule_work(&info->timer->work);
+			pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
+				 info->label, info->timer->timer.expires, now);
+		}
+
+		/* TODO: Avoid modifying timers on each packet */
+		reset_timer(info->timer, info->timeout, skb);
 	}
 
 	return XT_CONTINUE;
@@ -321,9 +572,7 @@
 	info->timer = __idletimer_tg_find_by_label(info->label);
 	if (info->timer) {
 		info->timer->refcnt++;
-		mod_timer(&info->timer->timer,
-			  msecs_to_jiffies(info->timeout * 1000) + jiffies);
-
+		reset_timer(info->timer, info->timeout, NULL);
 		pr_debug("increased refcnt of timer %s to %u\n",
 			 info->label, info->timer->refcnt);
 	} else {
@@ -346,9 +595,6 @@
 
 	pr_debug("checkentry targinfo%s\n", info->label);
 
-	if (info->send_nl_msg)
-		return -EOPNOTSUPP;
-
 	ret = idletimer_tg_helper((struct idletimer_tg_info *)info);
 	if(ret < 0)
 	{
@@ -361,6 +607,11 @@
 		return -EINVAL;
 	}
 
+	if (info->send_nl_msg > 1) {
+		pr_debug("invalid value for send_nl_msg\n");
+		return -EINVAL;
+	}
+
 	mutex_lock(&list_mutex);
 
 	info->timer = __idletimer_tg_find_by_label(info->label);
@@ -383,8 +634,7 @@
 				alarm_start_relative(&info->timer->alarm, tout);
 			}
 		} else {
-				mod_timer(&info->timer->timer,
-					msecs_to_jiffies(info->timeout * 1000) + jiffies);
+			reset_timer(info->timer, info->timeout, NULL);
 		}
 		pr_debug("increased refcnt of timer %s to %u\n",
 			 info->label, info->timer->refcnt);
@@ -414,8 +664,9 @@
 
 		list_del(&info->timer->entry);
 		del_timer_sync(&info->timer->timer);
-		cancel_work_sync(&info->timer->work);
 		sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+		unregister_pm_notifier(&info->timer->pm_nb);
+		cancel_work_sync(&info->timer->work);
 		kfree(info->timer->attr.attr.name);
 		kfree(info->timer);
 	} else {
@@ -443,8 +694,9 @@
 		} else {
 			del_timer_sync(&info->timer->timer);
 		}
-		cancel_work_sync(&info->timer->work);
 		sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+		unregister_pm_notifier(&info->timer->pm_nb);
+		cancel_work_sync(&info->timer->work);
 		kfree(info->timer->attr.attr.name);
 		kfree(info->timer);
 	} else {
@@ -540,3 +792,4 @@
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("ipt_IDLETIMER");
 MODULE_ALIAS("ip6t_IDLETIMER");
+MODULE_ALIAS("arpt_IDLETIMER");
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644
index 0000000..641e36e
--- /dev/null
+++ b/net/netfilter/xt_quota2.c
@@ -0,0 +1,397 @@
+/*
+ * xt_quota2 - enhanced xt_quota that can count upwards and in packets
+ * as a minimal accounting match.
+ * by Jan Engelhardt <jengelh@medozas.de>, 2008
+ *
+ * Originally based on xt_quota.c:
+ * 	netfilter module to enforce network quotas
+ * 	Sam Johnston <samj@samj.net>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License; either
+ *	version 2 of the License, as published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_quota2.h>
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* For compatibility, these definitions are copied from the
+ * deprecated header file <linux/netfilter_ipv4/ipt_ULOG.h> */
+#define ULOG_MAC_LEN	80
+#define ULOG_PREFIX_LEN	32
+
+/* Format of the ULOG packets passed through netlink */
+typedef struct ulog_packet_msg {
+	unsigned long mark;
+	long timestamp_sec;
+	long timestamp_usec;
+	unsigned int hook;
+	char indev_name[IFNAMSIZ];
+	char outdev_name[IFNAMSIZ];
+	size_t data_len;
+	char prefix[ULOG_PREFIX_LEN];
+	unsigned char mac_len;
+	unsigned char mac[ULOG_MAC_LEN];
+	unsigned char payload[0];
+} ulog_packet_msg_t;
+#endif
+
+/**
+ * struct xt_quota_counter - accounting state for one named quota
+ * @lock:	lock to protect quota writers from each other
+ */
+struct xt_quota_counter {
+	u_int64_t quota;
+	spinlock_t lock;
+	struct list_head list;
+	atomic_t ref;
+	char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
+	struct proc_dir_entry *procfs_entry;
+};
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* Harald's favorite number +1 :D From ipt_ULOG.c */
+static unsigned int qlog_nl_event = 112;
+module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(event_num,
+		 "Event number for NETLINK_NFLOG message. 0 disables log. "
+		 "111 is what ipt_ULOG uses.");
+static struct sock *nflognl;
+#endif
+
+static LIST_HEAD(counter_list);
+static DEFINE_SPINLOCK(counter_list_lock);
+
+static struct proc_dir_entry *proc_xt_quota;
+static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
+static kuid_t quota_list_uid = KUIDT_INIT(0);
+static kgid_t quota_list_gid = KGIDT_INIT(0);
+module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+static void quota2_log(unsigned int hooknum,
+		       const struct sk_buff *skb,
+		       const struct net_device *in,
+		       const struct net_device *out,
+		       const char *prefix)
+{
+	ulog_packet_msg_t *pm;
+	struct sk_buff *log_skb;
+	size_t size;
+	struct nlmsghdr *nlh;
+
+	if (!qlog_nl_event)
+		return;
+
+	size = NLMSG_SPACE(sizeof(*pm));
+	size = max(size, (size_t)NLMSG_GOODSIZE);
+	log_skb = alloc_skb(size, GFP_ATOMIC);
+	if (!log_skb) {
+		pr_err("xt_quota2: cannot alloc skb for logging\n");
+		return;
+	}
+
+	nlh = nlmsg_put(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
+			sizeof(*pm), 0);
+	if (!nlh) {
+		pr_err("xt_quota2: nlmsg_put failed\n");
+		kfree_skb(log_skb);
+		return;
+	}
+	pm = nlmsg_data(nlh);
+	memset(pm, 0, sizeof(*pm));
+	if (skb->tstamp == 0)
+		__net_timestamp((struct sk_buff *)skb);
+	pm->hook = hooknum;
+	if (prefix != NULL)
+		strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
+	if (in)
+		strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+	if (out)
+		strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+
+	NETLINK_CB(log_skb).dst_group = 1;
+	pr_debug("throwing 1 packet to netlink group 1\n");
+	netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
+}
+#else
+static void quota2_log(unsigned int hooknum,
+		       const struct sk_buff *skb,
+		       const struct net_device *in,
+		       const struct net_device *out,
+		       const char *prefix)
+{
+}
+#endif  /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
+
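+/*
+ * Each named counter is exposed as /proc/net/xt_quota/<name>: reading it
+ * returns the remaining quota, writing an unsigned value replaces it.
+ */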
+static ssize_t quota_proc_read(struct file *file, char __user *buf,
+			   size_t size, loff_t *ppos)
+{
+	struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+	char tmp[24];
+	size_t tmp_size;
+
+	spin_lock_bh(&e->lock);
+	tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", e->quota);
+	spin_unlock_bh(&e->lock);
+	return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static ssize_t quota_proc_write(struct file *file, const char __user *input,
+                            size_t size, loff_t *ppos)
+{
+	struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+	char buf[sizeof("18446744073709551616")];
+
+	if (size > sizeof(buf))
+		size = sizeof(buf);
+	if (copy_from_user(buf, input, size) != 0)
+		return -EFAULT;
+	buf[sizeof(buf)-1] = '\0';
+	if (size < sizeof(buf))
+		buf[size] = '\0';
+
+	spin_lock_bh(&e->lock);
+	e->quota = simple_strtoull(buf, NULL, 0);
+	spin_unlock_bh(&e->lock);
+	return size;
+}
+
+static const struct proc_ops q2_counter_fops = {
+	.proc_read	= quota_proc_read,
+	.proc_write	= quota_proc_write,
+	.proc_lseek	= default_llseek,
+};
+
+static struct xt_quota_counter *
+q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
+{
+	struct xt_quota_counter *e;
+	unsigned int size;
+
+	/* Do not need all the procfs things for anonymous counters. */
+	size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
+	e = kmalloc(size, GFP_KERNEL);
+	if (e == NULL)
+		return NULL;
+
+	e->quota = q->quota;
+	spin_lock_init(&e->lock);
+	if (!anon) {
+		INIT_LIST_HEAD(&e->list);
+		atomic_set(&e->ref, 1);
+		strlcpy(e->name, q->name, sizeof(e->name));
+	}
+	return e;
+}
+
+/**
+ * q2_get_counter - get ref to counter or create new
+ * @name:	name of counter
+ */
+static struct xt_quota_counter *
+q2_get_counter(const struct xt_quota_mtinfo2 *q)
+{
+	struct proc_dir_entry *p;
+	struct xt_quota_counter *e = NULL;
+	struct xt_quota_counter *new_e;
+
+	if (*q->name == '\0')
+		return q2_new_counter(q, true);
+
+	/* No need to hold a lock while getting a new counter */
+	new_e = q2_new_counter(q, false);
+	if (new_e == NULL)
+		goto out;
+
+	spin_lock_bh(&counter_list_lock);
+	list_for_each_entry(e, &counter_list, list)
+		if (strcmp(e->name, q->name) == 0) {
+			atomic_inc(&e->ref);
+			spin_unlock_bh(&counter_list_lock);
+			kfree(new_e);
+			pr_debug("xt_quota2: old counter name=%s\n", e->name);
+			return e;
+		}
+	e = new_e;
+	pr_debug("xt_quota2: new_counter name=%s\n", e->name);
+	list_add_tail(&e->list, &counter_list);
+	/* An entry with a refcount of 1 is not directly destructible.
+	 * This function has not yet returned the new entry, so iptables
+	 * holds no reference it could use to destroy it. For another rule
+	 * to destroy the entry, this function would first have to run again
+	 * and take a new reference on the same named quota. Nobody touches
+	 * e->procfs_entry either, so it is safe to drop the lock here. */
+	spin_unlock_bh(&counter_list_lock);
+
+	/* create_proc_entry() is not spin_lock happy */
+	p = e->procfs_entry = proc_create_data(e->name, quota_list_perms,
+	                      proc_xt_quota, &q2_counter_fops, e);
+
+	if (IS_ERR_OR_NULL(p)) {
+		spin_lock_bh(&counter_list_lock);
+		list_del(&e->list);
+		spin_unlock_bh(&counter_list_lock);
+		goto out;
+	}
+	proc_set_user(p, quota_list_uid, quota_list_gid);
+	return e;
+
+ out:
+	kfree(e);
+	return NULL;
+}
+
+static int quota_mt2_check(const struct xt_mtchk_param *par)
+{
+	struct xt_quota_mtinfo2 *q = par->matchinfo;
+
+	pr_debug("xt_quota2: check() flags=0x%04x\n", q->flags);
+
+	if (q->flags & ~XT_QUOTA_MASK)
+		return -EINVAL;
+
+	q->name[sizeof(q->name)-1] = '\0';
+	if (*q->name == '.' || strchr(q->name, '/') != NULL) {
+		printk(KERN_ERR "xt_quota.3: illegal name\n");
+		return -EINVAL;
+	}
+
+	q->master = q2_get_counter(q);
+	if (q->master == NULL) {
+		printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
+{
+	struct xt_quota_mtinfo2 *q = par->matchinfo;
+	struct xt_quota_counter *e = q->master;
+
+	if (*q->name == '\0') {
+		kfree(e);
+		return;
+	}
+
+	spin_lock_bh(&counter_list_lock);
+	if (!atomic_dec_and_test(&e->ref)) {
+		spin_unlock_bh(&counter_list_lock);
+		return;
+	}
+
+	list_del(&e->list);
+	spin_unlock_bh(&counter_list_lock);
+	remove_proc_entry(e->name, proc_xt_quota);
+	kfree(e);
+}
+
+static bool
+quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
+	struct xt_quota_counter *e = q->master;
+	int charge = (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+	bool no_change = q->flags & XT_QUOTA_NO_CHANGE;
+	bool ret = q->flags & XT_QUOTA_INVERT;
+
+	spin_lock_bh(&e->lock);
+	if (q->flags & XT_QUOTA_GROW) {
+		/*
+		 * While no_change is pointless in "grow" mode, we will
+		 * implement it here simply to have a consistent behavior.
+		 */
+		if (!no_change)
+			e->quota += charge;
+		ret = true; /* note: does not respect inversion (bug??) */
+	} else {
+		if (e->quota > charge) {
+			if (!no_change)
+				e->quota -= charge;
+			ret = !ret;
+		} else if (e->quota) {
+			/* We are transitioning, log that fact. */
+			quota2_log(xt_hooknum(par),
+				   skb,
+				   xt_in(par),
+				   xt_out(par),
+				   q->name);
+			/* we do not allow even small packets from now on */
+			e->quota = 0;
+		}
+	}
+	spin_unlock_bh(&e->lock);
+	return ret;
+}
+
+static struct xt_match quota_mt2_reg[] __read_mostly = {
+	{
+		.name       = "quota2",
+		.revision   = 3,
+		.family     = NFPROTO_IPV4,
+		.checkentry = quota_mt2_check,
+		.match      = quota_mt2,
+		.destroy    = quota_mt2_destroy,
+		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.usersize   = offsetof(struct xt_quota_mtinfo2, master),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "quota2",
+		.revision   = 3,
+		.family     = NFPROTO_IPV6,
+		.checkentry = quota_mt2_check,
+		.match      = quota_mt2,
+		.destroy    = quota_mt2_destroy,
+		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.usersize   = offsetof(struct xt_quota_mtinfo2, master),
+		.me         = THIS_MODULE,
+	},
+};
+
+static int __init quota_mt2_init(void)
+{
+	int ret;
+
+	pr_debug("xt_quota2: init()\n");
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+	nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, NULL);
+	if (!nflognl)
+		return -ENOMEM;
+#endif
+
+	proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
+	if (proc_xt_quota == NULL)
+		return -EACCES;
+
+	ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+	if (ret < 0)
+		remove_proc_entry("xt_quota", init_net.proc_net);
+	pr_debug("xt_quota2: init() %d\n", ret);
+	return ret;
+}
+
+static void __exit quota_mt2_exit(void)
+{
+	xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+	remove_proc_entry("xt_quota", init_net.proc_net);
+}
+
+module_init(quota_mt2_init);
+module_exit(quota_mt2_exit);
+MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
+MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_quota2");
+MODULE_ALIAS("ip6t_quota2");
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index ec2c2af..0e36646 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -26,6 +26,10 @@
 /* Threshold for detecting small packets to copy */
 #define GOOD_COPY_LEN  128
 
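+/*
+ * ANDROID: maximum size of a vsock packet buffer, settable at module load
+ * time and read-only afterwards (mode 0444).
+ */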
+uint virtio_transport_max_vsock_pkt_buf_size = 64 * 1024;
+module_param(virtio_transport_max_vsock_pkt_buf_size, uint, 0444);
+EXPORT_SYMBOL_GPL(virtio_transport_max_vsock_pkt_buf_size);
+
 static const struct virtio_transport *
 virtio_transport_get_ops(struct vsock_sock *vsk)
 {
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 4dae3ab..2e80e38b 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -237,7 +237,7 @@
 
 	.uinfo = {
 		.auth = {
-			.icv_truncbits = 96,
+			.icv_truncbits = IS_ENABLED(CONFIG_ANDROID) ? 128 : 96,
 			.icv_fullbits = 256,
 		}
 	},
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 229544b..ad84751 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -479,13 +479,11 @@
 	return -EOPNOTSUPP;
 }
 
-#if IS_ENABLED(CONFIG_NET_PKTGEN)
 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	return xfrm_outer_mode_output(x, skb);
 }
 EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
-#endif
 
 static int xfrm_output_one(struct sk_buff *skb, int err)
 {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a2f4001..d6f0fbf 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2412,19 +2412,22 @@
 	if (IS_ERR(data))
 		return PTR_ERR(data);
 
-	if (in_compat_syscall()) {
-		struct xfrm_translator *xtr = xfrm_get_translator();
+	/* Use the 64-bit / untranslated format on Android, even for compat */
+	if (!IS_ENABLED(CONFIG_ANDROID) || IS_ENABLED(CONFIG_XFRM_USER_COMPAT)) {
+		if (in_compat_syscall()) {
+			struct xfrm_translator *xtr = xfrm_get_translator();
 
-		if (!xtr) {
-			kfree(data);
-			return -EOPNOTSUPP;
-		}
+			if (!xtr) {
+				kfree(data);
+				return -EOPNOTSUPP;
+			}
 
-		err = xtr->xlate_user_policy_sockptr(&data, optlen);
-		xfrm_put_translator(xtr);
-		if (err) {
-			kfree(data);
-			return err;
+			err = xtr->xlate_user_policy_sockptr(&data, optlen);
+			xfrm_put_translator(xtr);
+			if (err) {
+				kfree(data);
+				return err;
+			}
 		}
 	}
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7c36cc1..9cdeda0 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2865,19 +2865,22 @@
 	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (in_compat_syscall()) {
-		struct xfrm_translator *xtr = xfrm_get_translator();
+	/* Use the 64-bit / untranslated format on Android, even for compat */
+	if (!IS_ENABLED(CONFIG_ANDROID) || IS_ENABLED(CONFIG_XFRM_USER_COMPAT)) {
+		if (in_compat_syscall()) {
+			struct xfrm_translator *xtr = xfrm_get_translator();
 
-		if (!xtr)
-			return -EOPNOTSUPP;
+			if (!xtr)
+				return -EOPNOTSUPP;
 
-		nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
-					    link->nla_pol, extack);
-		xfrm_put_translator(xtr);
-		if (IS_ERR(nlh64))
-			return PTR_ERR(nlh64);
-		if (nlh64)
-			nlh = nlh64;
+			nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
+						    link->nla_pol, extack);
+			xfrm_put_translator(xtr);
+			if (IS_ERR(nlh64))
+				return PTR_ERR(nlh64);
+			if (nlh64)
+				nlh = nlh64;
+		}
 	}
 
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index d1f865b..ae4940d 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -219,7 +219,9 @@
 
 ld_flags       = $(KBUILD_LDFLAGS) $(ldflags-y) $(LDFLAGS_$(@F))
 
-DTC_INCLUDE    := $(srctree)/scripts/dtc/include-prefixes
+# ANDROID: Allow DTC_INCLUDE to be set by the BUILD_CONFIG. This allows one to
+# compile an out-of-tree device tree.
+DTC_INCLUDE    += $(srctree)/scripts/dtc/include-prefixes
 
 dtc_cpp_flags  = -Wp,-MMD,$(depfile).pre.tmp -nostdinc                    \
 		 $(addprefix -I,$(DTC_INCLUDE))                          \
@@ -407,7 +409,8 @@
       cmd_lzo = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@
 
 quiet_cmd_lz4 = LZ4     $@
-      cmd_lz4 = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \
+      cmd_lz4 = { cat $(real-prereqs) | \
+                  $(LZ4) -l -12 --favor-decSpeed stdin stdout; \
                   $(size_append); } > $@
 
 # U-Boot mkimage
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index 7f39599..108aa68 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -12,6 +12,8 @@
 # for c_flags and mod-prelink-ext
 include $(srctree)/scripts/Makefile.lib
 
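+# ANDROID: in a mixed build, vmlinux lives in the tree named by
+# KBUILD_MIXED_TREE rather than in the current output directory.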
+mixed-build-prefix = $(if $(KBUILD_MIXED_TREE),$(KBUILD_MIXED_TREE)/)
+
 # find all modules listed in modules.order
 modules := $(sort $(shell cat $(MODORDER)))
 
@@ -39,11 +41,11 @@
 
 quiet_cmd_btf_ko = BTF [M] $@
       cmd_btf_ko = 							\
-	if [ -f vmlinux ]; then						\
-		LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base vmlinux $@; \
-		$(RESOLVE_BTFIDS) -b vmlinux $@; 			\
+	if [ -f $(mixed-build-prefix)vmlinux ]; then						\
+		LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base $(mixed-build-prefix)vmlinux $@; \
+		$(RESOLVE_BTFIDS) -b $(mixed-build-prefix)vmlinux $@; 			\
 	else								\
-		printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
+		printf "Skipping BTF generation for %s due to unavailability of $(mixed-build-prefix)vmlinux\n" $@ 1>&2; \
 	fi;
 
 # Same as newer-prereqs, but allows to exclude specified extra dependencies
@@ -56,8 +58,8 @@
 
 
 # Re-generate module BTFs if either module's .ko or vmlinux changed
-$(modules): %.ko: %$(mod-prelink-ext).o %.mod.o scripts/module.lds $(if $(KBUILD_BUILTIN),vmlinux) FORCE
-	+$(call if_changed_except,ld_ko_o,vmlinux)
+$(modules): %.ko: %$(mod-prelink-ext).o %.mod.o scripts/module.lds $(if $(KBUILD_BUILTIN),$(mixed-build-prefix)vmlinux) FORCE
+	+$(call if_changed_except,ld_ko_o,$(mixed-build-prefix)vmlinux)
 ifdef CONFIG_DEBUG_INFO_BTF_MODULES
 	+$(if $(newer-prereqs),$(call cmd,btf_ko))
 endif
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index ff9b09e..2e2e316 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -24,6 +24,10 @@
 suffix-$(CONFIG_MODULE_COMPRESS_ZSTD)	:= .zst
 
 modules := $(patsubst $(extmod_prefix)%, $(dst)/%$(suffix-y), $(modules))
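+# ANDROID: for external modules, also install a modules.order.<suffix> file,
+# where <suffix> is the module directory with '/', '.' and '-' mapped to '_',
+# so that several external modules can be installed side by side.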
+ifneq ($(KBUILD_EXTMOD),)
+extmod_suffix := $(subst /,_,$(subst .,_,$(subst -,_,$(KBUILD_EXTMOD))))
+modules += $(dst)/modules.order.$(extmod_suffix)
+endif
 
 __modinst: $(modules)
 	@:
@@ -82,6 +86,12 @@
 	$(call cmd,strip)
 	$(call cmd,sign)
 
+ifneq ($(KBUILD_EXTMOD),)
+$(dst)/modules.order.$(extmod_suffix): $(MODORDER) FORCE
+	$(call cmd,install)
+	@sed -i "s:^$(KBUILD_EXTMOD):$(INSTALL_MOD_DIR):g" $@
+endif
+
 else
 
 $(dst)/%.ko: FORCE
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 48585c4..c6f10a5 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -44,6 +44,8 @@
 # for mod-prelink-ext
 include $(srctree)/scripts/Makefile.lib
 
+mixed-build-prefix = $(if $(KBUILD_MIXED_TREE),$(KBUILD_MIXED_TREE)/)
+
 MODPOST = scripts/mod/modpost								\
 	$(if $(CONFIG_MODVERSIONS),-m)							\
 	$(if $(CONFIG_MODULE_SRCVERSION_ALL),-a)					\
@@ -64,16 +66,17 @@
 
 ifeq ($(KBUILD_EXTMOD),)
 
-input-symdump := vmlinux.symvers
+input-symdump := $(mixed-build-prefix)vmlinux.symvers
 output-symdump := modules-only.symvers
+module_srcpath := $(srctree)
 
 quiet_cmd_cat = GEN     $@
       cmd_cat = cat $(real-prereqs) > $@
 
-ifneq ($(wildcard vmlinux.symvers),)
+ifneq ($(wildcard $(mixed-build-prefix)vmlinux.symvers),)
 
 __modpost: Module.symvers
-Module.symvers: vmlinux.symvers modules-only.symvers FORCE
+Module.symvers: $(mixed-build-prefix)vmlinux.symvers modules-only.symvers FORCE
 	$(call if_changed,cat)
 
 targets += Module.symvers
@@ -96,6 +99,27 @@
 input-symdump := Module.symvers $(KBUILD_EXTRA_SYMBOLS)
 output-symdump := $(KBUILD_EXTMOD)/Module.symvers
 
+# Get the external module's source path. KBUILD_EXTMOD could either be an
+# absolute path or relative path from $(srctree). This makes sure that we
+# aren't using a relative path from a separate working directory (O= or
+# KBUILD_OUTPUT) since that may not be the actual module's SCM project path. So
+# check the path relative to $(srctree) first.
+ifneq ($(shell realpath $(srctree)/$(KBUILD_EXTMOD) 2>/dev/null),)
+	module_srcpath := $(srctree)/$(KBUILD_EXTMOD)
+else
+	module_srcpath := $(KBUILD_EXTMOD)
+endif
+
+endif
+
+ifeq ($(CONFIG_MODULE_SCMVERSION),y)
+# Get the SCM version of the module. `sed` verifies setlocalversion returns
+# a proper revision based on the SCM type, e.g. git, mercurial, or svn.
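+# Illustrative result for a hypothetical git checkout:
+#   $ scripts/setlocalversion /path/to/my-module
+#   -00123-ga1b2c3d4e5f6
+# from which the sed below keeps "ga1b2c3d4e5f6" (likewise "hg..." for
+# mercurial, "svnNNN" for subversion, each optionally "-dirty").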
+module_scmversion := $(shell $(srctree)/scripts/setlocalversion $(module_srcpath) | \
+	sed -n 's/.*-\(\(g\|hg\)[a-fA-F0-9]\+\(-dirty\)\?\|svn[0-9]\+\).*/\1/p')
+ifneq ($(module_scmversion),)
+MODPOST += -v $(module_scmversion)
+endif
 endif
 
 existing-input-symdump := $(wildcard $(input-symdump))
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index 3643b4f..2de598d 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -3,14 +3,15 @@
 #
 # A depmod wrapper used by the toplevel Makefile
 
-if test $# -ne 2; then
-	echo "Usage: $0 /sbin/depmod <kernelrelease>" >&2
+if test $# -ne 2 -a $# -ne 3; then
+	echo "Usage: $0 /sbin/depmod <kernelrelease> [System.map folder]" >&2
 	exit 1
 fi
 DEPMOD=$1
 KERNELRELEASE=$2
+KBUILD_MIXED_TREE=$3
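+# When the optional third argument is used it must end in '/': it is
+# prepended verbatim to "System.map" below. Illustrative:
+#   scripts/depmod.sh /sbin/depmod 5.16.0 /path/to/gki_out/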
 
-if ! test -r System.map ; then
+if ! test -r ${KBUILD_MIXED_TREE}System.map ; then
 	echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2
 	exit 0
 fi
@@ -41,7 +42,7 @@
 	KERNELRELEASE=99.98.$KERNELRELEASE
 fi
 
-set -- -ae -F System.map
+set -- -ae -F ${KBUILD_MIXED_TREE}System.map
 if test -n "$INSTALL_MOD_PATH"; then
 	set -- "$@" -b "$INSTALL_MOD_PATH"
 fi
diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c
index 3bc48c7..3034d99 100644
--- a/scripts/extract-cert.c
+++ b/scripts/extract-cert.c
@@ -49,6 +49,7 @@
 	}
 }
 
+#ifndef OPENSSL_IS_BORINGSSL
 static void drain_openssl_errors(void)
 {
 	const char *file;
@@ -58,6 +59,7 @@
 		return;
 	while (ERR_get_error_line(&file, &line)) {}
 }
+#endif
 
 #define ERR(cond, fmt, ...)				\
 	do {						\
@@ -112,6 +114,10 @@
 		fclose(f);
 		exit(0);
 	} else if (!strncmp(cert_src, "pkcs11:", 7)) {
+#ifdef OPENSSL_IS_BORINGSSL
+		ERR(1, "BoringSSL does not support extracting from PKCS#11");
+		exit(1);
+#else
 		ENGINE *e;
 		struct {
 			const char *cert_id;
@@ -134,6 +140,7 @@
 		ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
 		ERR(!parms.cert, "Get X.509 from PKCS#11");
 		write_cert(parms.cert);
+#endif
 	} else {
 		BIO *b;
 		X509 *x509;
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index cb8ab7d..e06d3bb 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -28,6 +28,8 @@
 static int all_versions = 0;
 /* If we are modposting external module set to 1 */
 static int external_module = 0;
+#define MODULE_SCMVERSION_SIZE 64
+static char module_scmversion[MODULE_SCMVERSION_SIZE];
 /* Only warn about unresolved symbols */
 static int warn_unresolved = 0;
 /* How a symbol is exported */
@@ -418,10 +420,9 @@
 		s = new_symbol(name, mod, export);
 	} else if (!external_module || s->module->is_vmlinux ||
 		   s->module == mod) {
-		warn("%s: '%s' exported twice. Previous export was in %s%s\n",
-		     mod->name, name, s->module->name,
-		     s->module->is_vmlinux ? "" : ".ko");
-		return s;
+		fatal("%s: '%s' exported twice. Previous export was in %s%s\n",
+		      mod->name, name, s->module->name,
+		      s->module->is_vmlinux ? "" : ".ko");
 	}
 
 	s->module = mod;
@@ -2234,6 +2235,20 @@
 		buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
 }
 
+/**
+ * add_scmversion() - Adds the MODULE_INFO macro for the scmversion.
+ * @b: Buffer to append to.
+ *
+ * This function fills in the module attribute `scmversion` for the kernel
+ * module. This is useful for determining a given module's SCM version on
+ * device via /sys/module/<module>/scmversion and/or using the modinfo tool.
+ */
+static void add_scmversion(struct buffer *b)
+{
+	if (module_scmversion[0] != '\0')
+		buf_printf(b, "\nMODULE_INFO(scmversion, \"%s\");\n", module_scmversion);
+}
+
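+/*
+ * Illustrative check once a module built with "-v <scmversion>" is
+ * installed and loaded (module name hypothetical):
+ *
+ *   modinfo -F scmversion my_module.ko
+ *   cat /sys/module/my_module/scmversion
+ */
+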
 /* Cannot check for assembler */
 static void add_retpoline(struct buffer *b)
 {
@@ -2503,7 +2518,7 @@
 	struct dump_list *dump_read_start = NULL;
 	struct dump_list **dump_read_iter = &dump_read_start;
 
-	while ((opt = getopt(argc, argv, "ei:mnT:o:awENd:")) != -1) {
+	while ((opt = getopt(argc, argv, "ei:mnT:o:awENd:v:")) != -1) {
 		switch (opt) {
 		case 'e':
 			external_module = 1;
@@ -2541,6 +2556,9 @@
 		case 'd':
 			missing_namespace_deps = optarg;
 			break;
+		case 'v':
+			strncpy(module_scmversion, optarg, sizeof(module_scmversion) - 1);
+			break;
 		default:
 			exit(1);
 		}
@@ -2580,6 +2598,7 @@
 		add_depends(&buf, mod);
 		add_moddevtable(&buf, mod);
 		add_srcversion(&buf, mod);
+		add_scmversion(&buf);
 
 		sprintf(fname, "%s.mod.c", mod->name);
 		write_if_changed(&buf, fname);
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 6b54e46..3fd1583 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -11,12 +11,14 @@
 #
 
 usage() {
-	echo "Usage: $0 [--save-scmversion] [srctree]" >&2
+	echo "Usage: $0 [--save-scmversion] [srctree] [branch] [kmi-generation]" >&2
 	exit 1
 }
 
 scm_only=false
 srctree=.
+android_release=
+kmi_generation=
 if test "$1" = "--save-scmversion"; then
 	scm_only=true
 	shift
@@ -25,6 +27,22 @@
 	srctree=$1
 	shift
 fi
+if test $# -gt 0; then
+	# Extract the Android release version. If there is no match, sed
+	# exits with status 255 and $android_release is cleared below.
+	android_release=`echo "$1" | sed -e '/android[0-9]\{2,\}/!{q255}; \
+		s/^\(android[0-9]\{2,\}\)-.*/\1/'`
+	if test $? -ne 0; then
+		android_release=
+	fi
+	shift
+
+	if test $# -gt 0; then
+		kmi_generation=$1
+		[ $(expr $kmi_generation : '^[0-9]\+$') -eq 0 ] && usage
+		shift
+	fi
+fi
 if test $# -gt 0 -o ! -d "$srctree"; then
 	usage
 fi
@@ -44,8 +62,13 @@
 	fi
 
 	# Check for git and a git repo.
-	if test -z "$(git rev-parse --show-cdup 2>/dev/null)" &&
-	   head=$(git rev-parse --verify HEAD 2>/dev/null); then
+	if head=$(git rev-parse --verify HEAD 2>/dev/null); then
+
+		if [ -n "$android_release" ] && [ -n "$kmi_generation" ]; then
+			printf '%s' "-$android_release-$kmi_generation"
+		elif [ -n "$android_release" ]; then
+			printf '%s' "-$android_release"
+		fi
 
 		# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
 		# it, because this version is defined in the top level Makefile.
@@ -143,4 +166,9 @@
 	res="$res${scm:++}"
 fi
 
+# finally, add the abXXX number if BUILD_NUMBER is set
+if test -n "${BUILD_NUMBER}"; then
+	res="$res-ab${BUILD_NUMBER}"
+fi
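+
+# Illustrative end-to-end invocation for a hypothetical android13 GKI tree,
+# KMI generation 4, CI build number exported:
+#   BUILD_NUMBER=8012345 scripts/setlocalversion . android13-5.15 4
+# giving something shaped like: -android13-4-00321-ga1b2c3d4e5f6-ab8012345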
+
 echo "$res"
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
index fbd34b8..8f06328 100644
--- a/scripts/sign-file.c
+++ b/scripts/sign-file.c
@@ -92,6 +92,7 @@
 	}
 }
 
+#ifndef OPENSSL_NO_ENGINE
 static void drain_openssl_errors(void)
 {
 	const char *file;
@@ -101,6 +102,7 @@
 		return;
 	while (ERR_get_error_line(&file, &line)) {}
 }
+#endif
 
 #define ERR(cond, fmt, ...)				\
 	do {						\
@@ -135,7 +137,9 @@
 static EVP_PKEY *read_private_key(const char *private_key_name)
 {
 	EVP_PKEY *private_key;
+	BIO *b;
 
+#ifndef OPENSSL_NO_ENGINE
 	if (!strncmp(private_key_name, "pkcs11:", 7)) {
 		ENGINE *e;
 
@@ -153,17 +157,16 @@
 		private_key = ENGINE_load_private_key(e, private_key_name,
 						      NULL, NULL);
 		ERR(!private_key, "%s", private_key_name);
-	} else {
-		BIO *b;
-
-		b = BIO_new_file(private_key_name, "rb");
-		ERR(!b, "%s", private_key_name);
-		private_key = PEM_read_bio_PrivateKey(b, NULL, pem_pw_cb,
-						      NULL);
-		ERR(!private_key, "%s", private_key_name);
-		BIO_free(b);
+		return private_key;
 	}
+#endif
 
+	b = BIO_new_file(private_key_name, "rb");
+	ERR(!b, "%s", private_key_name);
+	private_key = PEM_read_bio_PrivateKey(b, NULL, pem_pw_cb,
+					      NULL);
+	ERR(!private_key, "%s", private_key_name);
+	BIO_free(b);
 	return private_key;
 }
 
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index abcd974..b9c5b80 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -44,6 +44,9 @@
 #define avc_cache_stats_incr(field)	do {} while (0)
 #endif
 
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/avc.h>
+
 struct avc_entry {
 	u32			ssid;
 	u32			tsid;
@@ -441,6 +444,7 @@
 
 static void avc_node_delete(struct selinux_avc *avc, struct avc_node *node)
 {
+	trace_android_rvh_selinux_avc_node_delete(node);
 	hlist_del_rcu(&node->list);
 	call_rcu(&node->rhead, avc_node_free);
 	atomic_dec(&avc->avc_cache.active_nodes);
@@ -457,6 +461,7 @@
 static void avc_node_replace(struct selinux_avc *avc,
 			     struct avc_node *new, struct avc_node *old)
 {
+	trace_android_rvh_selinux_avc_node_replace(old, new);
 	hlist_replace_rcu(&old->list, &new->list);
 	call_rcu(&old->rhead, avc_node_free);
 	atomic_dec(&avc->avc_cache.active_nodes);
@@ -566,8 +571,10 @@
 	avc_cache_stats_incr(lookups);
 	node = avc_search_node(avc, ssid, tsid, tclass);
 
-	if (node)
+	if (node) {
+		trace_android_rvh_selinux_avc_lookup(node, ssid, tsid, tclass);
 		return node;
+	}
 
 	avc_cache_stats_incr(misses);
 	return NULL;
@@ -652,6 +659,7 @@
 		}
 	}
 	hlist_add_head_rcu(&node->list, head);
+	trace_android_rvh_selinux_avc_insert(node);
 found:
 	spin_unlock_irqrestore(lock, flag);
 	return node;
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 35aac62..fbbefe6 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -117,7 +117,8 @@
 	  { COMMON_IPC_PERMS, NULL } },
 	{ "netlink_route_socket",
 	  { COMMON_SOCK_PERMS,
-	    "nlmsg_read", "nlmsg_write", NULL } },
+	    "nlmsg_read", "nlmsg_write", "nlmsg_readpriv", "nlmsg_getneigh",
+	    NULL } },
 	{ "netlink_tcpdiag_socket",
 	  { COMMON_SOCK_PERMS,
 	    "nlmsg_read", "nlmsg_write", NULL } },
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index ac0ece0..948b768 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -97,6 +97,8 @@
 	bool checkreqprot;
 	bool initialized;
 	bool policycap[__POLICYDB_CAPABILITY_MAX];
+	bool android_netlink_route;
+	bool android_netlink_getneigh;
 
 	struct page *status_page;
 	struct mutex status_lock;
@@ -219,6 +221,20 @@
 	return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]);
 }
 
+static inline bool selinux_android_nlroute_getlink(void)
+{
+	struct selinux_state *state = &selinux_state;
+
+	return state->android_netlink_route;
+}
+
+static inline bool selinux_android_nlroute_getneigh(void)
+{
+	struct selinux_state *state = &selinux_state;
+
+	return state->android_netlink_getneigh;
+}
+
 struct selinux_policy_convert_data;
 
 struct selinux_load_state {
@@ -452,5 +468,6 @@
 extern void ebitmap_cache_init(void);
 extern void hashtab_cache_init(void);
 extern int security_sidtab_hash_stats(struct selinux_state *state, char *page);
+extern void selinux_nlmsg_init(void);
 
 #endif /* _SELINUX_SECURITY_H_ */
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 94ea2a8..4fee2e2 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -25,7 +25,7 @@
 	u32	perm;
 };
 
-static const struct nlmsg_perm nlmsg_route_perms[] =
+static struct nlmsg_perm nlmsg_route_perms[] =
 {
 	{ RTM_NEWLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
 	{ RTM_DELLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
@@ -216,3 +216,43 @@
 
 	return err;
 }
+
+static void nlmsg_set_perm_for_type(u32 perm, u16 type)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nlmsg_route_perms); i++) {
+		if (nlmsg_route_perms[i].nlmsg_type == type) {
+			nlmsg_route_perms[i].perm = perm;
+			break;
+		}
+	}
+}
+
+/**
+ * selinux_nlmsg_init() - install policy-dependent netlink_route permissions
+ *
+ * Use nlmsg_readpriv as the permission for RTM_GETLINK messages if the
+ * netlink_route_getlink policy capability is set. Otherwise use nlmsg_read.
+ * Similarly, use nlmsg_getneigh for RTM_GETNEIGH and RTM_GETNEIGHTBL if the
+ * netlink_route_getneigh policy capability is set. Otherwise use nlmsg_read.
+ */
+void selinux_nlmsg_init(void)
+{
+	if (selinux_android_nlroute_getlink())
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READPRIV,
+					RTM_GETLINK);
+	else
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READ,
+					RTM_GETLINK);
+
+	if (selinux_android_nlroute_getneigh()) {
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_GETNEIGH,
+					RTM_GETNEIGH);
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_GETNEIGH,
+					RTM_GETNEIGHTBL);
+	} else {
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READ,
+					RTM_GETNEIGH);
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READ,
+					RTM_GETNEIGHTBL);
+	}
+}
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 0ae1b71..ad91f52 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -2491,6 +2491,14 @@
 	p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
 	p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
 
+	if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_ANDROID_NETLINK_ROUTE)) {
+		p->android_netlink_route = 1;
+	}
+
+	if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_ANDROID_NETLINK_GETNEIGH)) {
+		p->android_netlink_getneigh = 1;
+	}
+
 	if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
 		rc = ebitmap_read(&p->policycaps, fp);
 		if (rc)
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index c24d4e1..be9988e 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -238,6 +238,8 @@
 /* The policy database */
 struct policydb {
 	int mls_enabled;
+	int android_netlink_route;
+	int android_netlink_getneigh;
 
 	/* symbol tables */
 	struct symtab symtab[SYM_NUM];
@@ -334,6 +336,8 @@
 	struct policydb *p, struct role_trans_key *key);
 
 #define POLICYDB_CONFIG_MLS    1
+#define POLICYDB_CONFIG_ANDROID_NETLINK_ROUTE    (1 << 31)
+#define POLICYDB_CONFIG_ANDROID_NETLINK_GETNEIGH (1 << 30)
 
 /* the config flags related to unknown classes/perms are bits 2 and 3 */
 #define REJECT_UNKNOWN	0x00000002
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 8e92af7..3a11c45 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -68,6 +68,8 @@
 #include "policycap_names.h"
 #include "ima.h"
 
+#include <trace/hooks/selinux.h>
+
 struct convert_context_args {
 	struct selinux_state *state;
 	struct policydb *oldp;
@@ -2167,6 +2169,10 @@
 			pr_info("SELinux:  unknown policy capability %u\n",
 				i);
 	}
+
+	state->android_netlink_route = p->android_netlink_route;
+	state->android_netlink_getneigh = p->android_netlink_getneigh;
+	selinux_nlmsg_init();
 }
 
 static int security_preserve_bools(struct selinux_policy *oldpolicy,
@@ -2260,6 +2266,7 @@
 		 */
 		selinux_mark_initialized(state);
 		selinux_complete_init();
+		trace_android_rvh_selinux_is_initialized(state);
 	}
 
 	/* Free the old policy */
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index dcf6be4..a62c139 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3174,6 +3174,39 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_get_dai_id);
 
+/**
+ * snd_soc_info_multi_ext - external single mixer info callback
+ * @kcontrol: mixer control
+ * @uinfo: control element information
+ *
+ * Callback to provide information about a single external mixer control
+ * that accepts multiple inputs.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_info_multi_ext(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo)
+{
+	struct soc_multi_mixer_control *mc =
+		(struct soc_multi_mixer_control *)kcontrol->private_value;
+	int platform_max;
+
+	if (!mc->platform_max)
+		mc->platform_max = mc->max;
+	platform_max = mc->platform_max;
+
+	if (platform_max == 1 && !strnstr(kcontrol->id.name, " Volume", 30))
+		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+	else
+		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+	uinfo->count = mc->count;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = platform_max;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_multi_ext);
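+
+/*
+ * From userspace such a control appears as a multi-value integer element;
+ * e.g. for .count = 4 and .platform_max = 255 an illustrative alsa-utils
+ * query (card and control name hypothetical) is:
+ *
+ *   amixer -c 0 cget name='Example Multi Values'   # values=4, min=0, max=255
+ */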
+
 int snd_soc_get_dai_name(const struct of_phandle_args *args,
 				const char **dai_name)
 {
diff --git a/tools/testing/selftests/filesystems/incfs/.gitignore b/tools/testing/selftests/filesystems/incfs/.gitignore
new file mode 100644
index 0000000..f0e3cd9
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/.gitignore
@@ -0,0 +1,3 @@
+incfs_test
+incfs_stress
+incfs_perf
diff --git a/tools/testing/selftests/filesystems/incfs/Makefile b/tools/testing/selftests/filesystems/incfs/Makefile
new file mode 100644
index 0000000..f379802
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -D_FILE_OFFSET_BITS=64 -Wall -Werror -I../.. -I../../../../..
+LDLIBS := -llz4 -lzstd -lcrypto -lpthread
+TEST_GEN_PROGS := incfs_test incfs_stress incfs_perf
+
+include ../../lib.mk
+
+# Put after include ../../lib.mk since that changes $(TEST_GEN_PROGS)
+# Otherwise you get multiple targets, this becomes the default, and it's a mess
+EXTRA_SOURCES := utils.c
+$(TEST_GEN_PROGS) : $(EXTRA_SOURCES)
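+
+# Typical kselftest build/run from the kernel source root (illustrative;
+# needs liblz4, libzstd and OpenSSL development packages, per LDLIBS above):
+#   make -C tools/testing/selftests TARGETS=filesystems/incfs
+#   make -C tools/testing/selftests TARGETS=filesystems/incfs run_tests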
diff --git a/tools/testing/selftests/filesystems/incfs/OWNERS b/tools/testing/selftests/filesystems/incfs/OWNERS
new file mode 100644
index 0000000..f26e11c
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/OWNERS
@@ -0,0 +1 @@
+file:/fs/incfs/OWNERS
diff --git a/tools/testing/selftests/filesystems/incfs/incfs_perf.c b/tools/testing/selftests/filesystems/incfs/incfs_perf.c
new file mode 100644
index 0000000..ed36bbd
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/incfs_perf.c
@@ -0,0 +1,717 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google LLC
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <lz4.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <ctype.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+#define err_msg(...)                                                           \
+	do {                                                                   \
+		fprintf(stderr, "%s: (%d) ", TAG, __LINE__);                   \
+		fprintf(stderr, __VA_ARGS__);                                  \
+		fprintf(stderr, " (%s)\n", strerror(errno));                   \
+	} while (false)
+
+#define TAG "incfs_perf"
+
+struct options {
+	int blocks; /* -b number of diff block sizes */
+	bool no_cleanup; /* -c don't clean up after */
+	const char *test_dir; /* -d working directory */
+	const char *file_types; /* -f sScCvV */
+	bool no_native; /* -n don't test native files */
+	bool no_random; /* -r don't do random reads */
+	bool no_linear; /* -R random reads only */
+	size_t size; /* -s file size as power of 2 */
+	int tries; /* -t times to run test */
+};
+
+enum flags {
+	SHUFFLE = 1,
+	COMPRESS = 2,
+	VERIFY = 4,
+	LAST_FLAG = 8,
+};
+
+void print_help(void)
+{
+	puts(
+	"incfs_perf. Performance test tool for incfs\n"
+	"\tTests read performance of incfs by creating files of various types\n"
+	"\tflushing caches and then reading them back.\n"
+	"\tEach file is read with different block sizes and average\n"
+	"\tthroughput in megabytes/second and memory usage are reported for\n"
+	"\teach block size\n"
+	"\tNative files are tested for comparison\n"
+	"\tNative files are created in native folder, incfs files are created\n"
+	"\tin src folder which is mounted on dst folder\n"
+	"\n"
+	"\t-bn (default 8) number of different block sizes, starting at 4096\n"
+	"\t                and doubling\n"
+	"\t-c		   don't Clean up - leave files and mount point\n"
+	"\t-d dir          create directories in dir\n"
+	"\t-fs|Sc|Cv|V     restrict which files are created.\n"
+	"\t                s blocks not shuffled, S blocks shuffled\n"
+	"\t                c blocks not compress, C blocks compressed\n"
+	"\t                v files not verified, V files verified\n"
+	"\t                If a letter is omitted, both options are tested\n"
+	"\t                If no letter are given, incfs is not tested\n"
+	"\t-n              Don't test native files\n"
+	"\t-r              No random reads (sequential only)\n"
+	"\t-R              Random reads only (no sequential)\n"
+	"\t-sn (default 30)File size as power of 2\n"
+	"\t-tn (default 5) Number of tries per file. Results are averaged\n"
+	);
+}
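+
+/*
+ * Example (illustrative; the tool mounts incfs, so it needs root):
+ *
+ *   mkdir -p /tmp/incfs-perf
+ *   ./incfs_perf -d /tmp/incfs-perf -r -fSC -s 24 -t 2
+ *
+ * reads 16 MiB shuffled+compressed incfs files sequentially only,
+ * averaging two tries per block size.
+ */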
+
+int parse_options(int argc, char *const *argv, struct options *options)
+{
+	signed char c;
+
+	/* Set defaults here */
+	*options = (struct options){
+		.blocks = 8,
+		.test_dir = ".",
+		.tries = 5,
+		.size = 30,
+	};
+
+	/* Load options from command line here */
+	while ((c = getopt(argc, argv, "b:cd:f::hnrRs:t:")) != -1) {
+		switch (c) {
+		case 'b':
+			options->blocks = strtol(optarg, NULL, 10);
+			break;
+
+		case 'c':
+			options->no_cleanup = true;
+			break;
+
+		case 'd':
+			options->test_dir = optarg;
+			break;
+
+		case 'f':
+			if (optarg)
+				options->file_types = optarg;
+			else
+				options->file_types = "sS";
+			break;
+
+		case 'h':
+			print_help();
+			exit(0);
+
+		case 'n':
+			options->no_native = true;
+			break;
+
+		case 'r':
+			options->no_random = true;
+			break;
+
+		case 'R':
+			options->no_linear = true;
+			break;
+
+		case 's':
+			options->size = strtol(optarg, NULL, 10);
+			break;
+
+		case 't':
+			options->tries = strtol(optarg, NULL, 10);
+			break;
+
+		default:
+			print_help();
+			return -EINVAL;
+		}
+	}
+
+	options->size = 1L << options->size;
+
+	return 0;
+}
+
+void shuffle(size_t *buffer, size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; ++i) {
+		size_t j = random() * (size - i - 1) / RAND_MAX;
+		size_t temp = buffer[i];
+
+		buffer[i] = buffer[j];
+		buffer[j] = temp;
+	}
+}
+
+int get_free_memory(void)
+{
+	FILE *meminfo = fopen("/proc/meminfo", "re");
+	char field[256];
+	char value[256] = {};
+
+	if (!meminfo)
+		return -ENOENT;
+
+	while (fscanf(meminfo, "%[^:]: %s kB\n", field, value) == 2) {
+		if (!strcmp(field, "MemFree"))
+			break;
+		*value = 0;
+	}
+
+	fclose(meminfo);
+
+	if (!*value)
+		return -ENOENT;
+
+	return strtol(value, NULL, 10);
+}
+
+int write_data(int cmd_fd, int dir_fd, const char *name, size_t size, int flags)
+{
+	int fd = openat(dir_fd, name, O_RDWR | O_CLOEXEC);
+	struct incfs_permit_fill permit_fill = {
+		.file_descriptor = fd,
+	};
+	int block_count = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	size_t *blocks = malloc(sizeof(size_t) * block_count);
+	int error = 0;
+	size_t i;
+	uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+	uint8_t compressed_data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+	struct incfs_fill_block fill_block = {
+		.compression = COMPRESSION_NONE,
+		.data_len = sizeof(data),
+		.data = ptr_to_u64(data),
+	};
+
+	if (!blocks) {
+		err_msg("Out of memory");
+		error = -errno;
+		goto out;
+	}
+
+	if (fd == -1) {
+		err_msg("Could not open file for writing %s", name);
+		error = -errno;
+		goto out;
+	}
+
+	if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		err_msg("Failed to call PERMIT_FILL");
+		error = -errno;
+		goto out;
+	}
+
+	for (i = 0; i < block_count; ++i)
+		blocks[i] = i;
+
+	if (flags & SHUFFLE)
+		shuffle(blocks, block_count);
+
+	if (flags & COMPRESS) {
+		int comp_size = LZ4_compress_default(
+			(char *)data, (char *)compressed_data, sizeof(data),
+			ARRAY_SIZE(compressed_data));
+
+		if (comp_size <= 0) {
+			error = -EBADMSG;
+			goto out;
+		}
+		fill_block.compression = COMPRESSION_LZ4;
+		fill_block.data = ptr_to_u64(compressed_data);
+		fill_block.data_len = comp_size;
+	}
+
+	for (i = 0; i < block_count; ++i) {
+		struct incfs_fill_blocks fill_blocks = {
+			.count = 1,
+			.fill_blocks = ptr_to_u64(&fill_block),
+		};
+
+		fill_block.block_index = blocks[i];
+		int written = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+
+		if (written != 1) {
+			error = -errno;
+			err_msg("Failed to write block %lu in file %s", i,
+				name);
+			break;
+		}
+	}
+
+out:
+	free(blocks);
+	close(fd);
+	sync();
+	return error;
+}
+
+int measure_read_throughput_internal(const char *tag, int dir, const char *name,
+				     const struct options *options, bool random)
+{
+	int block;
+
+	if (random)
+		printf("%32s(random)", tag);
+	else
+		printf("%40s", tag);
+
+	for (block = 0; block < options->blocks; ++block) {
+		size_t buffer_size;
+		char *buffer;
+		int try;
+		double time = 0;
+		double throughput;
+		int memory = 0;
+
+		buffer_size = 1 << (block + 12);
+		buffer = malloc(buffer_size);
+
+		for (try = 0; try < options->tries; ++try) {
+			int err;
+			struct timespec start_time, end_time;
+			off_t i;
+			int fd = -1;
+			size_t offsets_size = options->size / buffer_size;
+			size_t *offsets =
+				malloc(offsets_size * sizeof(*offsets));
+			int start_memory, end_memory;
+
+			if (!offsets) {
+				err_msg("Not enough memory");
+				return -ENOMEM;
+			}
+
+			for (i = 0; i < offsets_size; ++i)
+				offsets[i] = i * buffer_size;
+
+			if (random)
+				shuffle(offsets, offsets_size);
+
+			err = drop_caches();
+			if (err) {
+				err_msg("Failed to drop caches");
+				goto fail;
+			}
+
+			start_memory = get_free_memory();
+			if (start_memory < 0) {
+				err_msg("Failed to get start memory");
+				err = start_memory;
+				goto fail;
+			}
+
+			fd = openat(dir, name, O_RDONLY | O_CLOEXEC);
+			if (fd == -1) {
+				err_msg("Failed to open file");
+				err = -errno;
+				goto fail;
+			}
+
+			err = clock_gettime(CLOCK_MONOTONIC, &start_time);
+			if (err) {
+				err_msg("Failed to get start time");
+				goto fail;
+			}
+
+			for (i = 0; i < offsets_size; ++i)
+				if (pread(fd, buffer, buffer_size,
+					  offsets[i]) != buffer_size) {
+					err_msg("Failed to read file");
+					err = -errno;
+					goto fail;
+				}
+
+			err = clock_gettime(CLOCK_MONOTONIC, &end_time);
+			if (err) {
+				err_msg("Failed to get end time");
+				goto fail;
+			}
+
+			end_memory = get_free_memory();
+			if (end_memory < 0) {
+				err_msg("Failed to get end memory");
+				err = end_memory;
+				goto fail;
+			}
+
+			time += end_time.tv_sec - start_time.tv_sec;
+			time += (end_time.tv_nsec - start_time.tv_nsec) / 1e9;
+
+			close(fd);
+			fd = -1;
+			memory += start_memory - end_memory;
+
+fail:
+			free(offsets);
+			close(fd);
+			if (err)
+				return err;
+		}
+
+		throughput = options->size * options->tries / time;
+		printf("%10.3e %10d", throughput, memory / options->tries);
+		free(buffer);
+	}
+
+	printf("\n");
+	return 0;
+}
+
+int measure_read_throughput(const char *tag, int dir, const char *name,
+			    const struct options *options)
+{
+	int err = 0;
+
+	if (!options->no_linear)
+		err = measure_read_throughput_internal(tag, dir, name, options,
+						       false);
+
+	if (!err && !options->no_random)
+		err = measure_read_throughput_internal(tag, dir, name, options,
+						       true);
+	return err;
+}
+
+int test_native_file(int dir, const struct options *options)
+{
+	const char *name = "file";
+	int fd;
+	char buffer[4096] = {};
+	off_t i;
+	int err;
+
+	fd = openat(dir, name, O_CREAT | O_WRONLY | O_CLOEXEC, 0600);
+	if (fd == -1) {
+		err_msg("Could not open native file");
+		return -errno;
+	}
+
+	for (i = 0; i < options->size; i += sizeof(buffer))
+		if (pwrite(fd, buffer, sizeof(buffer), i) != sizeof(buffer)) {
+			err_msg("Failed to write file");
+			err = -errno;
+			goto fail;
+		}
+
+	close(fd);
+	sync();
+	fd = -1;
+
+	err = measure_read_throughput("native", dir, name, options);
+
+fail:
+	close(fd);
+	return err;
+}
+
+struct hash_block {
+	char data[INCFS_DATA_FILE_BLOCK_SIZE];
+};
+
+static struct hash_block *build_mtree(size_t size, char *root_hash,
+				      int *mtree_block_count)
+{
+	char data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+	const int digest_size = SHA256_DIGEST_SIZE;
+	const int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+	int block_count = 0;
+	int hash_block_count = 0;
+	int total_tree_block_count = 0;
+	int tree_lvl_index[INCFS_MAX_MTREE_LEVELS] = {};
+	int tree_lvl_count[INCFS_MAX_MTREE_LEVELS] = {};
+	int levels_count = 0;
+	int i, level;
+	struct hash_block *mtree;
+
+	if (size == 0)
+		return NULL;
+
+	block_count = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	hash_block_count = block_count;
+	for (i = 0; hash_block_count > 1; i++) {
+		hash_block_count = (hash_block_count + hash_per_block - 1) /
+				   hash_per_block;
+		tree_lvl_count[i] = hash_block_count;
+		total_tree_block_count += hash_block_count;
+	}
+	levels_count = i;
+
+	for (i = 0; i < levels_count; i++) {
+		int prev_lvl_base = (i == 0) ? total_tree_block_count :
+					       tree_lvl_index[i - 1];
+
+		tree_lvl_index[i] = prev_lvl_base - tree_lvl_count[i];
+	}
+
+	*mtree_block_count = total_tree_block_count;
+	mtree = calloc(total_tree_block_count, sizeof(*mtree));
+	if (!mtree)
+		return NULL;
+	/* Build level 0 hashes. */
+	for (i = 0; i < block_count; i++) {
+		int block_index = tree_lvl_index[0] + i / hash_per_block;
+		int block_off = (i % hash_per_block) * digest_size;
+		char *hash_ptr = mtree[block_index].data + block_off;
+
+		sha256(data, INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+	}
+
+	/* Build higher levels of hash tree. */
+	for (level = 1; level < levels_count; level++) {
+		int prev_lvl_base = tree_lvl_index[level - 1];
+		int prev_lvl_count = tree_lvl_count[level - 1];
+
+		for (i = 0; i < prev_lvl_count; i++) {
+			int block_index =
+				i / hash_per_block + tree_lvl_index[level];
+			int block_off = (i % hash_per_block) * digest_size;
+			char *hash_ptr = mtree[block_index].data + block_off;
+
+			sha256(mtree[i + prev_lvl_base].data,
+			       INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+		}
+	}
+
+	/* Calculate root hash from the top block */
+	sha256(mtree[0].data, INCFS_DATA_FILE_BLOCK_SIZE, root_hash);
+
+	return mtree;
+}
+
+static int load_hash_tree(int cmd_fd, int dir, const char *name,
+			  struct hash_block *mtree, int mtree_block_count)
+{
+	int err;
+	int i;
+	int fd;
+	struct incfs_fill_block *fill_block_array =
+		calloc(mtree_block_count, sizeof(struct incfs_fill_block));
+	struct incfs_fill_blocks fill_blocks = {
+		.count = mtree_block_count,
+		.fill_blocks = ptr_to_u64(fill_block_array),
+	};
+	struct incfs_permit_fill permit_fill;
+
+	if (!fill_block_array)
+		return -ENOMEM;
+
+	for (i = 0; i < fill_blocks.count; i++) {
+		fill_block_array[i] = (struct incfs_fill_block){
+			.block_index = i,
+			.data_len = INCFS_DATA_FILE_BLOCK_SIZE,
+			.data = ptr_to_u64(mtree[i].data),
+			.flags = INCFS_BLOCK_FLAGS_HASH
+		};
+	}
+
+	fd = openat(dir, name, O_RDONLY | O_CLOEXEC);
+	if (fd < 0) {
+		err = -errno;
+		goto failure;
+	}
+
+	permit_fill.file_descriptor = fd;
+	if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		err_msg("Failed to call PERMIT_FILL");
+		err = -errno;
+		goto failure;
+	}
+
+	err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+	close(fd);
+	if (err < fill_blocks.count)
+		err = -errno;
+	else
+		err = 0;
+
+failure:
+	free(fill_block_array);
+	return err;
+}
+
+int test_incfs_file(int dst_dir, const struct options *options, int flags)
+{
+	int cmd_file = openat(dst_dir, INCFS_PENDING_READS_FILENAME,
+			      O_RDONLY | O_CLOEXEC);
+	int err;
+	char name[4];
+	incfs_uuid_t id;
+	char tag[256];
+
+	snprintf(name, sizeof(name), "%c%c%c",
+		 flags & SHUFFLE ? 'S' : 's',
+		 flags & COMPRESS ? 'C' : 'c',
+		 flags & VERIFY ? 'V' : 'v');
+
+	if (cmd_file == -1) {
+		err_msg("Could not open command file");
+		return -errno;
+	}
+
+	if (flags & VERIFY) {
+		char root_hash[INCFS_MAX_HASH_SIZE];
+		int mtree_block_count;
+		struct hash_block *mtree = build_mtree(options->size, root_hash,
+						       &mtree_block_count);
+
+		if (!mtree) {
+			err_msg("Failed to build hash tree");
+			err = -ENOMEM;
+			goto fail;
+		}
+
+		err = crypto_emit_file(cmd_file, NULL, name, &id, options->size,
+				       root_hash, "add_data");
+
+		if (!err)
+			err = load_hash_tree(cmd_file, dst_dir, name, mtree,
+					     mtree_block_count);
+
+		free(mtree);
+	} else {
+		err = emit_file(cmd_file, NULL, name, &id, options->size, NULL);
+	}
+
+	if (err) {
+		err_msg("Failed to create file %s", name);
+		goto fail;
+	}
+
+	err = write_data(cmd_file, dst_dir, name, options->size, flags);
+	if (err)
+		goto fail;
+
+	snprintf(tag, sizeof(tag), "incfs%s%s%s",
+		 flags & SHUFFLE ? "(shuffle)" : "",
+		 flags & COMPRESS ? "(compress)" : "",
+		 flags & VERIFY ? "(verify)" : "");
+
+	err = measure_read_throughput(tag, dst_dir, name, options);
+
+fail:
+	close(cmd_file);
+	return err;
+}
+
+bool skip(struct options const *options, int flag, char c)
+{
+	if (!options->file_types)
+		return false;
+
+	if (flag && strchr(options->file_types, tolower(c)))
+		return true;
+
+	if (!flag && strchr(options->file_types, toupper(c)))
+		return true;
+
+	return false;
+}
+
+int main(int argc, char *const *argv)
+{
+	struct options options;
+	int err;
+	const char *native_dir = "native";
+	const char *src_dir = "src";
+	const char *dst_dir = "dst";
+	int native_dir_fd = -1;
+	int src_dir_fd = -1;
+	int dst_dir_fd = -1;
+	int block;
+	int flags;
+
+	err = parse_options(argc, argv, &options);
+	if (err)
+		return err;
+
+	err = chdir(options.test_dir);
+	if (err) {
+		err_msg("Failed to change to %s", options.test_dir);
+		return -errno;
+	}
+
+	/* Clean up any interrupted previous runs */
+	while (!umount(dst_dir))
+		;
+
+	err = remove_dir(native_dir) || remove_dir(src_dir) ||
+	      remove_dir(dst_dir);
+	if (err)
+		return err;
+
+	err = mkdir(native_dir, 0700);
+	if (err) {
+		err_msg("Failed to make directory %s", src_dir);
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = mkdir(src_dir, 0700);
+	if (err) {
+		err_msg("Failed to make directory %s", src_dir);
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = mkdir(dst_dir, 0700);
+	if (err) {
+		err_msg("Failed to make directory %s", src_dir);
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = mount_fs_opt(dst_dir, src_dir, "readahead=0,rlog_pages=0", 0);
+	if (err) {
+		err_msg("Failed to mount incfs");
+		goto cleanup;
+	}
+
+	native_dir_fd = open(native_dir, O_RDONLY | O_CLOEXEC);
+	src_dir_fd = open(src_dir, O_RDONLY | O_CLOEXEC);
+	dst_dir_fd = open(dst_dir, O_RDONLY | O_CLOEXEC);
+	if (native_dir_fd == -1 || src_dir_fd == -1 || dst_dir_fd == -1) {
+		err_msg("Failed to open native, src or dst dir");
+		err = -errno;
+		goto cleanup;
+	}
+
+	printf("%40s", "");
+	for (block = 0; block < options.blocks; ++block)
+		printf("%21d", 1 << (block + 12));
+	printf("\n");
+
+	if (!err && !options.no_native)
+		err = test_native_file(native_dir_fd, &options);
+
+	for (flags = 0; flags < LAST_FLAG && !err; ++flags) {
+		if (skip(&options, flags & SHUFFLE, 's') ||
+		    skip(&options, flags & COMPRESS, 'c') ||
+		    skip(&options, flags & VERIFY, 'v'))
+			continue;
+		err = test_incfs_file(dst_dir_fd, &options, flags);
+	}
+
+cleanup:
+	close(native_dir_fd);
+	close(src_dir_fd);
+	close(dst_dir_fd);
+	if (!options.no_cleanup) {
+		umount(dst_dir);
+		remove_dir(native_dir);
+		remove_dir(dst_dir);
+		remove_dir(src_dir);
+	}
+
+	return err;
+}
diff --git a/tools/testing/selftests/filesystems/incfs/incfs_stress.c b/tools/testing/selftests/filesystems/incfs/incfs_stress.c
new file mode 100644
index 0000000..a1d4917
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/incfs_stress.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google LLC
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+#define err_msg(...)                                                           \
+	do {                                                                   \
+		fprintf(stderr, "%s: (%d) ", TAG, __LINE__);                   \
+		fprintf(stderr, __VA_ARGS__);                                  \
+		fprintf(stderr, " (%s)\n", strerror(errno));                   \
+	} while (false)
+
+#define TAG "incfs_stress"
+
+struct options {
+	bool no_cleanup; /* -c */
+	const char *test_dir; /* -d */
+	unsigned int rng_seed; /* -g */
+	int num_reads; /* -n */
+	int readers; /* -r */
+	int size; /* -s */
+	int timeout; /* -t */
+};
+
+struct read_data {
+	const char *filename;
+	int dir_fd;
+	size_t filesize;
+	int num_reads;
+	unsigned int rng_seed;
+};
+
+int cancel_threads;
+
+int parse_options(int argc, char *const *argv, struct options *options)
+{
+	signed char c;
+
+	/* Set defaults here */
+	*options = (struct options){
+		.test_dir = ".",
+		.num_reads = 1000,
+		.readers = 10,
+		.size = 10,
+	};
+
+	/* Load options from command line here */
+	while ((c = getopt(argc, argv, "cd:g:n:r:s:t:")) != -1) {
+		switch (c) {
+		case 'c':
+			options->no_cleanup = true;
+			break;
+
+		case 'd':
+			options->test_dir = optarg;
+			break;
+
+		case 'g':
+			options->rng_seed = strtol(optarg, NULL, 10);
+			break;
+
+		case 'n':
+			options->num_reads = strtol(optarg, NULL, 10);
+			break;
+
+		case 'r':
+			options->readers = strtol(optarg, NULL, 10);
+			break;
+
+		case 's':
+			options->size = strtol(optarg, NULL, 10);
+			break;
+
+		case 't':
+			options->timeout = strtol(optarg, NULL, 10);
+			break;
+		}
+	}
+
+	return 0;
+}
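+
+/*
+ * Illustrative invocation spelling out the defaults above (needs root
+ * for the incfs mount; directory hypothetical):
+ *
+ *   ./incfs_stress -d /tmp/incfs-stress -r 10 -n 1000 -s 10
+ */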
+
+void *reader(void *data)
+{
+	struct read_data *read_data = (struct read_data *)data;
+	int i;
+	int fd = -1;
+	void *buffer = malloc(read_data->filesize);
+
+	if (!buffer) {
+		err_msg("Failed to alloc read buffer");
+		goto out;
+	}
+
+	fd = openat(read_data->dir_fd, read_data->filename,
+		    O_RDONLY | O_CLOEXEC);
+	if (fd == -1) {
+		err_msg("Failed to open file");
+		goto out;
+	}
+
+	for (i = 0; i < read_data->num_reads && !cancel_threads; ++i) {
+		off_t offset = rnd(read_data->filesize, &read_data->rng_seed);
+		size_t count =
+			rnd(read_data->filesize - offset, &read_data->rng_seed);
+		ssize_t err = pread(fd, buffer, count, offset);
+
+		if (err != count)
+			err_msg("failed to read with value %lu", err);
+	}
+
+out:
+	close(fd);
+	free(read_data);
+	free(buffer);
+	return NULL;
+}
+
+int write_data(int cmd_fd, int dir_fd, const char *name, size_t size)
+{
+	int fd = openat(dir_fd, name, O_RDWR | O_CLOEXEC);
+	struct incfs_permit_fill permit_fill = {
+		.file_descriptor = fd,
+	};
+	int error = 0;
+	int i;
+	int block_count = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+
+	if (fd == -1) {
+		err_msg("Could not open file for writing %s", name);
+		return -errno;
+	}
+
+	if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		err_msg("Failed to call PERMIT_FILL");
+		error = -errno;
+		goto out;
+	}
+
+	for (i = 0; i < block_count; ++i) {
+		uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+		size_t block_size =
+			size > i * INCFS_DATA_FILE_BLOCK_SIZE ?
+				INCFS_DATA_FILE_BLOCK_SIZE :
+				size - (i * INCFS_DATA_FILE_BLOCK_SIZE);
+		struct incfs_fill_block fill_block = {
+			.compression = COMPRESSION_NONE,
+			.block_index = i,
+			.data_len = block_size,
+			.data = ptr_to_u64(data),
+		};
+		struct incfs_fill_blocks fill_blocks = {
+			.count = 1,
+			.fill_blocks = ptr_to_u64(&fill_block),
+		};
+		int written = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+
+		if (written != 1) {
+			error = -errno;
+			err_msg("Failed to write block %d in file %s", i, name);
+			break;
+		}
+	}
+out:
+	close(fd);
+	return error;
+}
+
+int test_files(int src_dir, int dst_dir, struct options const *options)
+{
+	unsigned int seed = options->rng_seed;
+	int cmd_file = openat(dst_dir, INCFS_PENDING_READS_FILENAME,
+			      O_RDONLY | O_CLOEXEC);
+	int err;
+	const char *name = "001";
+	incfs_uuid_t id;
+	size_t size;
+	int i;
+	pthread_t *threads = NULL;
+
+	size = 1 << (rnd(options->size, &seed) + 12);
+	size += rnd(size, &seed);
+
+	if (cmd_file == -1) {
+		err_msg("Could not open command file");
+		return -errno;
+	}
+
+	err = emit_file(cmd_file, NULL, name, &id, size, NULL);
+	if (err) {
+		err_msg("Failed to create file %s", name);
+		close(cmd_file);
+		return err;
+	}
+
+	threads = malloc(sizeof(pthread_t) * options->readers);
+	if (!threads) {
+		err_msg("Could not allocate memory for threads");
+		close(cmd_file);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < options->readers; ++i) {
+		struct read_data *read_data = malloc(sizeof(*read_data));
+
+		if (!read_data) {
+			err_msg("Failed to allocate read_data");
+			err = -ENOMEM;
+			break;
+		}
+
+		*read_data = (struct read_data){
+			.filename = name,
+			.dir_fd = dst_dir,
+			.filesize = size,
+			.num_reads = options->num_reads,
+			.rng_seed = seed,
+		};
+
+		rnd(0, &seed);
+
+		err = pthread_create(threads + i, 0, reader, read_data);
+		if (err) {
+			err_msg("Failed to create thread");
+			free(read_data);
+			break;
+		}
+	}
+
+	if (err)
+		cancel_threads = 1;
+	else
+		err = write_data(cmd_file, dst_dir, name, size);
+
+	for (; i > 0; --i) {
+		if (pthread_join(threads[i - 1], NULL)) {
+			err_msg("FATAL: failed to join thread");
+			exit(1);
+		}
+	}
+
+	free(threads);
+	close(cmd_file);
+	return err;
+}
+
+int main(int argc, char *const *argv)
+{
+	struct options options;
+	int err;
+	const char *src_dir = "src";
+	const char *dst_dir = "dst";
+	int src_dir_fd = -1;
+	int dst_dir_fd = -1;
+
+	err = parse_options(argc, argv, &options);
+	if (err)
+		return err;
+
+	err = chdir(options.test_dir);
+	if (err) {
+		err_msg("Failed to change to %s", options.test_dir);
+		return -errno;
+	}
+
+	err = remove_dir(src_dir) || remove_dir(dst_dir);
+	if (err)
+		return err;
+
+	err = mkdir(src_dir, 0700);
+	if (err) {
+		err_msg("Failed to make directory %s", src_dir);
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = mkdir(dst_dir, 0700);
+	if (err) {
+		err_msg("Failed to make directory %s", src_dir);
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = mount_fs(dst_dir, src_dir, options.timeout);
+	if (err) {
+		err_msg("Failed to mount incfs");
+		goto cleanup;
+	}
+
+	src_dir_fd = open(src_dir, O_RDONLY | O_CLOEXEC);
+	dst_dir_fd = open(dst_dir, O_RDONLY | O_CLOEXEC);
+	if (src_dir_fd == -1 || dst_dir_fd == -1) {
+		err_msg("Failed to open src or dst dir");
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = test_files(src_dir_fd, dst_dir_fd, &options);
+
+cleanup:
+	close(src_dir_fd);
+	close(dst_dir_fd);
+	if (!options.no_cleanup) {
+		umount(dst_dir);
+		remove_dir(dst_dir);
+		remove_dir(src_dir);
+	}
+
+	return err;
+}
diff --git a/tools/testing/selftests/filesystems/incfs/incfs_test.c b/tools/testing/selftests/filesystems/incfs/incfs_test.c
new file mode 100644
index 0000000..4c30aec
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/incfs_test.c
@@ -0,0 +1,4751 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#define _GNU_SOURCE
+
+#include <alloca.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <lz4.h>
+#include <poll.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <zstd.h>
+
+#include <sys/inotify.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/xattr.h>
+
+#include <linux/random.h>
+#include <linux/stat.h>
+#include <linux/unistd.h>
+
+#include <openssl/pem.h>
+#include <openssl/x509.h>
+
+#include <kselftest.h>
+#include <include/uapi/linux/fsverity.h>
+
+#include "utils.h"
+
+/* Can't include uapi/linux/fs.h because it clashes with mount.h */
+#define	FS_IOC_GETFLAGS			_IOR('f', 1, long)
+#define FS_VERITY_FL			0x00100000 /* Verity protected inode */
+
+#define TEST_FAILURE 1
+#define TEST_SUCCESS 0
+
+#define INCFS_ROOT_INODE 0
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define le16_to_cpu(x)          (x)
+#define le32_to_cpu(x)          (x)
+#define le64_to_cpu(x)          (x)
+#else
+#error Big endian not supported!
+#endif
+
+struct {
+	int file;
+	int test;
+	bool verbose;
+} options;
+
+#define TESTCOND(condition)						\
+	do {								\
+		if (!(condition)) {					\
+			ksft_print_msg("%s failed %d\n",		\
+				       __func__, __LINE__);		\
+			goto out;					\
+		} else if (options.verbose)				\
+			ksft_print_msg("%s succeeded %d\n",		\
+				       __func__, __LINE__);		\
+	} while (false)
+
+#define TEST(statement, condition)					\
+	do {								\
+		statement;						\
+		TESTCOND(condition);					\
+	} while (false)
+
+#define TESTEQUAL(statement, res)					\
+	TESTCOND((statement) == (res))
+
+#define TESTNE(statement, res)					\
+	TESTCOND((statement) != (res))
+
+#define TESTSYSCALL(statement)						\
+	do {								\
+		int res = statement;					\
+									\
+		if (res)						\
+			ksft_print_msg("Failed: %s (%d)\n",		\
+				       strerror(errno), errno);		\
+		TESTEQUAL(res, 0);					\
+	} while (false)
+
+void print_bytes(const void *data, size_t size)
+{
+	const uint8_t *bytes = data;
+	int i;
+
+	for (i = 0; i < size; ++i) {
+		if (i % 0x10 == 0)
+			printf("%08x:", i);
+		printf("%02x ", (unsigned int) bytes[i]);
+		if (i % 0x10 == 0x0f)
+			printf("\n");
+	}
+
+	if (i % 0x10 != 0)
+		printf("\n");
+}
+
+struct hash_block {
+	char data[INCFS_DATA_FILE_BLOCK_SIZE];
+};
+
+struct test_signature {
+	void *data;
+	size_t size;
+
+	char add_data[100];
+	size_t add_data_size;
+};
+
+struct test_file {
+	int index;
+	incfs_uuid_t id;
+	char *name;
+	off_t size;
+	char root_hash[INCFS_MAX_HASH_SIZE];
+	struct hash_block *mtree;
+	int mtree_block_count;
+	struct test_signature sig;
+	unsigned char *verity_sig;
+	size_t verity_sig_size;
+};
+
+struct test_files_set {
+	struct test_file *files;
+	int files_count;
+};
+
+struct linux_dirent64 {
+	uint64_t       d_ino;
+	int64_t        d_off;
+	unsigned short d_reclen;
+	unsigned char  d_type;
+	char	       d_name[0];
+} __packed;
+
+struct test_files_set get_test_files_set(void)
+{
+	static struct test_file files[] = {
+		{ .index = 0, .name = "file_one_byte", .size = 1 },
+		{ .index = 1,
+		  .name = "file_one_block",
+		  .size = INCFS_DATA_FILE_BLOCK_SIZE },
+		{ .index = 2,
+		  .name = "file_one_and_a_half_blocks",
+		  .size = INCFS_DATA_FILE_BLOCK_SIZE +
+			  INCFS_DATA_FILE_BLOCK_SIZE / 2 },
+		{ .index = 3,
+		  .name = "file_three",
+		  .size = 300 * INCFS_DATA_FILE_BLOCK_SIZE + 3 },
+		{ .index = 4,
+		  .name = "file_four",
+		  .size = 400 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 5,
+		  .name = "file_five",
+		  .size = 500 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 6,
+		  .name = "file_six",
+		  .size = 600 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 7,
+		  .name = "file_seven",
+		  .size = 700 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 8,
+		  .name = "file_eight",
+		  .size = 800 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 9,
+		  .name = "file_nine",
+		  .size = 900 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 10, .name = "file_big", .size = 500 * 1024 * 1024 }
+	};
+
+	if (options.file)
+		return (struct test_files_set) {
+			.files = files + options.file - 1,
+			.files_count = 1,
+		};
+
+	return (struct test_files_set){ .files = files,
+					.files_count = ARRAY_SIZE(files) };
+}
+
+struct test_files_set get_small_test_files_set(void)
+{
+	static struct test_file files[] = {
+		{ .index = 0, .name = "file_one_byte", .size = 1 },
+		{ .index = 1,
+		  .name = "file_one_block",
+		  .size = INCFS_DATA_FILE_BLOCK_SIZE },
+		{ .index = 2,
+		  .name = "file_one_and_a_half_blocks",
+		  .size = INCFS_DATA_FILE_BLOCK_SIZE +
+			  INCFS_DATA_FILE_BLOCK_SIZE / 2 },
+		{ .index = 3,
+		  .name = "file_three",
+		  .size = 300 * INCFS_DATA_FILE_BLOCK_SIZE + 3 },
+		{ .index = 4,
+		  .name = "file_four",
+		  .size = 400 * INCFS_DATA_FILE_BLOCK_SIZE + 7 }
+	};
+	return (struct test_files_set){ .files = files,
+					.files_count = ARRAY_SIZE(files) };
+}
+
+static int get_file_block_seed(int file, int block)
+{
+	return 7919 * file + block;
+}
+
+static loff_t min(loff_t a, loff_t b)
+{
+	return a < b ? a : b;
+}
+
+static int ilog2(size_t n)
+{
+	int l = 0;
+
+	while (n > 1) {
+		++l;
+		n >>= 1;
+	}
+	return l;
+}
+
+static pid_t flush_and_fork(void)
+{
+	fflush(stdout);
+	return fork();
+}
+
+static void print_error(char *msg)
+{
+	ksft_print_msg("%s: %s\n", msg, strerror(errno));
+}
+
+static int wait_for_process(pid_t pid)
+{
+	int status;
+	int wait_res;
+
+	wait_res = waitpid(pid, &status, 0);
+	if (wait_res <= 0) {
+		print_error("Can't wait for the child");
+		return -EINVAL;
+	}
+	if (!WIFEXITED(status)) {
+		ksft_print_msg("Unexpected child status pid=%d\n", pid);
+		return -EINVAL;
+	}
+	status = WEXITSTATUS(status);
+	if (status != 0)
+		return status;
+	return 0;
+}
+
+static void rnd_buf(uint8_t *data, size_t len, unsigned int seed)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		seed = 1103515245 * seed + 12345;
+		data[i] = (uint8_t)(seed >> (i % 13));
+	}
+}
+
+char *bin2hex(char *dst, const void *src, size_t count)
+{
+	const unsigned char *_src = src;
+	static const char hex_asc[] = "0123456789abcdef";
+
+	while (count--) {
+		unsigned char x = *_src++;
+
+		*dst++ = hex_asc[(x & 0xf0) >> 4];
+		*dst++ = hex_asc[(x & 0x0f)];
+	}
+	*dst = 0;
+	return dst;
+}
+
+static char *get_index_filename(const char *mnt_dir, incfs_uuid_t id)
+{
+	char path[FILENAME_MAX];
+	char str_id[1 + 2 * sizeof(id)];
+
+	bin2hex(str_id, id.bytes, sizeof(id.bytes));
+	snprintf(path, ARRAY_SIZE(path), "%s/.index/%s", mnt_dir, str_id);
+
+	return strdup(path);
+}
+
+static char *get_incomplete_filename(const char *mnt_dir, incfs_uuid_t id)
+{
+	char path[FILENAME_MAX];
+	char str_id[1 + 2 * sizeof(id)];
+
+	bin2hex(str_id, id.bytes, sizeof(id.bytes));
+	snprintf(path, ARRAY_SIZE(path), "%s/.incomplete/%s", mnt_dir, str_id);
+
+	return strdup(path);
+}
+
+int open_file_by_id(const char *mnt_dir, incfs_uuid_t id, bool use_ioctl)
+{
+	char *path = get_index_filename(mnt_dir, id);
+	int cmd_fd = open_commands_file(mnt_dir);
+	int fd = open(path, O_RDWR | O_CLOEXEC);
+	struct incfs_permit_fill permit_fill = {
+		.file_descriptor = fd,
+	};
+	int error = 0;
+
+	if (fd < 0) {
+		print_error("Can't open file by id.");
+		error = -errno;
+		goto out;
+	}
+
+	if (use_ioctl && ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		print_error("Failed to call PERMIT_FILL");
+		error = -errno;
+		goto out;
+	}
+
+	if (ioctl(fd, INCFS_IOC_PERMIT_FILL, &permit_fill) != -1) {
+		print_error(
+			"Successfully called PERMIT_FILL on non pending_read file");
+		error = -EPERM;
+		goto out;
+	}
+
+out:
+	free(path);
+	close(cmd_fd);
+
+	if (error) {
+		close(fd);
+		return error;
+	}
+
+	return fd;
+}
+
+int get_file_attr(const char *mnt_dir, incfs_uuid_t id, char *value, int size)
+{
+	char *path = get_index_filename(mnt_dir, id);
+	int res;
+
+	res = getxattr(path, INCFS_XATTR_METADATA_NAME, value, size);
+	if (res < 0)
+		res = -errno;
+
+	free(path);
+	return res;
+}
+
+static bool same_id(incfs_uuid_t *id1, incfs_uuid_t *id2)
+{
+	return !memcmp(id1->bytes, id2->bytes, sizeof(id1->bytes));
+}
+
+ssize_t ZSTD_compress_default(char *data, char *comp_data, size_t data_size,
+					size_t comp_size)
+{
+	return ZSTD_compress(comp_data, comp_size, data, data_size, 1);
+}
+
+static int emit_test_blocks(const char *mnt_dir, struct test_file *file,
+			int blocks[], int count)
+{
+	uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+	uint8_t comp_data[2 * INCFS_DATA_FILE_BLOCK_SIZE];
+	int block_count = (count > 32) ? 32 : count;
+	int data_buf_size = 2 * INCFS_DATA_FILE_BLOCK_SIZE * block_count;
+	uint8_t *data_buf = malloc(data_buf_size);
+	uint8_t *current_data = data_buf;
+	uint8_t *data_end = data_buf + data_buf_size;
+	struct incfs_fill_block *block_buf =
+		calloc(block_count, sizeof(struct incfs_fill_block));
+	struct incfs_fill_blocks fill_blocks = {
+		.count = block_count,
+		.fill_blocks = ptr_to_u64(block_buf),
+	};
+	ssize_t write_res = 0;
+	int fd = -1;
+	int error = 0;
+	int i = 0;
+	int blocks_written = 0;
+
+	for (i = 0; i < block_count; i++) {
+		int block_index = blocks[i];
+		bool compress_zstd = (file->index + block_index) % 4 == 2;
+		bool compress_lz4 = (file->index + block_index) % 4 == 0;
+		int seed = get_file_block_seed(file->index, block_index);
+		off_t block_offset =
+			((off_t)block_index) * INCFS_DATA_FILE_BLOCK_SIZE;
+		size_t block_size = 0;
+
+		if (block_offset > file->size) {
+			error = -EINVAL;
+			break;
+		}
+		if (file->size - block_offset >
+			INCFS_DATA_FILE_BLOCK_SIZE)
+			block_size = INCFS_DATA_FILE_BLOCK_SIZE;
+		else
+			block_size = file->size - block_offset;
+
+		rnd_buf(data, block_size, seed);
+		if (compress_lz4) {
+			size_t comp_size = LZ4_compress_default((char *)data,
+					(char *)comp_data, block_size,
+					ARRAY_SIZE(comp_data));
+
+			if (comp_size <= 0) {
+				error = -EBADMSG;
+				break;
+			}
+			if (current_data + comp_size > data_end) {
+				error = -ENOMEM;
+				break;
+			}
+			memcpy(current_data, comp_data, comp_size);
+			block_size = comp_size;
+			block_buf[i].compression = COMPRESSION_LZ4;
+		} else if (compress_zstd) {
+			size_t comp_size = ZSTD_compress(comp_data,
+					ARRAY_SIZE(comp_data), data, block_size,
+					1);
+
+			if (comp_size <= 0) {
+				error = -EBADMSG;
+				break;
+			}
+			if (current_data + comp_size > data_end) {
+				error = -ENOMEM;
+				break;
+			}
+			memcpy(current_data, comp_data, comp_size);
+			block_size = comp_size;
+			block_buf[i].compression = COMPRESSION_ZSTD;
+		} else {
+			if (current_data + block_size > data_end) {
+				error = -ENOMEM;
+				break;
+			}
+			memcpy(current_data, data, block_size);
+			block_buf[i].compression = COMPRESSION_NONE;
+		}
+
+		block_buf[i].block_index = block_index;
+		block_buf[i].data_len = block_size;
+		block_buf[i].data = ptr_to_u64(current_data);
+		current_data += block_size;
+	}
+
+	if (!error) {
+		fd = open_file_by_id(mnt_dir, file->id, false);
+		if (fd < 0) {
+			error = -errno;
+			goto out;
+		}
+		write_res = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+		if (write_res >= 0) {
+			ksft_print_msg("Wrote to file via normal fd error\n");
+			error = -EPERM;
+			goto out;
+		}
+
+		close(fd);
+		fd = open_file_by_id(mnt_dir, file->id, true);
+		if (fd < 0) {
+			error = -errno;
+			goto out;
+		}
+		write_res = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+		if (write_res < 0)
+			error = -errno;
+		else
+			blocks_written = write_res;
+	}
+	if (error) {
+		ksft_print_msg(
+			"Writing data block error. Write returned: %d. Error:%s\n",
+			write_res, strerror(-error));
+	}
+
+out:
+	free(block_buf);
+	free(data_buf);
+	close(fd);
+	return (error < 0) ? error : blocks_written;
+}
+
+static int emit_test_block(const char *mnt_dir, struct test_file *file,
+				int block_index)
+{
+	int res = emit_test_blocks(mnt_dir, file, &block_index, 1);
+
+	if (res == 0)
+		return -EINVAL;
+	if (res == 1)
+		return 0;
+	return res;
+}
+
+static void shuffle(int array[], int count, unsigned int seed)
+{
+	int i;
+
+	for (i = 0; i < count - 1; i++) {
+		int items_left = count - i;
+		int shuffle_index;
+		int v;
+
+		seed = 1103515245 * seed + 12345;
+		shuffle_index = i + seed % items_left;
+
+		v = array[shuffle_index];
+		array[shuffle_index] = array[i];
+		array[i] = v;
+	}
+}
+
+static int emit_test_file_data(const char *mount_dir, struct test_file *file)
+{
+	int i;
+	int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	int *block_indexes = NULL;
+	int result = 0;
+	int blocks_written = 0;
+
+	if (file->size == 0)
+		return 0;
+
+	block_indexes = calloc(block_cnt, sizeof(*block_indexes));
+	if (!block_indexes)
+		return -ENOMEM;
+	for (i = 0; i < block_cnt; i++)
+		block_indexes[i] = i;
+	shuffle(block_indexes, block_cnt, file->index);
+
+	for (i = 0; i < block_cnt; i += blocks_written) {
+		blocks_written = emit_test_blocks(mount_dir, file,
+					block_indexes + i, block_cnt - i);
+		if (blocks_written < 0) {
+			result = blocks_written;
+			goto out;
+		}
+		if (blocks_written == 0) {
+			result = -EIO;
+			goto out;
+		}
+	}
+out:
+	free(block_indexes);
+	return result;
+}
+
+static loff_t read_whole_file(const char *filename)
+{
+	int fd = -1;
+	loff_t result;
+	loff_t bytes_read = 0;
+	uint8_t buff[16 * 1024];
+
+	fd = open(filename, O_RDONLY | O_CLOEXEC);
+	if (fd <= 0)
+		return fd;
+
+	while (1) {
+		int read_result = read(fd, buff, ARRAY_SIZE(buff));
+
+		if (read_result < 0) {
+			print_error("Error during reading from a file.");
+			result = -errno;
+			goto cleanup;
+		} else if (read_result == 0)
+			break;
+
+		bytes_read += read_result;
+	}
+	result = bytes_read;
+
+cleanup:
+	close(fd);
+	return result;
+}
+
+static int read_test_file(uint8_t *buf, size_t len, char *filename,
+			  int block_idx)
+{
+	int fd = -1;
+	int result;
+	int bytes_read = 0;
+	size_t bytes_to_read = len;
+	off_t offset = ((off_t)block_idx) * INCFS_DATA_FILE_BLOCK_SIZE;
+
+	fd = open(filename, O_RDONLY | O_CLOEXEC);
+	if (fd <= 0)
+		return fd;
+
+	if (lseek(fd, offset, SEEK_SET) != offset) {
+		print_error("Seek error");
+		return -errno;
+	}
+
+	while (bytes_read < bytes_to_read) {
+		int read_result =
+			read(fd, buf + bytes_read, bytes_to_read - bytes_read);
+		if (read_result < 0) {
+			result = -errno;
+			goto cleanup;
+		} else if (read_result == 0)
+			break;
+
+		bytes_read += read_result;
+	}
+	result = bytes_read;
+
+cleanup:
+	close(fd);
+	return result;
+}
+
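+/*
+ * (Re)create an empty "<mount_dir>-src" directory to back the incfs
+ * mount, removing anything left over from a previous run. Returns a
+ * malloc'ed path, or NULL on failure.
+ */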
+static char *create_backing_dir(const char *mount_dir)
+{
+	struct stat st;
+	char backing_dir_name[255];
+
+	snprintf(backing_dir_name, ARRAY_SIZE(backing_dir_name), "%s-src",
+		 mount_dir);
+
+	if (stat(backing_dir_name, &st) == 0) {
+		if (S_ISDIR(st.st_mode)) {
+			int error = delete_dir_tree(backing_dir_name);
+
+			if (error) {
+				ksft_print_msg(
+				      "Can't delete existing backing dir. %d\n",
+				      error);
+				return NULL;
+			}
+		} else {
+			if (unlink(backing_dir_name)) {
+				print_error("Can't clear backing dir");
+				return NULL;
+			}
+		}
+	}
+
+	if (mkdir(backing_dir_name, 0777)) {
+		if (errno != EEXIST) {
+			print_error("Can't open/create backing dir");
+			return NULL;
+		}
+	}
+
+	return strdup(backing_dir_name);
+}
+
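+/*
+ * Check the file's size and per-block contents against the deterministic
+ * pseudo-random generator. A non-zero |shuffle_seed| randomizes the order
+ * in which blocks are read, to exercise out-of-order access.
+ */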
+static int validate_test_file_content_with_seed(const char *mount_dir,
+						struct test_file *file,
+						unsigned int shuffle_seed)
+{
+	int error = -1;
+	char *filename = concat_file_name(mount_dir, file->name);
+	off_t size = file->size;
+	loff_t actual_size = get_file_size(filename);
+	int block_cnt = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	int *block_indexes = NULL;
+	int i;
+
+	block_indexes = alloca(sizeof(int) * block_cnt);
+	for (i = 0; i < block_cnt; i++)
+		block_indexes[i] = i;
+
+	if (shuffle_seed != 0)
+		shuffle(block_indexes, block_cnt, shuffle_seed);
+
+	if (actual_size != size) {
+		ksft_print_msg(
+			"File size doesn't match. name: %s expected size: %lld actual size: %lld\n",
+			filename, (long long)size, (long long)actual_size);
+		error = -1;
+		goto failure;
+	}
+
+	for (i = 0; i < block_cnt; i++) {
+		int block_idx = block_indexes[i];
+		uint8_t expected_block[INCFS_DATA_FILE_BLOCK_SIZE];
+		uint8_t actual_block[INCFS_DATA_FILE_BLOCK_SIZE];
+		int seed = get_file_block_seed(file->index, block_idx);
+		size_t bytes_to_compare = min(
+			(off_t)INCFS_DATA_FILE_BLOCK_SIZE,
+			size - ((off_t)block_idx) * INCFS_DATA_FILE_BLOCK_SIZE);
+		int read_result =
+			read_test_file(actual_block, INCFS_DATA_FILE_BLOCK_SIZE,
+				       filename, block_idx);
+		if (read_result < 0) {
+			ksft_print_msg(
+				"Error reading block %d from file %s. Error: %s\n",
+				block_idx, filename, strerror(-read_result));
+			error = read_result;
+			goto failure;
+		}
+		rnd_buf(expected_block, INCFS_DATA_FILE_BLOCK_SIZE, seed);
+		if (memcmp(expected_block, actual_block, bytes_to_compare)) {
+			ksft_print_msg(
+				"File contents don't match. name: %s block:%d\n",
+				file->name, block_idx);
+			error = -2;
+			goto failure;
+		}
+	}
+	free(filename);
+	return 0;
+
+failure:
+	free(filename);
+	return error;
+}
+
+static int validate_test_file_content(const char *mount_dir,
+				      struct test_file *file)
+{
+	return validate_test_file_content_with_seed(mount_dir, file, 0);
+}
+
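+/*
+ * Serve pending reads: wait on the commands file for read requests,
+ * match each request to a test file by id, and emit the requested block.
+ * Returns when waiting times out (no more readers) or on error.
+ */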
+static int data_producer(const char *mount_dir, struct test_files_set *test_set)
+{
+	int ret = 0;
+	int timeout_ms = 1000;
+	struct incfs_pending_read_info prs[100] = {};
+	int prs_size = ARRAY_SIZE(prs);
+	int fd = open_commands_file(mount_dir);
+
+	if (fd < 0)
+		return -errno;
+
+	while ((ret = wait_for_pending_reads(fd, timeout_ms, prs, prs_size)) >
+	       0) {
+		int read_count = ret;
+		int i;
+
+		for (i = 0; i < read_count; i++) {
+			int j = 0;
+			struct test_file *file = NULL;
+
+			for (j = 0; j < test_set->files_count; j++) {
+				bool same = same_id(&(test_set->files[j].id),
+					&(prs[i].file_id));
+
+				if (same) {
+					file = &test_set->files[j];
+					break;
+				}
+			}
+			if (!file) {
+				ksft_print_msg(
+					"Unknown file in pending reads.\n");
+				break;
+			}
+
+			ret = emit_test_block(mount_dir, file,
+				prs[i].block_index);
+			if (ret < 0) {
+				ksft_print_msg("Emitting test data error: %s\n",
+						strerror(-ret));
+				break;
+			}
+		}
+	}
+	close(fd);
+	return ret;
+}
+
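+/*
+ * Same as data_producer(), but consuming v2 pending-read records
+ * (struct incfs_pending_read_info2), as used with report_uid mounts.
+ */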
+static int data_producer2(const char *mount_dir,
+			  struct test_files_set *test_set)
+{
+	int ret = 0;
+	int timeout_ms = 1000;
+	struct incfs_pending_read_info2 prs[100] = {};
+	int prs_size = ARRAY_SIZE(prs);
+	int fd = open_commands_file(mount_dir);
+
+	if (fd < 0)
+		return -errno;
+
+	while ((ret = wait_for_pending_reads2(fd, timeout_ms, prs, prs_size)) >
+	       0) {
+		int read_count = ret;
+		int i;
+
+		for (i = 0; i < read_count; i++) {
+			int j = 0;
+			struct test_file *file = NULL;
+
+			for (j = 0; j < test_set->files_count; j++) {
+				bool same = same_id(&(test_set->files[j].id),
+					&(prs[i].file_id));
+
+				if (same) {
+					file = &test_set->files[j];
+					break;
+				}
+			}
+			if (!file) {
+				ksft_print_msg(
+					"Unknown file in pending reads.\n");
+				break;
+			}
+
+			ret = emit_test_block(mount_dir, file,
+				prs[i].block_index);
+			if (ret < 0) {
+				ksft_print_msg("Emitting test data error: %s\n",
+						strerror(-ret));
+				break;
+			}
+		}
+	}
+	close(fd);
+	return ret;
+}
+
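+/*
+ * Build the file's SHA-256 Merkle tree in memory: level 0 hashes the
+ * (zero-padded) data blocks, each higher level hashes the level below,
+ * and the root hash is taken over the single top block. Tree blocks are
+ * stored top-down in file->mtree, so mtree[0] is the top of the tree.
+ */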
+static int build_mtree(struct test_file *file)
+{
+	char data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+	const int digest_size = SHA256_DIGEST_SIZE;
+	const int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+	int block_count = 0;
+	int hash_block_count = 0;
+	int total_tree_block_count = 0;
+	int tree_lvl_index[INCFS_MAX_MTREE_LEVELS] = {};
+	int tree_lvl_count[INCFS_MAX_MTREE_LEVELS] = {};
+	int levels_count = 0;
+	int i, level;
+
+	if (file->size == 0)
+		return 0;
+
+	block_count = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	hash_block_count = block_count;
+	for (i = 0; hash_block_count > 1; i++) {
+		hash_block_count = (hash_block_count + hash_per_block - 1)
+			/ hash_per_block;
+		tree_lvl_count[i] = hash_block_count;
+		total_tree_block_count += hash_block_count;
+	}
+	levels_count = i;
+
+	for (i = 0; i < levels_count; i++) {
+		int prev_lvl_base = (i == 0) ? total_tree_block_count :
+			tree_lvl_index[i - 1];
+
+		tree_lvl_index[i] = prev_lvl_base - tree_lvl_count[i];
+	}
+
+	file->mtree_block_count = total_tree_block_count;
+	if (block_count == 1) {
+		int seed = get_file_block_seed(file->index, 0);
+
+		memset(data, 0, INCFS_DATA_FILE_BLOCK_SIZE);
+		rnd_buf((uint8_t *)data, file->size, seed);
+		sha256(data, INCFS_DATA_FILE_BLOCK_SIZE, file->root_hash);
+		return 0;
+	}
+
+	file->mtree = calloc(total_tree_block_count, sizeof(*file->mtree));
+	if (!file->mtree)
+		return -ENOMEM;
+	/* Build level 0 hashes. */
+	for (i = 0; i < block_count; i++) {
+		off_t offset = i * INCFS_DATA_FILE_BLOCK_SIZE;
+		size_t block_size = INCFS_DATA_FILE_BLOCK_SIZE;
+		int block_index = tree_lvl_index[0] +
+					i / hash_per_block;
+		int block_off = (i % hash_per_block) * digest_size;
+		int seed = get_file_block_seed(file->index, i);
+		char *hash_ptr = file->mtree[block_index].data + block_off;
+
+		if (file->size - offset < block_size) {
+			block_size = file->size - offset;
+			memset(data, 0, INCFS_DATA_FILE_BLOCK_SIZE);
+		}
+
+		rnd_buf((uint8_t *)data, block_size, seed);
+		sha256(data, INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+	}
+
+	/* Build higher levels of hash tree. */
+	for (level = 1; level < levels_count; level++) {
+		int prev_lvl_base = tree_lvl_index[level - 1];
+		int prev_lvl_count = tree_lvl_count[level - 1];
+
+		for (i = 0; i < prev_lvl_count; i++) {
+			int block_index =
+				i / hash_per_block + tree_lvl_index[level];
+			int block_off = (i % hash_per_block) * digest_size;
+			char *hash_ptr =
+				file->mtree[block_index].data + block_off;
+
+			sha256(file->mtree[i + prev_lvl_base].data,
+			       INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+		}
+	}
+
+	/* Calculate root hash from the top block */
+	sha256(file->mtree[0].data,
+		INCFS_DATA_FILE_BLOCK_SIZE, file->root_hash);
+
+	return 0;
+}
+
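+/*
+ * Upload the precomputed Merkle tree via INCFS_IOC_FILL_BLOCKS with
+ * INCFS_BLOCK_FLAGS_HASH, first checking that the ioctl is rejected on
+ * an fd opened without fill permission.
+ */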
+static int load_hash_tree(const char *mount_dir, struct test_file *file)
+{
+	int err;
+	int i;
+	int fd;
+	struct incfs_fill_blocks fill_blocks = {
+		.count = file->mtree_block_count,
+	};
+	struct incfs_fill_block *fill_block_array =
+		calloc(fill_blocks.count, sizeof(struct incfs_fill_block));
+
+	if (fill_blocks.count == 0)
+		return 0;
+
+	if (!fill_block_array)
+		return -ENOMEM;
+	fill_blocks.fill_blocks = ptr_to_u64(fill_block_array);
+
+	for (i = 0; i < fill_blocks.count; i++) {
+		fill_block_array[i] = (struct incfs_fill_block){
+			.block_index = i,
+			.data_len = INCFS_DATA_FILE_BLOCK_SIZE,
+			.data = ptr_to_u64(file->mtree[i].data),
+			.flags = INCFS_BLOCK_FLAGS_HASH
+		};
+	}
+
+	fd = open_file_by_id(mount_dir, file->id, false);
+	if (fd < 0) {
+		err = -errno;
+		goto failure;
+	}
+
+	err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+	close(fd);
+	if (err >= 0) {
+		err = -EPERM;
+		goto failure;
+	}
+
+	fd = open_file_by_id(mount_dir, file->id, true);
+	if (fd < 0) {
+		err = -errno;
+		goto failure;
+	}
+
+	err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+	close(fd);
+	if (err < fill_blocks.count)
+		err = -errno;
+	else
+		err = 0;
+
+failure:
+	free(fill_block_array);
+	return err;
+}
+
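+/*
+ * The .index directory must be read-only for userspace: mkdir, rmdir,
+ * file creation, unlink, link and rename in or of it must all fail
+ * with EBUSY.
+ */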
+static int cant_touch_index_test(const char *mount_dir)
+{
+	char *file_name = "test_file";
+	int file_size = 123;
+	incfs_uuid_t file_id;
+	char *index_path = concat_file_name(mount_dir, ".index");
+	char *subdir = concat_file_name(index_path, "subdir");
+	char *dst_name = concat_file_name(mount_dir, "something");
+	char *filename_in_index = NULL;
+	char *file_path = concat_file_name(mount_dir, file_name);
+	char *backing_dir;
+	int cmd_fd = -1;
+	int err;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+	free(backing_dir);
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	err = mkdir(subdir, 0777);
+	if (err == 0 || errno != EBUSY) {
+		print_error("Shouldn't be able to crate subdir in index\n");
+		goto failure;
+	}
+
+	err = rmdir(index_path);
+	if (err == 0 || errno != EBUSY) {
+		print_error(".index directory should not be removed\n");
+		goto failure;
+	}
+
+	err = emit_file(cmd_fd, ".index", file_name, &file_id,
+				file_size, NULL);
+	if (err != -EBUSY) {
+		print_error("Shouldn't be able to crate a file in index\n");
+		goto failure;
+	}
+
+	err = emit_file(cmd_fd, NULL, file_name, &file_id,
+				file_size, NULL);
+	if (err < 0)
+		goto failure;
+	filename_in_index = get_index_filename(mount_dir, file_id);
+
+	err = unlink(filename_in_index);
+	if (err == 0 || errno != EBUSY) {
+		print_error("Shouldn't be delete from index\n");
+		goto failure;
+	}
+
+	err = rename(filename_in_index, dst_name);
+	if (err == 0 || errno != EBUSY) {
+		print_error("Shouldn't be able to move from index\n");
+		goto failure;
+	}
+
+	free(filename_in_index);
+	filename_in_index = concat_file_name(index_path, "abc");
+	err = link(file_path, filename_in_index);
+	if (err == 0 || errno != EBUSY) {
+		print_error("Shouldn't be able to link inside index\n");
+		goto failure;
+	}
+
+	err = rename(index_path, dst_name);
+	if (err == 0 || errno != EBUSY) {
+		print_error("Shouldn't rename .index directory\n");
+		goto failure;
+	}
+
+	close(cmd_fd);
+	free(subdir);
+	free(index_path);
+	free(dst_name);
+	free(filename_in_index);
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	free(subdir);
+	free(dst_name);
+	free(index_path);
+	free(filename_in_index);
+	close(cmd_fd);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
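+/*
+ * Walk |dir_to_iterate| with raw getdents64 calls, checking dirent
+ * record sizes, that each expected pseudo-file appears exactly once
+ * (root-only entries only in the mount root), and that exactly
+ * |file_count| other entries are present.
+ */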
+static bool iterate_directory(const char *dir_to_iterate, bool root,
+			      int file_count)
+{
+	struct expected_name {
+		const char *name;
+		bool root_only;
+		bool found;
+	} names[] = {
+		{INCFS_LOG_FILENAME, true, false},
+		{INCFS_PENDING_READS_FILENAME, true, false},
+		{INCFS_BLOCKS_WRITTEN_FILENAME, true, false},
+		{".index", true, false},
+		{".incomplete", true, false},
+		{"..", false, false},
+		{".", false, false},
+	};
+
+	bool pass = true, found;
+	int i;
+
+	/* Test directory iteration */
+	int fd = open(dir_to_iterate, O_RDONLY | O_DIRECTORY | O_CLOEXEC);
+
+	if (fd < 0) {
+		print_error("Can't open directory\n");
+		return false;
+	}
+
+	for (;;) {
+		/* Enough space for one dirent with a name up to NAME_MAX */
+		char buf[sizeof(struct linux_dirent64) + NAME_MAX];
+		struct linux_dirent64 *dirent = (struct linux_dirent64 *) buf;
+		int nread;
+		int i;
+
+		for (i = 0; i < NAME_MAX; ++i) {
+			nread = syscall(__NR_getdents64, fd, buf,
+					 sizeof(struct linux_dirent64) + i);
+
+			if (nread >= 0)
+				break;
+			if (errno != EINVAL)
+				break;
+		}
+
+		if (nread == 0)
+			break;
+		if (nread < 0) {
+			print_error("Error iterating directory\n");
+			pass = false;
+			goto failure;
+		}
+
+		/* Expected size is rounded up to 8 byte boundary. Not sure if
+		 * this is universal truth or just happenstance, but useful test
+		 * for the moment
+		 */
+		if (nread != (((sizeof(struct linux_dirent64)
+				+ strlen(dirent->d_name) + 1) + 7) & ~7)) {
+			print_error("Wrong dirent size");
+			pass = false;
+			goto failure;
+		}
+
+		found = false;
+		for (i = 0; i < sizeof(names) / sizeof(*names); ++i)
+			if (!strcmp(dirent->d_name, names[i].name)) {
+				if (names[i].root_only && !root) {
+					print_error("Root file error");
+					pass = false;
+					goto failure;
+				}
+
+				if (names[i].found) {
+					print_error("File appears twice");
+					pass = false;
+					goto failure;
+				}
+
+				names[i].found = true;
+				found = true;
+				break;
+			}
+
+		if (!found)
+			--file_count;
+	}
+
+	for (i = 0; i < sizeof(names) / sizeof(*names); ++i) {
+		if (!names[i].found)
+			if (root || !names[i].root_only) {
+				print_error("Expected file not present");
+				pass = false;
+				goto failure;
+			}
+	}
+
+	if (file_count) {
+		print_error("Wrong number of files\n");
+		pass = false;
+		goto failure;
+	}
+
+failure:
+	close(fd);
+	return pass;
+}
+
+static int basic_file_ops_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	char *subdir1 = concat_file_name(mount_dir, "subdir1");
+	char *subdir2 = concat_file_name(mount_dir, "subdir2");
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i, err;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+	free(backing_dir);
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	err = mkdir(subdir1, 0777);
+	if (err < 0 && errno != EEXIST) {
+		print_error("Can't create subdir1\n");
+		goto failure;
+	}
+
+	err = mkdir(subdir2, 0777);
+	if (err < 0 && errno != EEXIST) {
+		print_error("Can't create subdir2\n");
+		goto failure;
+	}
+
+	/* Create all test files in subdir1 directory */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		loff_t size;
+		char *file_path = concat_file_name(subdir1, file->name);
+
+		err = emit_file(cmd_fd, "subdir1", file->name, &file->id,
+				     file->size, NULL);
+		if (err < 0)
+			goto failure;
+
+		size = get_file_size(file_path);
+		free(file_path);
+		if (size != file->size) {
+			ksft_print_msg("Wrong size %lld of %s.\n",
+				size, file->name);
+			goto failure;
+		}
+	}
+
+	if (!iterate_directory(subdir1, false, file_num))
+		goto failure;
+
+	/* Link the files to subdir2 */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *src_name = concat_file_name(subdir1, file->name);
+		char *dst_name = concat_file_name(subdir2, file->name);
+		loff_t size;
+
+		err = link(src_name, dst_name);
+		if (err < 0) {
+			print_error("Can't move file\n");
+			goto failure;
+		}
+
+		size = get_file_size(dst_name);
+		if (size != file->size) {
+			ksft_print_msg("Wrong size %lld of %s.\n",
+				size, file->name);
+			goto failure;
+		}
+		free(src_name);
+		free(dst_name);
+	}
+
+	/* Move the files from subdir2 to the mount dir */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *src_name = concat_file_name(subdir2, file->name);
+		char *dst_name = concat_file_name(mount_dir, file->name);
+		loff_t size;
+
+		err = rename(src_name, dst_name);
+		if (err < 0) {
+			print_error("Can't move file\n");
+			goto failure;
+		}
+
+		size = get_file_size(dst_name);
+		if (size != file->size) {
+			ksft_print_msg("Wrong size %lld of %s.\n",
+				size, file->name);
+			goto failure;
+		}
+		free(src_name);
+		free(dst_name);
+	}
+
+	/* +2 because there are 2 subdirs */
+	if (!iterate_directory(mount_dir, true, file_num + 2))
+		goto failure;
+
+	/* Open and close all files from the mount dir */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *path = concat_file_name(mount_dir, file->name);
+		int fd;
+
+		fd = open(path, O_RDWR | O_CLOEXEC);
+		free(path);
+		if (fd <= 0) {
+			print_error("Can't open file");
+			goto failure;
+		}
+		if (close(fd)) {
+			print_error("Can't close file");
+			goto failure;
+		}
+	}
+
+	/* Delete all files from the mount dir */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *path = concat_file_name(mount_dir, file->name);
+
+		err = unlink(path);
+		free(path);
+		if (err < 0) {
+			print_error("Can't unlink file");
+			goto failure;
+		}
+	}
+
+	err = delete_dir_tree(subdir1);
+	if (err) {
+		ksft_print_msg("Error deleting subdir1 %d", err);
+		goto failure;
+	}
+
+	err = rmdir(subdir2);
+	if (err) {
+		print_error("Error deleting subdir2");
+		goto failure;
+	}
+
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
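+/*
+ * Create files via the commands file and feed data to all but one of
+ * them; reads from the unfed file must time out with -ETIME while the
+ * others validate cleanly.
+ */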
+static int dynamic_files_and_data_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	const int missing_file_idx = 5;
+	int cmd_fd = -1;
+	char *backing_dir;
+	int i;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+	free(backing_dir);
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Check that test files don't exist in the filesystem. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *filename = concat_file_name(mount_dir, file->name);
+
+		if (access(filename, F_OK) != -1) {
+			ksft_print_msg(
+				"File %s somehow already exists in a clean FS.\n",
+				filename);
+			goto failure;
+		}
+		free(filename);
+	}
+
+	/* Write test data into the command file. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		int res;
+
+		res = emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL);
+		if (res < 0) {
+			ksft_print_msg("Error %s emiting file %s.\n",
+				       strerror(-res), file->name);
+			goto failure;
+		}
+
+		/* Skip writing data to one file so we can check */
+		/* that it's missing later. */
+		if (i == missing_file_idx)
+			continue;
+
+		res = emit_test_file_data(mount_dir, file);
+		if (res) {
+			ksft_print_msg("Error %s emiting data for %s.\n",
+				       strerror(-res), file->name);
+			goto failure;
+		}
+	}
+
+	/* Validate contents of the FS */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (i == missing_file_idx) {
+			/* No data has been written to this file. */
+			/* Check for read error; */
+			uint8_t buf;
+			char *filename =
+				concat_file_name(mount_dir, file->name);
+			int res = read_test_file(&buf, 1, filename, 0);
+
+			free(filename);
+			if (res > 0) {
+				ksft_print_msg(
+					"Data present, even though never writtern.\n");
+				goto failure;
+			}
+			if (res != -ETIME) {
+				ksft_print_msg("Wrong error code: %d.\n", res);
+				goto failure;
+			}
+		} else {
+			if (validate_test_file_content(mount_dir, file) < 0)
+				goto failure;
+		}
+	}
+
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
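+/*
+ * Fork several reader children per file plus a single data producer;
+ * every reader must validate its file and no pending reads may remain
+ * once the producer exits.
+ */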
+static int concurrent_reads_and_writes_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	/* Validate each file from that many child processes. */
+	const int child_multiplier = 3;
+	int cmd_fd = -1;
+	char *backing_dir;
+	int status;
+	int i;
+	pid_t producer_pid;
+	pid_t *child_pids = alloca(child_multiplier * file_num * sizeof(pid_t));
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+	free(backing_dir);
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Tell FS about the files, without actually providing the data. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		int res;
+
+		res = emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL);
+		if (res)
+			goto failure;
+	}
+
+	/* Start child processes accessing data in the files */
+	for (i = 0; i < file_num * child_multiplier; i++) {
+		struct test_file *file = &test.files[i / child_multiplier];
+		pid_t child_pid = flush_and_fork();
+
+		if (child_pid == 0) {
+			/* This is a child process, do the data validation. */
+			int ret = validate_test_file_content_with_seed(
+				mount_dir, file, i);
+			if (ret >= 0) {
+				/* Zero exit status if data is valid. */
+				exit(0);
+			}
+
+			/* Positive status if validation error found. */
+			exit(-ret);
+		} else if (child_pid > 0) {
+			child_pids[i] = child_pid;
+		} else {
+			print_error("Fork error");
+			goto failure;
+		}
+	}
+
+	producer_pid = flush_and_fork();
+	if (producer_pid == 0) {
+		int ret;
+		/*
+		 * This is a child that should provide data to
+		 * pending reads.
+		 */
+
+		ret = data_producer(mount_dir, &test);
+		exit(-ret);
+	} else {
+		status = wait_for_process(producer_pid);
+		if (status != 0) {
+			ksft_print_msg("Data produces failed. %d(%s) ", status,
+				       strerror(status));
+			goto failure;
+		}
+	}
+
+	/* Check that all children have finished with 0 exit status */
+	for (i = 0; i < file_num * child_multiplier; i++) {
+		struct test_file *file = &test.files[i / child_multiplier];
+
+		status = wait_for_process(child_pids[i]);
+		if (status != 0) {
+			ksft_print_msg(
+				"Validation for the file %s failed with code %d (%s)\n",
+				file->name, status, strerror(status));
+			goto failure;
+		}
+	}
+
+	/* Check that there are no pending reads left */
+	{
+		struct incfs_pending_read_info prs[1] = {};
+		int timeout = 0;
+		int read_count = wait_for_pending_reads(cmd_fd, timeout, prs,
+							ARRAY_SIZE(prs));
+
+		if (read_count) {
+			ksft_print_msg(
+				"Pending reads pending when all data written\n");
+			goto failure;
+		}
+	}
+
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
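+/*
+ * Data written before an unmount/remount cycle must persist, and files
+ * deleted before a remount must stay deleted.
+ */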
+static int work_after_remount_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	const int file_num_stage1 = file_num / 2;
+	const int file_num_stage2 = file_num;
+	char *backing_dir = NULL;
+	int i = 0;
+	int cmd_fd = -1;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Write first half of the data into the command file. (stage 1) */
+	for (i = 0; i < file_num_stage1; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL))
+			goto failure;
+
+		if (emit_test_file_data(mount_dir, file))
+			goto failure;
+	}
+
+	/* Unmount and mount again, to see that data is persistent. */
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Write the second half of the data into the command file. (stage 2) */
+	for (; i < file_num_stage2; i++) {
+		struct test_file *file = &test.files[i];
+		int res = emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL);
+
+		if (res)
+			goto failure;
+
+		if (emit_test_file_data(mount_dir, file))
+			goto failure;
+	}
+
+	/* Validate contents of the FS */
+	for (i = 0; i < file_num_stage2; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (validate_test_file_content(mount_dir, file) < 0)
+			goto failure;
+	}
+
+	/* Delete all files */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *filename = concat_file_name(mount_dir, file->name);
+		char *filename_in_index = get_index_filename(mount_dir,
+							file->id);
+
+		if (access(filename, F_OK) != 0) {
+			ksft_print_msg("File %s is not visible.\n", filename);
+			goto failure;
+		}
+
+		if (access(filename_in_index, F_OK) != 0) {
+			ksft_print_msg("File %s is not visible.\n",
+				filename_in_index);
+			goto failure;
+		}
+
+		unlink(filename);
+
+		if (access(filename, F_OK) != -1) {
+			ksft_print_msg("File %s is still present.\n", filename);
+			goto failure;
+		}
+
+		if (access(filename_in_index, F_OK) != -1) {
+			ksft_print_msg("File %s is still present.\n",
+				filename_in_index);
+			goto failure;
+		}
+		free(filename);
+		free(filename_in_index);
+	}
+
+	/* Unmount and mount again, to see that deleted files stay deleted. */
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Validate all deleted files are still deleted. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *filename = concat_file_name(mount_dir, file->name);
+
+		if (access(filename, F_OK) != -1) {
+			ksft_print_msg("File %s is still visible.\n", filename);
+			goto failure;
+		}
+		free(filename);
+	}
+
+	/* Final unmount */
+	close(cmd_fd);
+	free(backing_dir);
+	backing_dir = NULL;
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
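+/* A file attribute set at creation must survive an unmount/remount cycle. */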
+static int attribute_test(const char *mount_dir)
+{
+	char file_attr[] = "metadata123123";
+	char attr_buf[INCFS_MAX_FILE_ATTR_SIZE] = {};
+	int cmd_fd = -1;
+	incfs_uuid_t file_id;
+	int attr_res = 0;
+	char *backing_dir;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	if (emit_file(cmd_fd, NULL, "file", &file_id, 12, file_attr))
+		goto failure;
+
+	/* Test attribute values */
+	attr_res = get_file_attr(mount_dir, file_id, attr_buf,
+		ARRAY_SIZE(attr_buf));
+	if (attr_res != strlen(file_attr)) {
+		ksft_print_msg("Get file attr error: %d\n", attr_res);
+		goto failure;
+	}
+	if (strcmp(attr_buf, file_attr) != 0) {
+		ksft_print_msg("Incorrect file attr value: '%s'", attr_buf);
+		goto failure;
+	}
+
+	/* Unmount and mount again, to see that attributes are persistent. */
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Test attribute values again after remount*/
+	attr_res = get_file_attr(mount_dir, file_id, attr_buf,
+		ARRAY_SIZE(attr_buf));
+	if (attr_res != strlen(file_attr)) {
+		ksft_print_msg("Get dir attr error: %d\n", attr_res);
+		goto failure;
+	}
+	if (strcmp(attr_buf, file_attr) != 0) {
+		ksft_print_msg("Incorrect file attr value: '%s'", attr_buf);
+		goto failure;
+	}
+
+	/* Final unmount */
+	close(cmd_fd);
+	free(backing_dir);
+	backing_dir = NULL;
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
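+/*
+ * Readers are forked before any data exists; with a 10s read timeout
+ * they should block until the parent emits the data, then succeed.
+ */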
+static int child_procs_waiting_for_data_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	int cmd_fd = -1;
+	int i;
+	pid_t *child_pids = alloca(file_num * sizeof(pid_t));
+	char *backing_dir;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file.  (10s wait time) */
+	if (mount_fs(mount_dir, backing_dir, 10000) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Tell FS about the files, without actually providing the data. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL);
+	}
+
+	/* Start child processes accessing data in the files */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		pid_t child_pid = flush_and_fork();
+
+		if (child_pid == 0) {
+			/* This is a child process, do the data validation. */
+			int ret = validate_test_file_content(mount_dir, file);
+
+			if (ret >= 0) {
+				/* Zero exit status if data is valid. */
+				exit(0);
+			}
+
+			/* Positive status if validation error found. */
+			exit(-ret);
+		} else if (child_pid > 0) {
+			child_pids[i] = child_pid;
+		} else {
+			print_error("Fork error");
+			goto failure;
+		}
+	}
+
+	/* Write test data into the command file. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (emit_test_file_data(mount_dir, file))
+			goto failure;
+	}
+
+	/* Check that all children have finished with 0 exit status */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		int status = wait_for_process(child_pids[i]);
+
+		if (status != 0) {
+			ksft_print_msg(
+				"Validation for the file %s failed with code %d (%s)\n",
+				file->name, status, strerror(status));
+			goto failure;
+		}
+	}
+
+	close(cmd_fd);
+	free(backing_dir);
+	backing_dir = NULL;
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
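+/*
+ * Five producer processes race to serve pending reads on one mount;
+ * afterwards every file must read back at its full size.
+ */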
+static int multiple_providers_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	const int producer_count = 5;
+	int cmd_fd = -1;
+	int status;
+	int i;
+	pid_t *producer_pids = alloca(producer_count * sizeof(pid_t));
+	char *backing_dir;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file.  (10s wait time) */
+	if (mount_fs_opt(mount_dir, backing_dir,
+			 "read_timeout_ms=10000,report_uid", false) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Tell FS about the files, without actually providing the data. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL) < 0)
+			goto failure;
+	}
+
+	/* Start producer processes */
+	for (i = 0; i < producer_count; i++) {
+		pid_t producer_pid = flush_and_fork();
+
+		if (producer_pid == 0) {
+			int ret;
+			/*
+			 * This is a child that should provide data to
+			 * pending reads.
+			 */
+
+			ret = data_producer2(mount_dir, &test);
+			exit(-ret);
+		} else if (producer_pid > 0) {
+			producer_pids[i] = producer_pid;
+		} else {
+			print_error("Fork error");
+			goto failure;
+		}
+	}
+
+	/* Validate FS content */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *filename = concat_file_name(mount_dir, file->name);
+		loff_t read_result = read_whole_file(filename);
+
+		free(filename);
+		if (read_result != file->size) {
+			ksft_print_msg(
+				"Error validating file %s. Result: %ld\n",
+				file->name, read_result);
+			goto failure;
+		}
+	}
+
+	/* Check that all producers have finished with 0 exit status */
+	for (i = 0; i < producer_count; i++) {
+		status = wait_for_process(producer_pids[i]);
+		if (status != 0) {
+			ksft_print_msg("Producer %d failed with code (%s)\n", i,
+				       strerror(status));
+			goto failure;
+		}
+	}
+
+	close(cmd_fd);
+	free(backing_dir);
+	backing_dir = NULL;
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
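+/*
+ * Read the Merkle tree back through FS_IOC_READ_VERITY_METADATA in
+ * odd-sized chunks and compare it against the locally built copy.
+ */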
+static int validate_hash_tree(const char *mount_dir, struct test_file *file)
+{
+	int result = TEST_FAILURE;
+	char *filename = NULL;
+	int fd = -1;
+	unsigned char *buf;
+	int i, err;
+
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TEST(buf = malloc(INCFS_DATA_FILE_BLOCK_SIZE * 8), buf);
+
+	for (i = 0; i < file->mtree_block_count; ) {
+		int blocks_to_read = i % 7 + 1;
+		struct fsverity_read_metadata_arg args = {
+			.metadata_type = FS_VERITY_METADATA_TYPE_MERKLE_TREE,
+			.offset = i * INCFS_DATA_FILE_BLOCK_SIZE,
+			.length = blocks_to_read * INCFS_DATA_FILE_BLOCK_SIZE,
+			.buf_ptr = ptr_to_u64(buf),
+		};
+
+		TEST(err = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &args),
+		     err == min(args.length, (file->mtree_block_count - i) *
+					     INCFS_DATA_FILE_BLOCK_SIZE));
+		TESTEQUAL(memcmp(buf, file->mtree[i].data, err), 0);
+
+		i += blocks_to_read;
+	}
+
+	result = TEST_SUCCESS;
+
+out:
+	free(buf);
+	close(fd);
+	free(filename);
+	return result;
+}
+
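+/*
+ * Load data and hash trees for all files, corrupting one hash entry in
+ * one file; reads of the block covered by the bad hash must fail with
+ * -EBADMSG, both before and after a remount.
+ */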
+static int hash_tree_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	const int corrupted_file_idx = 5;
+	int i = 0;
+	int cmd_fd = -1;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Write hashes and data. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		int res;
+
+		if (build_mtree(file))
+			goto failure;
+		res = crypto_emit_file(cmd_fd, NULL, file->name, &file->id,
+				       file->size, file->root_hash,
+				       file->sig.add_data);
+		if (res)
+			goto failure;
+
+		if (i == corrupted_file_idx) {
+			/* Corrupt the third block's hash */
+			file->mtree[0].data[2 * SHA256_DIGEST_SIZE] ^= 0xff;
+		}
+		if (emit_test_file_data(mount_dir, file))
+			goto failure;
+
+		res = load_hash_tree(mount_dir, file);
+		if (res) {
+			ksft_print_msg("Can't load hashes for %s. error: %s\n",
+				file->name, strerror(-res));
+			goto failure;
+		}
+	}
+
+	/* Validate data */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (i == corrupted_file_idx) {
+			uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+			char *filename =
+				concat_file_name(mount_dir, file->name);
+			int res;
+
+			res = read_test_file(data, INCFS_DATA_FILE_BLOCK_SIZE,
+					     filename, 2);
+			free(filename);
+			if (res != -EBADMSG) {
+				ksft_print_msg("Hash violation missed1. %d\n",
+					       res);
+				goto failure;
+			}
+		} else if (validate_test_file_content(mount_dir, file) < 0)
+			goto failure;
+		else if (validate_hash_tree(mount_dir, file) < 0)
+			goto failure;
+	}
+
+	/* Unmount and mount again, to check that hashes are persistent. */
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmout FS");
+		goto failure;
+	}
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Validate data again */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (i == corrupted_file_idx) {
+			uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+			char *filename =
+				concat_file_name(mount_dir, file->name);
+			int res;
+
+			res = read_test_file(data, INCFS_DATA_FILE_BLOCK_SIZE,
+					     filename, 2);
+			free(filename);
+			if (res != -EBADMSG) {
+				ksft_print_msg("Hash violation missed2. %d\n",
+					       res);
+				goto failure;
+			}
+		} else if (validate_test_file_content(mount_dir, file) < 0)
+			goto failure;
+	}
+	result = TEST_SUCCESS;
+
+failure:
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		free(file->mtree);
+	}
+
+	close(cmd_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return result;
+}
+
+enum expected_log { FULL_LOG, NO_LOG, PARTIAL_LOG };
+
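+/*
+ * Read part of |file| in a pattern that produces the different read-log
+ * record types, then check the log matches: expected record count, ids,
+ * block indexes, non-zero timestamps and consecutive serial numbers.
+ */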
+static int validate_logs(const char *mount_dir, int log_fd,
+			 struct test_file *file,
+			 enum expected_log expected_log,
+			 bool report_uid, bool expect_data)
+{
+	int result = TEST_FAILURE;
+	uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+	struct incfs_pending_read_info prs[2048] = {};
+	struct incfs_pending_read_info2 prs2[2048] = {};
+	struct incfs_pending_read_info *previous_record = NULL;
+	int prs_size = ARRAY_SIZE(prs);
+	int block_count = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	int expected_read_count, read_count, block_index, read_index;
+	char *filename = NULL;
+	int fd = -1;
+
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+	if (block_count > prs_size)
+		block_count = prs_size;
+	expected_read_count = block_count;
+
+	for (block_index = 0; block_index < block_count; block_index++) {
+		int result = pread(fd, data, sizeof(data),
+			    INCFS_DATA_FILE_BLOCK_SIZE * block_index);
+
+		/* Make some read logs of type SAME_FILE_NEXT_BLOCK */
+		if (block_index % 100 == 10)
+			usleep(20000);
+
+		/* Skip some blocks to make logs of type SAME_FILE */
+		if (block_index % 10 == 5) {
+			++block_index;
+			--expected_read_count;
+		}
+
+		if (expect_data)
+			TESTCOND(result > 0);
+
+		if (!expect_data)
+			TESTEQUAL(result, -1);
+	}
+
+	if (report_uid)
+		read_count = wait_for_pending_reads2(log_fd,
+				expected_log == NO_LOG ? 10 : 0,
+				prs2, prs_size);
+	else
+		read_count = wait_for_pending_reads(log_fd,
+				expected_log == NO_LOG ? 10 : 0,
+				prs, prs_size);
+
+	if (expected_log == NO_LOG)
+		TESTEQUAL(read_count, 0);
+
+	if (expected_log == PARTIAL_LOG)
+		TESTCOND(read_count > 0 &&
+			 read_count <= expected_read_count);
+
+	if (expected_log == FULL_LOG)
+		TESTEQUAL(read_count, expected_read_count);
+
+	/* If read less than expected, advance block_index appropriately */
+	for (block_index = 0, read_index = 0;
+	     read_index < expected_read_count - read_count;
+	     block_index++, read_index++)
+		if (block_index % 10 == 5)
+			++block_index;
+
+	for (read_index = 0; read_index < read_count;
+	     block_index++, read_index++) {
+		struct incfs_pending_read_info *record = report_uid ?
+			(struct incfs_pending_read_info *) &prs2[read_index] :
+			&prs[read_index];
+
+		TESTCOND(same_id(&record->file_id, &file->id));
+		TESTEQUAL(record->block_index, block_index);
+		TESTNE(record->timestamp_us, 0);
+		if (previous_record)
+			TESTEQUAL(record->serial_number,
+				  previous_record->serial_number + 1);
+
+		previous_record = record;
+		if (block_index % 10 == 5)
+			++block_index;
+	}
+
+	result = TEST_SUCCESS;
+out:
+	close(fd);
+	free(filename);
+	return result;
+}
+
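+/*
+ * Cycle through the report_uid and rlog_pages mount options, checking
+ * that the read log is full, absent or partial as appropriate for each
+ * configuration, and that it keeps working across remounts.
+ */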
+static int read_log_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	int i = 0;
+	int cmd_fd = -1, log_fd = -1;
+	char *backing_dir = NULL;
+
+	/* Create files */
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "readahead=0,report_uid,read_timeout_ms=0",
+				 false), 0);
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id,
+				    file->size, NULL), 0);
+	}
+	close(cmd_fd);
+	cmd_fd = -1;
+
+	/* Validate logs */
+	TEST(log_fd = open_log_file(mount_dir), log_fd != -1);
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					FULL_LOG, true, false), 0);
+
+	/* Unmount and mount again without report_uid */
+	close(log_fd);
+	log_fd = -1;
+	TESTEQUAL(umount(mount_dir), 0);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "readahead=0,read_timeout_ms=0", false), 0);
+
+	TEST(log_fd = open_log_file(mount_dir), log_fd != -1);
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					FULL_LOG, false, false), 0);
+
+	/* No read log to make sure poll doesn't crash */
+	close(log_fd);
+	log_fd = -1;
+	TESTEQUAL(umount(mount_dir), 0);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "readahead=0,rlog_pages=0,read_timeout_ms=0",
+			       false), 0);
+
+	TEST(log_fd = open_log_file(mount_dir), log_fd != -1);
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					NO_LOG, false, false), 0);
+
+	/* Remount and check that logs start working again */
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "readahead=0,rlog_pages=1,read_timeout_ms=0",
+			       true), 0);
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					PARTIAL_LOG, false, false), 0);
+
+	/* Remount and check that logs continue working */
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "readahead=0,rlog_pages=4,read_timeout_ms=0",
+			       true), 0);
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					FULL_LOG, false, false), 0);
+
+	/* Check logs work with data */
+	for (i = 0; i < file_num; i++) {
+		TESTEQUAL(emit_test_file_data(mount_dir, &test.files[i]), 0);
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					FULL_LOG, false, true), 0);
+	}
+
+	/* Final unmount */
+	close(log_fd);
+	log_fd = -1;
+	TESTEQUAL(umount(mount_dir), 0);
+
+	result = TEST_SUCCESS;
+out:
+	close(cmd_fd);
+	close(log_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return result;
+}
+
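+/*
+ * Fill alternating pairs of blocks (two written, two skipped) while
+ * polling the blocks-written pseudo-file to verify that POLLIN fires
+ * and the counter advances by exactly the number of blocks written.
+ */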
+static int emit_partial_test_file_data(const char *mount_dir,
+				       struct test_file *file)
+{
+	int i, j;
+	int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	int *block_indexes = NULL;
+	int result = 0;
+	int blocks_written = 0;
+	int bw_fd = -1;
+	char buffer[20];
+	struct pollfd pollfd;
+	long blocks_written_total, blocks_written_new_total;
+
+	if (file->size == 0)
+		return 0;
+
+	bw_fd = open_blocks_written_file(mount_dir);
+	if (bw_fd == -1)
+		return -errno;
+
+	result = read(bw_fd, buffer, sizeof(buffer) - 1);
+	if (result <= 0) {
+		result = -EIO;
+		goto out;
+	}
+
+	buffer[result] = 0;
+	blocks_written_total = strtol(buffer, NULL, 10);
+	result = 0;
+
+	pollfd = (struct pollfd) {
+		.fd = bw_fd,
+		.events = POLLIN,
+	};
+
+	result = poll(&pollfd, 1, 0);
+	if (result) {
+		result = -EIO;
+		goto out;
+	}
+
+	/* Emit 2 blocks, skip 2 blocks, etc. */
+	block_indexes = calloc(block_cnt, sizeof(*block_indexes));
+	if (!block_indexes) {
+		result = -ENOMEM;
+		goto out;
+	}
+	for (i = 0, j = 0; i < block_cnt; ++i)
+		if ((i & 2) == 0) {
+			block_indexes[j] = i;
+			++j;
+		}
+
+	for (i = 0; i < j; i += blocks_written) {
+		blocks_written = emit_test_blocks(mount_dir, file,
+						  block_indexes + i, j - i);
+		if (blocks_written < 0) {
+			result = blocks_written;
+			goto out;
+		}
+		if (blocks_written == 0) {
+			result = -EIO;
+			goto out;
+		}
+
+		result = poll(&pollfd, 1, 0);
+		if (result != 1 || pollfd.revents != POLLIN) {
+			result = -EIO;
+			goto out;
+		}
+
+		result = read(bw_fd, buffer, sizeof(buffer) - 1);
+		if (result <= 0) {
+			result = -EIO;
+			goto out;
+		}
+		buffer[result] = 0;
+		blocks_written_new_total = strtol(buffer, NULL, 10);
+
+		if (blocks_written_new_total - blocks_written_total
+		    != blocks_written) {
+			result = -EIO;
+			goto out;
+		}
+
+		blocks_written_total = blocks_written_new_total;
+		result = 0;
+	}
+out:
+	free(block_indexes);
+	close(bw_fd);
+	return result;
+}
+
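+/*
+ * Check INCFS_IOC_GET_FILLED_BLOCKS against the two-on/two-off fill
+ * pattern: the ioctl must be rejected until INCFS_IOC_PERMIT_FILL is
+ * issued, report the expected ranges (ERANGE once the 128-entry buffer
+ * is too small), and honour start_index/end_index windows.
+ */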
+static int validate_ranges(const char *mount_dir, struct test_file *file)
+{
+	int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	char *filename = concat_file_name(mount_dir, file->name);
+	int fd;
+	struct incfs_filled_range ranges[128];
+	struct incfs_get_filled_blocks_args fba = {
+		.range_buffer = ptr_to_u64(ranges),
+		.range_buffer_size = sizeof(ranges),
+	};
+	int error = TEST_SUCCESS;
+	int i;
+	int range_cnt;
+	int cmd_fd = -1;
+	struct incfs_permit_fill permit_fill;
+
+	fd = open(filename, O_RDONLY | O_CLOEXEC);
+	free(filename);
+	if (fd <= 0)
+		return TEST_FAILURE;
+
+	error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+	if (error != -1 || errno != EPERM) {
+		ksft_print_msg("INCFS_IOC_GET_FILLED_BLOCKS not blocked\n");
+		error = -EPERM;
+		goto out;
+	}
+
+	cmd_fd = open_commands_file(mount_dir);
+	permit_fill.file_descriptor = fd;
+	if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		print_error("INCFS_IOC_PERMIT_FILL failed");
+		error = -EPERM;
+		goto out;
+	}
+
+	error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+	if (error && errno != ERANGE)
+		goto out;
+
+	if (error && errno == ERANGE && block_cnt < 509)
+		goto out;
+
+	if (!error && block_cnt >= 509) {
+		error = -ERANGE;
+		goto out;
+	}
+
+	if (fba.total_blocks_out != block_cnt) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	if (fba.data_blocks_out != block_cnt) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	range_cnt = (block_cnt + 3) / 4;
+	if (range_cnt > 128)
+		range_cnt = 128;
+	if (range_cnt != fba.range_buffer_size_out / sizeof(*ranges)) {
+		error = -ERANGE;
+		goto out;
+	}
+
+	error = TEST_SUCCESS;
+	for (i = 0; i < fba.range_buffer_size_out / sizeof(*ranges) - 1; ++i)
+		if (ranges[i].begin != i * 4 || ranges[i].end != i * 4 + 2) {
+			error = -EINVAL;
+			goto out;
+		}
+
+	if (ranges[i].begin != i * 4 ||
+	    (ranges[i].end != i * 4 + 1 && ranges[i].end != i * 4 + 2)) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	for (i = 0; i < 64; ++i) {
+		fba.start_index = i * 2;
+		fba.end_index = i * 2 + 2;
+		error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+		if (error)
+			goto out;
+
+		if (fba.total_blocks_out != block_cnt) {
+			error = -EINVAL;
+			goto out;
+		}
+
+		if (fba.start_index >= block_cnt) {
+			if (fba.index_out != fba.start_index) {
+				error = -EINVAL;
+				goto out;
+			}
+
+			break;
+		}
+
+		if (i % 2) {
+			if (fba.range_buffer_size_out != 0) {
+				error = -EINVAL;
+				goto out;
+			}
+		} else {
+			if (fba.range_buffer_size_out != sizeof(*ranges)) {
+				error = -EINVAL;
+				goto out;
+			}
+
+			if (ranges[0].begin != i * 2) {
+				error = -EINVAL;
+				goto out;
+			}
+
+			if (ranges[0].end != i * 2 + 1 &&
+			    ranges[0].end != i * 2 + 2) {
+				error = -EINVAL;
+				goto out;
+			}
+		}
+	}
+
+out:
+	close(fd);
+	close(cmd_fd);
+	return error;
+}
+
+static int get_blocks_test(const char *mount_dir)
+{
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Write data. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (emit_file(cmd_fd, NULL, file->name, &file->id, file->size,
+			      NULL))
+			goto failure;
+
+		if (emit_partial_test_file_data(mount_dir, file))
+			goto failure;
+	}
+
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (validate_ranges(mount_dir, file))
+			goto failure;
+
+		/*
+		 * The smallest files are filled completely, so this checks that
+		 * the fast get_filled_blocks path is not causing issues
+		 */
+		if (validate_ranges(mount_dir, file))
+			goto failure;
+	}
+
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return TEST_FAILURE;
+}
+
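+/*
+ * For files big enough to need more than one hash block (over 128 data
+ * blocks), upload only hash block 1, leaving the rest of the tree empty.
+ */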
+static int emit_partial_test_file_hash(const char *mount_dir,
+				       struct test_file *file)
+{
+	int err;
+	int fd;
+	struct incfs_fill_blocks fill_blocks = {
+		.count = 1,
+	};
+	struct incfs_fill_block *fill_block_array =
+		calloc(fill_blocks.count, sizeof(struct incfs_fill_block));
+	uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+
+	if (file->size <= 4096 / 32 * 4096) {
+		free(fill_block_array);
+		return 0;
+	}
+
+	if (!fill_block_array)
+		return -ENOMEM;
+	fill_blocks.fill_blocks = ptr_to_u64(fill_block_array);
+
+	rnd_buf(data, sizeof(data), 0);
+
+	fill_block_array[0] =
+		(struct incfs_fill_block){ .block_index = 1,
+					   .data_len =
+						   INCFS_DATA_FILE_BLOCK_SIZE,
+					   .data = ptr_to_u64(data),
+					   .flags = INCFS_BLOCK_FLAGS_HASH };
+
+	fd = open_file_by_id(mount_dir, file->id, true);
+	if (fd < 0) {
+		err = -errno;
+		goto failure;
+	}
+
+	err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+	close(fd);
+	if (err < fill_blocks.count)
+		err = -errno;
+	else
+		err = 0;
+
+failure:
+	free(fill_block_array);
+	return err;
+}
+
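+/*
+ * With only hash block 1 uploaded, GET_FILLED_BLOCKS must count all the
+ * data blocks and report a single filled range one block past the end
+ * of the data blocks.
+ */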
+static int validate_hash_ranges(const char *mount_dir, struct test_file *file)
+{
+	int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	char *filename = concat_file_name(mount_dir, file->name);
+	int fd;
+	struct incfs_filled_range ranges[128];
+	struct incfs_get_filled_blocks_args fba = {
+		.range_buffer = ptr_to_u64(ranges),
+		.range_buffer_size = sizeof(ranges),
+	};
+	int error = TEST_SUCCESS;
+	int file_blocks = (file->size + INCFS_DATA_FILE_BLOCK_SIZE - 1) /
+			  INCFS_DATA_FILE_BLOCK_SIZE;
+	int cmd_fd = -1;
+	struct incfs_permit_fill permit_fill;
+
+	if (file->size <= 4096 / 32 * 4096) {
+		free(filename);
+		return 0;
+	}
+
+	fd = open(filename, O_RDONLY | O_CLOEXEC);
+	free(filename);
+	if (fd <= 0)
+		return TEST_FAILURE;
+
+	error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+	if (error != -1 || errno != EPERM) {
+		ksft_print_msg("INCFS_IOC_GET_FILLED_BLOCKS not blocked\n");
+		error = -EPERM;
+		goto out;
+	}
+
+	cmd_fd = open_commands_file(mount_dir);
+	permit_fill.file_descriptor = fd;
+	if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		print_error("INCFS_IOC_PERMIT_FILL failed");
+		error = -EPERM;
+		goto out;
+	}
+
+	error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+	if (error)
+		goto out;
+
+	if (fba.total_blocks_out <= block_cnt) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	if (fba.data_blocks_out != block_cnt) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	if (fba.range_buffer_size_out != sizeof(struct incfs_filled_range)) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	if (ranges[0].begin != file_blocks + 1 ||
+	    ranges[0].end != file_blocks + 2) {
+		error = -EINVAL;
+		goto out;
+	}
+
+out:
+	close(cmd_fd);
+	close(fd);
+	return error;
+}
+
+static int get_hash_blocks_test(const char *mount_dir)
+{
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (crypto_emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, file->root_hash,
+				     file->sig.add_data))
+			goto failure;
+
+		if (emit_partial_test_file_hash(mount_dir, file))
+			goto failure;
+	}
+
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (validate_hash_ranges(mount_dir, file))
+			goto failure;
+	}
+
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return TEST_FAILURE;
+}
+
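+/*
+ * Fill a 3GiB file in one FILL_BLOCKS call, then create (but do not
+ * fill) a 1TiB file, checking that incfs copes with large sizes.
+ */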
+static int large_file_test(const char *mount_dir)
+{
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i;
+	int result = TEST_FAILURE;
+	uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+	int block_count = 3LL * 1024 * 1024 * 1024 / INCFS_DATA_FILE_BLOCK_SIZE;
+	struct incfs_fill_block *block_buf =
+		calloc(block_count, sizeof(struct incfs_fill_block));
+	struct incfs_fill_blocks fill_blocks = {
+		.count = block_count,
+		.fill_blocks = ptr_to_u64(block_buf),
+	};
+	incfs_uuid_t id;
+	int fd = -1;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	if (emit_file(cmd_fd, NULL, "very_large_file", &id,
+		      (uint64_t)block_count * INCFS_DATA_FILE_BLOCK_SIZE,
+		      NULL) < 0)
+		goto failure;
+
+	for (i = 0; i < block_count; i++) {
+		block_buf[i].compression = COMPRESSION_NONE;
+		block_buf[i].block_index = i;
+		block_buf[i].data_len = INCFS_DATA_FILE_BLOCK_SIZE;
+		block_buf[i].data = ptr_to_u64(data);
+	}
+
+	fd = open_file_by_id(mount_dir, id, true);
+	if (fd < 0)
+		goto failure;
+
+	if (ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks) != block_count)
+		goto failure;
+
+	if (emit_file(cmd_fd, NULL, "very_very_large_file", &id, 1LL << 40,
+		      NULL) < 0)
+		goto failure;
+
+	result = TEST_SUCCESS;
+
+failure:
+	close(fd);
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
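+/*
+ * Compare a mapped file against the corresponding window of its source
+ * file: the size must match, and every block must equal the source data
+ * at |offset|.
+ */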
+static int validate_mapped_file(const char *orig_name, const char *name,
+				size_t size, size_t offset)
+{
+	struct stat st;
+	int orig_fd = -1, fd = -1;
+	size_t block;
+	int result = TEST_FAILURE;
+
+	if (stat(name, &st)) {
+		ksft_print_msg("Failed to stat %s with error %s\n",
+			       name, strerror(errno));
+		goto failure;
+	}
+
+	if (size != st.st_size) {
+		ksft_print_msg("Mismatched file sizes for file %s - expected %llu, got %llu\n",
+				   name, (unsigned long long)size,
+				   (unsigned long long)st.st_size);
+		goto failure;
+	}
+
+	fd = open(name, O_RDONLY | O_CLOEXEC);
+	if (fd == -1) {
+		ksft_print_msg("Failed to open %s with error %s\n", name,
+			       strerror(errno));
+		goto failure;
+	}
+
+	orig_fd = open(orig_name, O_RDONLY | O_CLOEXEC);
+	if (orig_fd == -1) {
+		ksft_print_msg("Failed to open %s with error %s\n", orig_name,
+			       strerror(errno));
+		goto failure;
+	}
+
+	for (block = 0; block < size; block += INCFS_DATA_FILE_BLOCK_SIZE) {
+		uint8_t orig_data[INCFS_DATA_FILE_BLOCK_SIZE];
+		uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+		ssize_t orig_read, mapped_read;
+
+		orig_read = pread(orig_fd, orig_data,
+				 INCFS_DATA_FILE_BLOCK_SIZE, block + offset);
+		mapped_read = pread(fd, data, INCFS_DATA_FILE_BLOCK_SIZE,
+				    block);
+
+		if (orig_read < mapped_read ||
+		    mapped_read != min(size - block,
+				       INCFS_DATA_FILE_BLOCK_SIZE)) {
+			ksft_print_msg("Failed to read enough data: %llu %llu %llu %lld %lld\n",
+				       block, size, offset, orig_read,
+				       mapped_read);
+			goto failure;
+		}
+
+		if (memcmp(orig_data, data, mapped_read)) {
+			ksft_print_msg("Data doesn't match: %llu %llu %llu %lld %lld\n",
+				       block, size, offset, orig_read,
+				       mapped_read);
+			goto failure;
+		}
+	}
+
+	result = TEST_SUCCESS;
+
+failure:
+	close(orig_fd);
+	close(fd);
+	return result;
+}
+
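+/*
+ * For each test file, create a mapped file covering roughly the middle
+ * half of the source and validate its contents against the source.
+ */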
+static int mapped_file_test(const char *mount_dir)
+{
+	char *backing_dir;
+	int result = TEST_FAILURE;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	for (i = 0; i < file_num; ++i) {
+		struct test_file *file = &test.files[i];
+		size_t blocks = file->size / INCFS_DATA_FILE_BLOCK_SIZE;
+		size_t mapped_offset = blocks / 4 *
+			INCFS_DATA_FILE_BLOCK_SIZE;
+		size_t mapped_size = file->size / 4 * 3 - mapped_offset;
+		struct incfs_create_mapped_file_args mfa;
+		char mapped_file_name[FILENAME_MAX];
+		char orig_file_path[PATH_MAX];
+		char mapped_file_path[PATH_MAX];
+
+		if (emit_file(cmd_fd, NULL, file->name, &file->id, file->size,
+					NULL) < 0)
+			goto failure;
+
+		if (emit_test_file_data(mount_dir, file))
+			goto failure;
+
+		if (snprintf(mapped_file_name, ARRAY_SIZE(mapped_file_name),
+					"%s.mapped", file->name) < 0)
+			goto failure;
+
+		mfa = (struct incfs_create_mapped_file_args) {
+			.size = mapped_size,
+			.mode = 0664,
+			.file_name = ptr_to_u64(mapped_file_name),
+			.source_file_id = file->id,
+			.source_offset = mapped_offset,
+		};
+
+		result = ioctl(cmd_fd, INCFS_IOC_CREATE_MAPPED_FILE, &mfa);
+		if (result) {
+			ksft_print_msg(
+				"Failed to create mapped file with error %s\n",
+				strerror(errno));
+			goto failure;
+		}
+
+		result = snprintf(orig_file_path,
+				  ARRAY_SIZE(orig_file_path), "%s/%s",
+				  mount_dir, file->name);
+
+		if (result < 0 || result >= ARRAY_SIZE(orig_file_path)) {
+			result = TEST_FAILURE;
+			goto failure;
+		}
+
+		result = snprintf(mapped_file_path,
+				  ARRAY_SIZE(mapped_file_path), "%s/%s",
+				  mount_dir, mapped_file_name);
+
+		if (result < 0 || result >= ARRAY_SIZE(mapped_file_path)) {
+			result = TEST_FAILURE;
+			goto failure;
+		}
+
+		result = validate_mapped_file(orig_file_path, mapped_file_path,
+					      mapped_size, mapped_offset);
+		if (result)
+			goto failure;
+	}
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
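+/*
+ * A hand-assembled minimal v1 backing file image (header, one attribute and a
+ * single blockmap entry), used by compatibility_test to check that files
+ * created with the original on-disk format still mount and open.
+ */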
+static const char v1_file[] = {
+	/* Header */
+	/* 0x00: Magic number */
+	0x49, 0x4e, 0x43, 0x46, 0x53, 0x00, 0x00, 0x00,
+	/* 0x08: Version */
+	0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x10: Header size */
+	0x38, 0x00,
+	/* 0x12: Block size */
+	0x00, 0x10,
+	/* 0x14: Flags */
+	0x00, 0x00, 0x00, 0x00,
+	/* 0x18: First md offset */
+	0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x20: File size */
+	0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x28: UUID */
+	0x8c, 0x7d, 0xd9, 0x22, 0xad, 0x47, 0x49, 0x4f,
+	0xc0, 0x2c, 0x38, 0x8e, 0x12, 0xc0, 0x0e, 0xac,
+
+	/* 0x38: Attribute */
+	0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+	0x31, 0x32, 0x33, 0x31, 0x32, 0x33,
+
+	/* Attribute md record */
+	/* 0x46: Type */
+	0x02,
+	/* 0x47: Size */
+	0x25, 0x00,
+	/* 0x49: CRC */
+	0x9a, 0xef, 0xef, 0x72,
+	/* 0x4d: Next md offset */
+	0x75, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x55: Prev md offset */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x5d: fa_offset */
+	0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x65: fa_size */
+	0x0e, 0x00,
+	/* 0x67: fa_crc */
+	0xfb, 0x5e, 0x72, 0x89,
+
+	/* Blockmap table */
+	/* 0x6b: First 10-byte entry */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+	/* Blockmap md record */
+	/* 0x75: Type */
+	0x01,
+	/* 0x76: Size */
+	0x23, 0x00,
+	/* 0x78: CRC */
+	0x74, 0x45, 0xd3, 0xb9,
+	/* 0x7c: Next md offset */
+	0x00, 0x00, 0x00, 0x00,	0x00, 0x00, 0x00, 0x00,
+	/* 0x84: Prev md offset */
+	0x46, 0x00, 0x00, 0x00,	0x00, 0x00, 0x00, 0x00,
+	/* 0x8c: blockmap offset */
+	0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x94: blockmap count */
+	0x01, 0x00, 0x00, 0x00,
+};
+
+static int compatibility_test(const char *mount_dir)
+{
+	static const char *name = "file";
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL;
+	char *filename = NULL;
+	int fd = -1;
+	uint64_t size = 0x0c;
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TEST(filename = concat_file_name(backing_dir, name), filename);
+	TEST(fd = open(filename, O_CREAT | O_WRONLY | O_CLOEXEC, 0777),
+	     fd != -1);
+	TESTEQUAL(write(fd, v1_file, sizeof(v1_file)), sizeof(v1_file));
+	TESTEQUAL(fsetxattr(fd, INCFS_XATTR_SIZE_NAME, &size, sizeof(size), 0),
+		  0);
+	TESTEQUAL(mount_fs(mount_dir, backing_dir, 50), 0);
+	free(filename);
+	TEST(filename = concat_file_name(mount_dir, name), filename);
+	close(fd);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+	result = TEST_SUCCESS;
+out:
+	close(fd);
+	umount(mount_dir);
+	free(backing_dir);
+	free(filename);
+	return result;
+}
+
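+/*
+ * Verify the data/hash blocks_written counts stored in the backing file's
+ * status metadata record (type 4), then zero them in place. Offset 24 is the
+ * first-md-offset field of the file header; offsets 23 and 27 within the
+ * record skip its common header: type (1), size (2), CRC (4), next (8) and
+ * prev (8) metadata offsets.
+ */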
+static int zero_blocks_written_count(int fd, uint32_t data_blocks_written,
+				     uint32_t hash_blocks_written)
+{
+	int test_result = TEST_FAILURE;
+	uint64_t offset;
+	uint8_t type;
+	uint32_t bw;
+
+	/* Get first md record */
+	TESTEQUAL(pread(fd, &offset, sizeof(offset), 24), sizeof(offset));
+
+	/* Find status md record */
+	for (;;) {
+		TESTNE(offset, 0);
+		TESTEQUAL(pread(fd, &type, sizeof(type), le64_to_cpu(offset)),
+			  sizeof(type));
+		if (type == 4)
+			break;
+		TESTEQUAL(pread(fd, &offset, sizeof(offset),
+				le64_to_cpu(offset) + 7),
+			  sizeof(offset));
+	}
+
+	/* Read blocks_written */
+	offset = le64_to_cpu(offset);
+	TESTEQUAL(pread(fd, &bw, sizeof(bw), offset + 23), sizeof(bw));
+	TESTEQUAL(le32_to_cpu(bw), data_blocks_written);
+	TESTEQUAL(pread(fd, &bw, sizeof(bw), offset + 27), sizeof(bw));
+	TESTEQUAL(le32_to_cpu(bw), hash_blocks_written);
+
+	/* Write out zero */
+	bw = 0;
+	TESTEQUAL(pwrite(fd, &bw, sizeof(bw), offset + 23), sizeof(bw));
+	TESTEQUAL(pwrite(fd, &bw, sizeof(bw), offset + 27), sizeof(bw));
+
+	test_result = TEST_SUCCESS;
+out:
+	return test_result;
+}
+
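+/*
+ * Check INCFS_IOC_GET_BLOCK_COUNT results, then zero the persisted counters
+ * in the backing file, remount, and confirm the counts are restored only
+ * after INCFS_IOC_PERMIT_FILL plus a full INCFS_IOC_GET_FILLED_BLOCKS scan.
+ */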
+static int validate_block_count(const char *mount_dir, const char *backing_dir,
+				struct test_file *file,
+				int total_data_blocks, int filled_data_blocks,
+				int total_hash_blocks, int filled_hash_blocks)
+{
+	char *filename = NULL;
+	char *backing_filename = NULL;
+	int fd = -1;
+	struct incfs_get_block_count_args bca = {};
+	int test_result = TEST_FAILURE;
+	struct incfs_filled_range ranges[128];
+	struct incfs_get_filled_blocks_args fba = {
+		.range_buffer = ptr_to_u64(ranges),
+		.range_buffer_size = sizeof(ranges),
+	};
+	int cmd_fd = -1;
+	struct incfs_permit_fill permit_fill;
+
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TEST(backing_filename = concat_file_name(backing_dir, file->name),
+	     backing_filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, filled_data_blocks);
+	TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+	TESTEQUAL(bca.filled_hash_blocks_out, filled_hash_blocks);
+
+	close(fd);
+	TESTEQUAL(umount(mount_dir), 0);
+	TEST(fd = open(backing_filename, O_RDWR | O_CLOEXEC), fd != -1);
+	TESTEQUAL(zero_blocks_written_count(fd, filled_data_blocks,
+					    filled_hash_blocks),
+		  TEST_SUCCESS);
+	close(fd);
+	fd = -1;
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+		  0);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, 0);
+	TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+	TESTEQUAL(bca.filled_hash_blocks_out, 0);
+
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+	permit_fill.file_descriptor = fd;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill), 0);
+	do {
+		ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+		fba.start_index = fba.index_out + 1;
+	} while (fba.index_out < fba.total_blocks_out);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, filled_data_blocks);
+	TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+	TESTEQUAL(bca.filled_hash_blocks_out, filled_hash_blocks);
+
+	test_result = TEST_SUCCESS;
+out:
+	close(cmd_fd);
+	close(fd);
+	free(filename);
+	free(backing_filename);
+	return test_result;
+}
+
+
+static int validate_data_block_count(const char *mount_dir,
+				     const char *backing_dir,
+				     struct test_file *file)
+{
+	const int total_data_blocks = 1 + (file->size - 1) /
+						INCFS_DATA_FILE_BLOCK_SIZE;
+	const int filled_data_blocks = (total_data_blocks + 1) / 2;
+
+	int test_result = TEST_FAILURE;
+	char *filename = NULL;
+	char *incomplete_filename = NULL;
+	struct stat stat_buf_incomplete, stat_buf_file;
+	int fd = -1;
+	struct incfs_get_block_count_args bca = {};
+	int i;
+
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TEST(incomplete_filename = get_incomplete_filename(mount_dir, file->id),
+	     incomplete_filename);
+
+	TESTEQUAL(stat(filename, &stat_buf_file), 0);
+	TESTEQUAL(stat(incomplete_filename, &stat_buf_incomplete), 0);
+	TESTEQUAL(stat_buf_file.st_ino, stat_buf_incomplete.st_ino);
+	TESTEQUAL(stat_buf_file.st_nlink, 3);
+
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, 0);
+	TESTEQUAL(bca.total_hash_blocks_out, 0);
+	TESTEQUAL(bca.filled_hash_blocks_out, 0);
+
+	for (i = 0; i < total_data_blocks; i += 2)
+		TESTEQUAL(emit_test_block(mount_dir, file, i), 0);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, filled_data_blocks);
+	TESTEQUAL(bca.total_hash_blocks_out, 0);
+	TESTEQUAL(bca.filled_hash_blocks_out, 0);
+	close(fd);
+	fd = -1;
+
+	TESTEQUAL(validate_block_count(mount_dir, backing_dir, file,
+				       total_data_blocks, filled_data_blocks,
+				       0, 0),
+		  0);
+
+	for (i = 1; i < total_data_blocks; i += 2)
+		TESTEQUAL(emit_test_block(mount_dir, file, i), 0);
+
+	TESTEQUAL(stat(incomplete_filename, &stat_buf_incomplete), -1);
+	TESTEQUAL(errno, ENOENT);
+
+	test_result = TEST_SUCCESS;
+out:
+	close(fd);
+	free(incomplete_filename);
+	free(filename);
+	return test_result;
+}
+
+static int data_block_count_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+		  0);
+
+	for (i = 0; i < test.files_count; ++i) {
+		struct test_file *file = &test.files[i];
+
+		TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+		TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id,
+				    file->size,	NULL),
+			  0);
+		close(cmd_fd);
+		cmd_fd = -1;
+
+		TESTEQUAL(validate_data_block_count(mount_dir, backing_dir,
+						    file),
+			  0);
+	}
+
+	result = TEST_SUCCESS;
+out:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+static int validate_hash_block_count(const char *mount_dir,
+				     const char *backing_dir,
+				     struct test_file *file)
+{
+	const int digest_size = SHA256_DIGEST_SIZE;
+	const int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+	const int total_data_blocks = 1 + (file->size - 1) /
+					INCFS_DATA_FILE_BLOCK_SIZE;
+
+	int result = TEST_FAILURE;
+	int hash_layer = total_data_blocks;
+	int total_hash_blocks = 0;
+	int filled_hash_blocks;
+	char *filename = NULL;
+	int fd = -1;
+	struct incfs_get_block_count_args bca = {};
+
+	while (hash_layer > 1) {
+		hash_layer = (hash_layer + hash_per_block - 1) / hash_per_block;
+		total_hash_blocks += hash_layer;
+	}
+	filled_hash_blocks = total_hash_blocks > 1 ? 1 : 0;
+
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, 0);
+	TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+	TESTEQUAL(bca.filled_hash_blocks_out, 0);
+
+	TESTEQUAL(emit_partial_test_file_hash(mount_dir, file), 0);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, 0);
+	TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+	TESTEQUAL(bca.filled_hash_blocks_out, filled_hash_blocks);
+	close(fd);
+	fd = -1;
+
+	if (filled_hash_blocks)
+		TESTEQUAL(validate_block_count(mount_dir, backing_dir, file,
+					       total_data_blocks, 0,
+					       total_hash_blocks,
+					       filled_hash_blocks),
+			  0);
+
+	result = TEST_SUCCESS;
+out:
+	close(fd);
+	free(filename);
+	return result;
+}
+
+static int hash_block_count_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+		  0);
+
+	for (i = 0; i < test.files_count; i++) {
+		struct test_file *file = &test.files[i];
+
+		TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+		TESTEQUAL(crypto_emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, file->root_hash,
+				     file->sig.add_data),
+			  0);
+		close(cmd_fd);
+		cmd_fd = -1;
+
+		TESTEQUAL(validate_hash_block_count(mount_dir, backing_dir,
+						    &test.files[i]),
+			  0);
+	}
+
+	result = TEST_SUCCESS;
+out:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
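+/*
+ * Return TEST_SUCCESS if the time elapsed since *start is within
+ * allowed_variance (100ms) of expected_ms.
+ */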
+static int is_close(struct timespec *start, int expected_ms)
+{
+	const int allowed_variance = 100;
+	int result = TEST_FAILURE;
+	struct timespec finish;
+	int diff;
+
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &finish), 0);
+	diff = (finish.tv_sec - start->tv_sec) * 1000 +
+		(finish.tv_nsec - start->tv_nsec) / 1000000;
+
+	TESTCOND(diff >= expected_ms - allowed_variance);
+	TESTCOND(diff <= expected_ms + allowed_variance);
+	result = TEST_SUCCESS;
+out:
+	return result;
+}
+
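+/*
+ * Exercise INCFS_IOC_SET/GET_READ_TIMEOUTS: per-uid minimum read and pending
+ * times and maximum pending time, checked against wall-clock measurements.
+ */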
+static int per_uid_read_timeouts_test(const char *mount_dir)
+{
+	struct test_file file = {
+		.name = "file",
+		.size = 16 * INCFS_DATA_FILE_BLOCK_SIZE
+	};
+
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL;
+	int pid = -1;
+	int cmd_fd = -1;
+	char *filename = NULL;
+	int fd = -1;
+	struct timespec start;
+	char buffer[4096];
+	struct incfs_per_uid_read_timeouts purt_get[1];
+	struct incfs_get_read_timeouts_args grt = {
+		ptr_to_u64(purt_get),
+		sizeof(purt_get)
+	};
+	struct incfs_per_uid_read_timeouts purt_set[] = {
+		{
+			.uid = 0,
+			.min_time_us = 1000000,
+			.min_pending_time_us = 2000000,
+			.max_pending_time_us = 3000000,
+		},
+	};
+	struct incfs_set_read_timeouts_args srt = {
+		ptr_to_u64(purt_set),
+		sizeof(purt_set)
+	};
+	int status;
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "read_timeout_ms=1000,readahead=0", false), 0);
+
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+	TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size,
+			    NULL), 0);
+
+	TEST(filename = concat_file_name(mount_dir, file.name), filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTEQUAL(fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC), 0);
+
+	/* Default read timeout from the mount options is 1000ms */
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+	TESTEQUAL(is_close(&start, 1000), 0);
+
+	grt.timeouts_array_size = 0;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), 0);
+	TESTEQUAL(grt.timeouts_array_size_out, 0);
+
+	/* Set uid 0 max pending time to 3000ms */
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+	TESTEQUAL(is_close(&start, 3000), 0);
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), -1);
+	TESTEQUAL(errno, E2BIG);
+	TESTEQUAL(grt.timeouts_array_size_out, sizeof(purt_get));
+	grt.timeouts_array_size = sizeof(purt_get);
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), 0);
+	TESTEQUAL(grt.timeouts_array_size_out, sizeof(purt_get));
+	TESTEQUAL(purt_get[0].uid, purt_set[0].uid);
+	TESTEQUAL(purt_get[0].min_time_us, purt_set[0].min_time_us);
+	TESTEQUAL(purt_get[0].min_pending_time_us,
+		  purt_set[0].min_pending_time_us);
+	TESTEQUAL(purt_get[0].max_pending_time_us,
+		  purt_set[0].max_pending_time_us);
+
+	/* UID 2 still gets the default 1000ms timeout */
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TEST(pid = fork(), pid != -1);
+	if (pid == 0) {
+		TESTEQUAL(setuid(2), 0);
+		TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+		exit(0);
+	}
+	TESTNE(wait(&status), -1);
+	TESTEQUAL(WEXITSTATUS(status), 0);
+	TESTEQUAL(is_close(&start, 1000), 0);
+
+	/* Set it to default */
+	purt_set[0].max_pending_time_us = UINT32_MAX;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+	TESTEQUAL(is_close(&start, 1000), 0);
+
+	/* Test min read time */
+	TESTEQUAL(emit_test_block(mount_dir, &file, 0), 0);
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), sizeof(buffer));
+	TESTEQUAL(is_close(&start, 1000), 0);
+
+	/* Test min pending time */
+	purt_set[0].uid = 2;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TEST(pid = fork(), pid != -1);
+	if (pid == 0) {
+		TESTEQUAL(setuid(2), 0);
+		TESTEQUAL(pread(fd, buffer, sizeof(buffer), sizeof(buffer)),
+			  sizeof(buffer));
+		exit(0);
+	}
+	sleep(1);
+	TESTEQUAL(emit_test_block(mount_dir, &file, 1), 0);
+	TESTNE(wait(&status), -1);
+	TESTEQUAL(WEXITSTATUS(status), 0);
+	TESTEQUAL(is_close(&start, 2000), 0);
+
+	/* Clear timeouts */
+	srt.timeouts_array_size = 0;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+	grt.timeouts_array_size = 0;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), 0);
+	TESTEQUAL(grt.timeouts_array_size_out, 0);
+
+	result = TEST_SUCCESS;
+out:
+	close(fd);
+
+	if (pid == 0)
+		exit(result);
+
+	free(filename);
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+#define DIRS 3
+static int inotify_test(const char *mount_dir)
+{
+	const char *mapped_file_name = "mapped_name";
+	struct test_file file = {
+		.name = "file",
+		.size = 16 * INCFS_DATA_FILE_BLOCK_SIZE
+	};
+
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL, *index_dir = NULL, *incomplete_dir = NULL;
+	char *file_name = NULL;
+	int cmd_fd = -1;
+	int notify_fd = -1;
+	int wds[DIRS];
+	char buffer[DIRS * (sizeof(struct inotify_event) + NAME_MAX + 1)];
+	char *ptr = buffer;
+	struct inotify_event *event;
+	struct inotify_event *events[DIRS] = {};
+	const char *names[DIRS] = {};
+	int res;
+	char id[sizeof(incfs_uuid_t) * 2 + 1];
+	struct incfs_create_mapped_file_args mfa;
+
+	/* File creation triggers inotify events in .index and .incomplete? */
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TEST(index_dir = concat_file_name(mount_dir, ".index"), index_dir);
+	TEST(incomplete_dir = concat_file_name(mount_dir, ".incomplete"),
+	     incomplete_dir);
+	TESTEQUAL(mount_fs(mount_dir, backing_dir, 50), 0);
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+	TEST(notify_fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC),
+	     notify_fd != -1);
+	TEST(wds[0] = inotify_add_watch(notify_fd, mount_dir,
+					IN_CREATE | IN_DELETE),
+	     wds[0] != -1);
+	TEST(wds[1] = inotify_add_watch(notify_fd, index_dir,
+					IN_CREATE | IN_DELETE),
+	     wds[1] != -1);
+	TEST(wds[2] = inotify_add_watch(notify_fd, incomplete_dir,
+					IN_CREATE | IN_DELETE),
+	     wds[2] != -1);
+	TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size,
+			   NULL), 0);
+	TEST(res = read(notify_fd, buffer, sizeof(buffer)), res != -1);
+
+	while (ptr < buffer + res) {
+		int i;
+
+		event = (struct inotify_event *) ptr;
+		TESTCOND(ptr + sizeof(*event) <= buffer + res);
+		for (i = 0; i < DIRS; ++i)
+			if (event->wd == wds[i]) {
+				TESTEQUAL(events[i], NULL);
+				events[i] = event;
+				ptr += sizeof(*event);
+				names[i] = ptr;
+				ptr += events[i]->len;
+				TESTCOND(ptr <= buffer + res);
+				break;
+			}
+		TESTCOND(i < DIRS);
+	}
+
+	TESTNE(events[0], NULL);
+	TESTNE(events[1], NULL);
+	TESTNE(events[2], NULL);
+
+	bin2hex(id, file.id.bytes, sizeof(incfs_uuid_t));
+
+	TESTEQUAL(events[0]->mask, IN_CREATE);
+	TESTEQUAL(events[1]->mask, IN_CREATE);
+	TESTEQUAL(events[2]->mask, IN_CREATE);
+	TESTEQUAL(strcmp(names[0], file.name), 0);
+	TESTEQUAL(strcmp(names[1], id), 0);
+	TESTEQUAL(strcmp(names[2], id), 0);
+
+	/* Creating a mapped file triggers inotify event */
+	mfa = (struct incfs_create_mapped_file_args) {
+		.size = INCFS_DATA_FILE_BLOCK_SIZE,
+		.mode = 0664,
+		.file_name = ptr_to_u64(mapped_file_name),
+		.source_file_id = file.id,
+		.source_offset = INCFS_DATA_FILE_BLOCK_SIZE,
+	};
+
+	TEST(res = ioctl(cmd_fd, INCFS_IOC_CREATE_MAPPED_FILE, &mfa),
+	     res != -1);
+	TEST(res = read(notify_fd, buffer, sizeof(buffer)), res != -1);
+	event = (struct inotify_event *) buffer;
+	TESTEQUAL(event->wd, wds[0]);
+	TESTEQUAL(event->mask, IN_CREATE);
+	TESTEQUAL(strcmp(event->name, mapped_file_name), 0);
+
+	/* File completion triggers inotify event in .incomplete? */
+	TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+	TEST(res = read(notify_fd, buffer, sizeof(buffer)), res != -1);
+	event = (struct inotify_event *) buffer;
+	TESTEQUAL(event->wd, wds[2]);
+	TESTEQUAL(event->mask, IN_DELETE);
+	TESTEQUAL(strcmp(event->name, id), 0);
+
+	/* File unlinking triggers inotify event in .index? */
+	TEST(file_name = concat_file_name(mount_dir, file.name), file_name);
+	TESTEQUAL(unlink(file_name), 0);
+	TEST(res = read(notify_fd, buffer, sizeof(buffer)), res != -1);
+	memset(events, 0, sizeof(events));
+	memset(names, 0, sizeof(names));
+	for (ptr = buffer; ptr < buffer + res;) {
+		int i;
+
+		event = (struct inotify_event *) ptr;
+
+		TESTCOND(ptr + sizeof(*event) <= buffer + res);
+		for (i = 0; i < DIRS; ++i)
+			if (event->wd == wds[i]) {
+				TESTEQUAL(events[i], NULL);
+				events[i] = event;
+				ptr += sizeof(*event);
+				names[i] = ptr;
+				ptr += events[i]->len;
+				TESTCOND(ptr <= buffer + res);
+				break;
+			}
+		TESTCOND(i < DIRS);
+	}
+
+	TESTNE(events[0], NULL);
+	TESTNE(events[1], NULL);
+	TESTEQUAL(events[2], NULL);
+
+	TESTEQUAL(events[0]->mask, IN_DELETE);
+	TESTEQUAL(events[1]->mask, IN_DELETE);
+	TESTEQUAL(strcmp(names[0], file.name), 0);
+	TESTEQUAL(strcmp(names[1], id), 0);
+
+	result = TEST_SUCCESS;
+out:
+	free(file_name);
+	close(notify_fd);
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	free(index_dir);
+	free(incomplete_dir);
+	return result;
+}
+
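+/* Generate a throwaway 4096-bit RSA key for signing test verity digests */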
+static EVP_PKEY *create_key(void)
+{
+	EVP_PKEY *pkey = NULL;
+	RSA *rsa = NULL;
+	BIGNUM *bn = NULL;
+
+	pkey = EVP_PKEY_new();
+	if (!pkey)
+		goto fail;
+
+	bn = BN_new();
+	if (!bn)
+		goto fail;
+	BN_set_word(bn, RSA_F4);
+
+	rsa = RSA_new();
+	if (!rsa)
+		goto fail;
+
+	RSA_generate_key_ex(rsa, 4096, bn, NULL);
+	EVP_PKEY_assign_RSA(pkey, rsa);
+
+	BN_free(bn);
+	return pkey;
+
+fail:
+	BN_free(bn);
+	EVP_PKEY_free(pkey);
+	return NULL;
+}
+
+static X509 *get_cert(EVP_PKEY *key)
+{
+	X509 *x509 = NULL;
+	X509_NAME *name = NULL;
+
+	x509 = X509_new();
+	if (!x509)
+		return NULL;
+
+	ASN1_INTEGER_set(X509_get_serialNumber(x509), 1);
+	X509_gmtime_adj(X509_get_notBefore(x509), 0);
+	X509_gmtime_adj(X509_get_notAfter(x509), 31536000L);
+	X509_set_pubkey(x509, key);
+
+	name = X509_get_subject_name(x509);
+	X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC,
+		   (const unsigned char *)"US", -1, -1, 0);
+	X509_NAME_add_entry_by_txt(name, "ST", MBSTRING_ASC,
+		   (const unsigned char *)"CA", -1, -1, 0);
+	X509_NAME_add_entry_by_txt(name, "L", MBSTRING_ASC,
+		   (const unsigned char *)"San Jose", -1, -1, 0);
+	X509_NAME_add_entry_by_txt(name, "O", MBSTRING_ASC,
+		   (const unsigned char *)"Example", -1, -1, 0);
+	X509_NAME_add_entry_by_txt(name, "OU", MBSTRING_ASC,
+		   (const unsigned char *)"Org", -1, -1, 0);
+	X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
+		   (const unsigned char *)"www.example.com", -1, -1, 0);
+	X509_set_issuer_name(x509, name);
+
+	if (!X509_sign(x509, key, EVP_sha256())) {
+		X509_free(x509);
+		return NULL;
+	}
+
+	return x509;
+}
+
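+/*
+ * Produce a detached PKCS#7 signature of data with the given key and cert,
+ * returning it DER-encoded in *sig; the blob is self-verified before use.
+ */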
+static int sign(EVP_PKEY *key, X509 *cert, const char *data, size_t len,
+		unsigned char **sig, size_t *sig_len)
+{
+	const int pkcs7_flags = PKCS7_BINARY | PKCS7_NOATTR | PKCS7_PARTIAL |
+			  PKCS7_DETACHED;
+	const EVP_MD *md = EVP_sha256();
+
+	int result = TEST_FAILURE;
+
+	BIO *bio = NULL;
+	PKCS7 *p7 = NULL;
+	unsigned char *bio_buffer;
+
+	TEST(bio = BIO_new_mem_buf(data, len), bio);
+	TEST(p7 = PKCS7_sign(NULL, NULL, NULL, bio, pkcs7_flags), p7);
+	TESTNE(PKCS7_sign_add_signer(p7, cert, key, md, pkcs7_flags), 0);
+	TESTEQUAL(PKCS7_final(p7, bio, pkcs7_flags), 1);
+	TEST(*sig_len = i2d_PKCS7(p7, NULL), *sig_len);
+	TEST(bio_buffer = malloc(*sig_len), bio_buffer);
+	*sig = bio_buffer;
+	TEST(*sig_len = i2d_PKCS7(p7, &bio_buffer), *sig_len);
+	TESTEQUAL(PKCS7_verify(p7, NULL, NULL, bio, NULL,
+			 pkcs7_flags | PKCS7_NOVERIFY | PKCS7_NOSIGS), 1);
+
+	result = TEST_SUCCESS;
+out:
+	PKCS7_free(p7);
+	BIO_free(bio);
+	return result;
+}
+
+static int verity_installed(const char *mount_dir, int cmd_fd, bool *installed)
+{
+	int result = TEST_FAILURE;
+	char *filename = NULL;
+	int fd = -1;
+	struct test_file *file = &get_test_files_set().files[0];
+
+	TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id, file->size,
+			    NULL), 0);
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTEQUAL(ioctl(fd, FS_IOC_ENABLE_VERITY, NULL), -1);
+	*installed = errno != EOPNOTSUPP;
+
+	result = TEST_SUCCESS;
+out:
+	close(fd);
+	if (filename)
+		remove(filename);
+	free(filename);
+	return result;
+}
+
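+/*
+ * Enable fs-verity on the file: confirm verity is supported but not yet
+ * enabled and, if a signing key is given, check that a signature over an
+ * all-zero digest is rejected. Then compute the real descriptor digest and
+ * enable verity, optionally attaching the PKCS#7 signature.
+ */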
+static int enable_verity(const char *mount_dir, struct test_file *file,
+			   EVP_PKEY *key, X509 *cert, bool use_signatures)
+{
+	int result = TEST_FAILURE;
+	char *filename = NULL;
+	int fd = -1;
+	struct fsverity_enable_arg fear = {
+		.version = 1,
+		.hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
+		.block_size = INCFS_DATA_FILE_BLOCK_SIZE,
+		.sig_size = 0,
+		.sig_ptr = 0,
+	};
+	struct {
+		__u8 version;           /* must be 1 */
+		__u8 hash_algorithm;    /* Merkle tree hash algorithm */
+		__u8 log_blocksize;     /* log2 of size of data and tree blocks */
+		__u8 salt_size;         /* size of salt in bytes; 0 if none */
+		__le32 sig_size;        /* must be 0 */
+		__le64 data_size;       /* size of file the Merkle tree is built over */
+		__u8 root_hash[64];     /* Merkle tree root hash */
+		__u8 salt[32];          /* salt prepended to each hashed block */
+		__u8 __reserved[144];   /* must be 0's */
+	} __packed fsverity_descriptor = {
+		.version = 1,
+		.hash_algorithm = 1,
+		.log_blocksize = 12,
+		.data_size = file->size,
+	};
+	struct {
+		char magic[8];                  /* must be "FSVerity" */
+		__le16 digest_algorithm;
+		__le16 digest_size;
+		__u8 digest[32];
+	} __packed fsverity_signed_digest =  {
+		.digest_algorithm = 1,
+		.digest_size = 32
+	};
+	unsigned char *sig = NULL;
+	size_t sig_size = 0;
+	uint64_t flags;
+	struct statx statxbuf = {};
+
+	memcpy(fsverity_signed_digest.magic, "FSVerity", 8);
+
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TESTEQUAL(syscall(__NR_statx, AT_FDCWD, filename, 0, STATX_ALL,
+			  &statxbuf), 0);
+	TESTEQUAL(statxbuf.stx_attributes_mask & STATX_ATTR_VERITY,
+		  STATX_ATTR_VERITY);
+	TESTEQUAL(statxbuf.stx_attributes & STATX_ATTR_VERITY, 0);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTEQUAL(ioctl(fd, FS_IOC_GETFLAGS, &flags), 0);
+	TESTEQUAL(flags & FS_VERITY_FL, 0);
+
+	/* First check that a signature over a bogus (all-zero) digest fails */
+	if (key) {
+		TESTEQUAL(sign(key, cert, (void *)&fsverity_signed_digest,
+			    sizeof(fsverity_signed_digest), &sig, &sig_size),
+			  0);
+
+		fear.sig_size = sig_size;
+		fear.sig_ptr = ptr_to_u64(sig);
+		TESTEQUAL(ioctl(fd, FS_IOC_ENABLE_VERITY, &fear), -1);
+	}
+
+	/* Now try with correct digest */
+	memcpy(fsverity_descriptor.root_hash, file->root_hash, 32);
+	sha256((char *)&fsverity_descriptor, sizeof(fsverity_descriptor),
+	       (char *)fsverity_signed_digest.digest);
+
+	if (ioctl(fd, FS_IOC_ENABLE_VERITY, NULL) == -1 &&
+	    errno == EOPNOTSUPP) {
+		result = TEST_SUCCESS;
+		goto out;
+	}
+
+	free(sig);
+	sig = NULL;
+
+	if (key)
+		TESTEQUAL(sign(key, cert, (void *)&fsverity_signed_digest,
+			       sizeof(fsverity_signed_digest),
+			       &sig, &sig_size),
+		  0);
+
+	if (use_signatures) {
+		fear.sig_size = sig_size;
+		file->verity_sig_size = sig_size;
+		fear.sig_ptr = ptr_to_u64(sig);
+		file->verity_sig = sig;
+		sig = NULL;
+	} else {
+		fear.sig_size = 0;
+		fear.sig_ptr = 0;
+	}
+	TESTEQUAL(ioctl(fd, FS_IOC_ENABLE_VERITY, &fear), 0);
+
+	result = TEST_SUCCESS;
+out:
+	free(sig);
+	close(fd);
+	free(filename);
+	return result;
+}
+
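+/* Check that buf is all zeroes (despite the name, nothing is written) */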
+static int memzero(const unsigned char *buf, size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; ++i)
+		if (buf[i])
+			return -1;
+	return 0;
+}
+
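+/*
+ * Check that verity is reported via statx and FS_IOC_GETFLAGS, that the
+ * measured digest is SHA-256, and that the signature and descriptor read
+ * back through FS_IOC_READ_VERITY_METADATA match what was stored.
+ */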
+static int validate_verity(const char *mount_dir, struct test_file *file)
+{
+	int result = TEST_FAILURE;
+	char *filename = NULL;
+	int fd = -1;
+	uint64_t flags;
+	struct fsverity_digest *digest;
+	struct statx statxbuf = {};
+	struct fsverity_read_metadata_arg frma = {};
+	uint8_t *buf = NULL;
+	struct fsverity_descriptor desc;
+
+	TEST(digest = malloc(sizeof(struct fsverity_digest) +
+			     INCFS_MAX_HASH_SIZE), digest != NULL);
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TESTEQUAL(syscall(__NR_statx, AT_FDCWD, filename, 0, STATX_ALL,
+			  &statxbuf), 0);
+	TESTEQUAL(statxbuf.stx_attributes & STATX_ATTR_VERITY,
+		  STATX_ATTR_VERITY);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTEQUAL(ioctl(fd, FS_IOC_GETFLAGS, &flags), 0);
+	TESTEQUAL(flags & FS_VERITY_FL, FS_VERITY_FL);
+	digest->digest_size = INCFS_MAX_HASH_SIZE;
+	TESTEQUAL(ioctl(fd, FS_IOC_MEASURE_VERITY, digest), 0);
+	TESTEQUAL(digest->digest_algorithm, FS_VERITY_HASH_ALG_SHA256);
+	TESTEQUAL(digest->digest_size, 32);
+
+	if (file->verity_sig) {
+		TEST(buf = malloc(file->verity_sig_size), buf);
+		frma = (struct fsverity_read_metadata_arg) {
+			.metadata_type = FS_VERITY_METADATA_TYPE_SIGNATURE,
+			.length = file->verity_sig_size,
+			.buf_ptr = ptr_to_u64(buf),
+		};
+		TESTEQUAL(ioctl(fd, FS_IOC_READ_VERITY_METADATA, &frma),
+			  file->verity_sig_size);
+		TESTEQUAL(memcmp(buf, file->verity_sig, file->verity_sig_size),
+			  0);
+	} else {
+		frma = (struct fsverity_read_metadata_arg) {
+			.metadata_type = FS_VERITY_METADATA_TYPE_SIGNATURE,
+		};
+		TESTEQUAL(ioctl(fd, FS_IOC_READ_VERITY_METADATA, &frma), -1);
+		TESTEQUAL(errno, ENODATA);
+	}
+
+	frma = (struct fsverity_read_metadata_arg) {
+		.metadata_type = FS_VERITY_METADATA_TYPE_DESCRIPTOR,
+		.length = sizeof(desc),
+		.buf_ptr = ptr_to_u64(&desc),
+	};
+	TESTEQUAL(ioctl(fd, FS_IOC_READ_VERITY_METADATA, &frma),
+		  sizeof(desc));
+	TESTEQUAL(desc.version, 1);
+	TESTEQUAL(desc.hash_algorithm, FS_VERITY_HASH_ALG_SHA256);
+	TESTEQUAL(desc.log_blocksize, ilog2(INCFS_DATA_FILE_BLOCK_SIZE));
+	TESTEQUAL(desc.salt_size, 0);
+	TESTEQUAL(desc.__reserved_0x04, 0);
+	TESTEQUAL(desc.data_size, file->size);
+	TESTEQUAL(memcmp(desc.root_hash, file->root_hash, SHA256_DIGEST_SIZE),
+		  0);
+	TESTEQUAL(memzero(desc.root_hash + SHA256_DIGEST_SIZE,
+			  sizeof(desc.root_hash) - SHA256_DIGEST_SIZE), 0);
+	TESTEQUAL(memzero(desc.salt, sizeof(desc.salt)), 0);
+	TESTEQUAL(memzero(desc.__reserved, sizeof(desc.__reserved)), 0);
+
+	result = TEST_SUCCESS;
+out:
+	free(buf);
+	close(fd);
+	free(filename);
+	free(digest);
+	return result;
+}
+
+static int verity_test_optional_sigs(const char *mount_dir, bool use_signatures)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL;
+	bool installed;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	EVP_PKEY *key = NULL;
+	X509 *cert = NULL;
+	BIO *mem = NULL;
+	long len;
+	void *ptr;
+	FILE *proc_key_fd = NULL;
+	char *line = NULL;
+	size_t line_size = 0;
+	int key_id = -1;
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+		  0);
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+	TESTEQUAL(verity_installed(mount_dir, cmd_fd, &installed), 0);
+	if (!installed) {
+		result = TEST_SUCCESS;
+		goto out;
+	}
+	TEST(key = create_key(), key);
+	TEST(cert = get_cert(key), cert);
+
+	TEST(proc_key_fd = fopen("/proc/keys", "r"), proc_key_fd != NULL);
+	while (getline(&line, &line_size, proc_key_fd) != -1)
+		if (strstr(line, ".fs-verity"))
+			key_id = strtol(line, NULL, 16);
+
+	TEST(mem = BIO_new(BIO_s_mem()), mem != NULL);
+	TESTEQUAL(i2d_X509_bio(mem, cert), 1);
+	TEST(len = BIO_get_mem_data(mem, &ptr), len != 0);
+	TESTCOND(key_id == -1 ||
+		 syscall(__NR_add_key, "asymmetric", "test:key", ptr, len,
+			 key_id) != -1);
+
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		build_mtree(file);
+		TESTEQUAL(crypto_emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, file->root_hash,
+				     file->sig.add_data), 0);
+
+		TESTEQUAL(load_hash_tree(mount_dir, file), 0);
+		TESTEQUAL(enable_verity(mount_dir, file, key, cert,
+					use_signatures),
+			  0);
+	}
+
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_verity(mount_dir, &test.files[i]), 0);
+
+	close(cmd_fd);
+	cmd_fd = -1;
+	TESTEQUAL(umount(mount_dir), 0);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+		  0);
+
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_verity(mount_dir, &test.files[i]), 0);
+
+	result = TEST_SUCCESS;
+out:
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		free(file->mtree);
+		free(file->verity_sig);
+
+		file->mtree = NULL;
+		file->verity_sig = NULL;
+	}
+
+	free(line);
+	BIO_free(mem);
+	X509_free(cert);
+	EVP_PKEY_free(key);
+	if (proc_key_fd)
+		fclose(proc_key_fd);
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+static int verity_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+
+	TESTEQUAL(verity_test_optional_sigs(mount_dir, true), TEST_SUCCESS);
+	TESTEQUAL(verity_test_optional_sigs(mount_dir, false), TEST_SUCCESS);
+	result = TEST_SUCCESS;
+out:
+	return result;
+}
+
+static int verity_file_valid(const char *mount_dir, struct test_file *file)
+{
+	int result = TEST_FAILURE;
+	char *filename = NULL;
+	int fd = -1;
+	uint8_t buffer[INCFS_DATA_FILE_BLOCK_SIZE];
+	struct incfs_get_file_sig_args gfsa = {
+		.file_signature = ptr_to_u64(buffer),
+		.file_signature_buf_size = sizeof(buffer),
+	};
+	int i;
+
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTEQUAL(ioctl(fd, INCFS_IOC_READ_FILE_SIGNATURE, &gfsa), 0);
+	for (i = 0; i < file->size; i += sizeof(buffer))
+		TESTEQUAL(pread(fd, buffer, sizeof(buffer), i),
+			  file->size - i > sizeof(buffer) ?
+				sizeof(buffer) : file->size - i);
+
+	result = TEST_SUCCESS;
+out:
+	close(fd);
+	free(filename);
+	return result;
+}
+
+static int enable_verity_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL;
+	bool installed;
+	int cmd_fd = -1;
+	struct test_files_set test = get_test_files_set();
+	int i;
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+	TESTEQUAL(verity_installed(mount_dir, cmd_fd, &installed), 0);
+	if (!installed) {
+		result = TEST_SUCCESS;
+		goto out;
+	}
+	for (i = 0; i < test.files_count; ++i) {
+		struct test_file *file = &test.files[i];
+
+		TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL), 0);
+		TESTEQUAL(emit_test_file_data(mount_dir, file), 0);
+		TESTEQUAL(enable_verity(mount_dir, file, NULL, NULL, false), 0);
+	}
+
+	/* Check files are valid on disk */
+	close(cmd_fd);
+	cmd_fd = -1;
+	TESTEQUAL(umount(mount_dir), 0);
+	TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+	for (i = 0; i < test.files_count; ++i)
+		TESTEQUAL(verity_file_valid(mount_dir, &test.files[i]), 0);
+
+	result = TEST_SUCCESS;
+out:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+static int mmap_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL;
+	int cmd_fd = -1;
+	/*
+	 * The file is big enough to have a two-layer hash tree with two
+	 * hashes in the higher level, so we can corrupt the second one
+	 */
+	int shas_per_block = INCFS_DATA_FILE_BLOCK_SIZE / SHA256_DIGEST_SIZE;
+	struct test_file file = {
+		  .name = "file",
+		  .size = INCFS_DATA_FILE_BLOCK_SIZE * shas_per_block * 2,
+	};
+	char *filename = NULL;
+	int fd = -1;
+	char *addr = (void *)-1;
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+
+	TESTEQUAL(build_mtree(&file), 0);
+	file.mtree[1].data[INCFS_DATA_FILE_BLOCK_SIZE] ^= 0xff;
+	TESTEQUAL(crypto_emit_file(cmd_fd, NULL, file.name, &file.id,
+			       file.size, file.root_hash,
+			       file.sig.add_data), 0);
+	TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+	TESTEQUAL(load_hash_tree(mount_dir, &file), 0);
+	TEST(filename = concat_file_name(mount_dir, file.name), filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TEST(addr = mmap(NULL, file.size, PROT_READ, MAP_PRIVATE, fd, 0),
+	     addr != (void *) -1);
+	TESTEQUAL(mlock(addr, INCFS_DATA_FILE_BLOCK_SIZE), 0);
+	TESTEQUAL(munlock(addr, INCFS_DATA_FILE_BLOCK_SIZE), 0);
+	TESTEQUAL(mlock(addr + shas_per_block * INCFS_DATA_FILE_BLOCK_SIZE,
+			INCFS_DATA_FILE_BLOCK_SIZE), -1);
+	TESTEQUAL(mlock(addr + (shas_per_block - 1) *
+			       INCFS_DATA_FILE_BLOCK_SIZE,
+			INCFS_DATA_FILE_BLOCK_SIZE), 0);
+	TESTEQUAL(munlock(addr + (shas_per_block - 1) *
+				 INCFS_DATA_FILE_BLOCK_SIZE,
+			  INCFS_DATA_FILE_BLOCK_SIZE), 0);
+	TESTEQUAL(mlock(addr + (shas_per_block - 1) *
+			       INCFS_DATA_FILE_BLOCK_SIZE,
+			INCFS_DATA_FILE_BLOCK_SIZE * 2), -1);
+	TESTEQUAL(munmap(addr, file.size), 0);
+
+	result = TEST_SUCCESS;
+out:
+	free(file.mtree);
+	close(fd);
+	free(filename);
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+static int truncate_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL;
+	int cmd_fd = -1;
+	struct test_file file = {
+		  .name = "file",
+		  .size = INCFS_DATA_FILE_BLOCK_SIZE,
+	};
+	char *backing_file = NULL;
+	int fd = -1;
+	struct stat st;
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+	TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size, NULL),
+		  0);
+	TEST(backing_file = concat_file_name(backing_dir, file.name),
+	     backing_file);
+	TEST(fd = open(backing_file, O_RDWR | O_CLOEXEC), fd != -1);
+	TESTEQUAL(stat(backing_file, &st), 0);
+	TESTCOND(st.st_blocks < 128);
+	TESTEQUAL(fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 24), 0);
+	TESTEQUAL(stat(backing_file, &st), 0);
+	TESTCOND(st.st_blocks > 32768);
+	TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+	TESTEQUAL(stat(backing_file, &st), 0);
+	TESTCOND(st.st_blocks < 128);
+
+	result = TEST_SUCCESS;
+out:
+	close(fd);
+	free(backing_file);
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+static int stat_file_test(const char *mount_dir, int cmd_fd,
+			  struct test_file *file)
+{
+	int result = TEST_FAILURE;
+	struct stat st;
+	char *filename = NULL;
+
+	TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL), 0);
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TESTEQUAL(stat(filename, &st), 0);
+	TESTCOND(st.st_blocks < 32);
+	TESTEQUAL(emit_test_file_data(mount_dir, file), 0);
+	TESTEQUAL(stat(filename, &st), 0);
+	TESTCOND(st.st_blocks > file->size / 512);
+
+	result = TEST_SUCCESS;
+out:
+	free(filename);
+	return result;
+}
+
+static int stat_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		TESTEQUAL(stat_file_test(mount_dir, cmd_fd, file), 0);
+	}
+
+	result = TEST_SUCCESS;
+out:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+#define SYSFS_DIR "/sys/fs/incremental-fs/instances/test_node/"
+
+static int sysfs_test_value(const char *name, uint64_t value)
+{
+	int result = TEST_FAILURE;
+	char *filename = NULL;
+	FILE *file = NULL;
+	unsigned long long res;
+
+	TEST(filename = concat_file_name(SYSFS_DIR, name), filename);
+	TEST(file = fopen(filename, "re"), file);
+	TESTEQUAL(fscanf(file, "%lu", &res), 1);
+	TESTEQUAL(res, value);
+
+	result = TEST_SUCCESS;
+out:
+	if (file)
+		fclose(file);
+	free(filename);
+	return result;
+}
+
+static int sysfs_test_value_range(const char *name, uint64_t low, uint64_t high)
+{
+	int result = TEST_FAILURE;
+	char *filename = NULL;
+	FILE *file = NULL;
+	unsigned long long res;
+
+	TEST(filename = concat_file_name(SYSFS_DIR, name), filename);
+	TEST(file = fopen(filename, "re"), file);
+	TESTEQUAL(fscanf(file, "%lu", &res), 1);
+	TESTCOND(res >= low && res <= high);
+
+	result = TEST_SUCCESS;
+out:
+	if (file)
+		fclose(file);
+	free(filename);
+	return result;
+}
+
+static int ioctl_test_last_error(int cmd_fd, const incfs_uuid_t *file_id,
+				 int page, int error)
+{
+	int result = TEST_FAILURE;
+	struct incfs_get_last_read_error_args glre;
+
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_LAST_READ_ERROR, &glre), 0);
+	if (file_id)
+		TESTEQUAL(memcmp(&glre.file_id_out, file_id, sizeof(*file_id)),
+			  0);
+
+	TESTEQUAL(glre.page_out, page);
+	TESTEQUAL(glre.errno_out, error);
+	result = TEST_SUCCESS;
+out:
+	return result;
+}
+
+static int sysfs_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL;
+	int cmd_fd = -1;
+	struct test_file file = {
+		  .name = "file",
+		  .size = INCFS_DATA_FILE_BLOCK_SIZE,
+	};
+	char *filename = NULL;
+	int fd = -1;
+	int pid = -1;
+	char buffer[32];
+	int status;
+	struct incfs_per_uid_read_timeouts purt_set[] = {
+		{
+			.uid = 0,
+			.min_time_us = 1000000,
+			.min_pending_time_us = 1000000,
+			.max_pending_time_us = 2000000,
+		},
+	};
+	struct incfs_set_read_timeouts_args srt = {
+		ptr_to_u64(purt_set),
+		sizeof(purt_set)
+	};
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "sysfs_name=test_node",
+			       false),
+		  0);
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+	TESTEQUAL(build_mtree(&file), 0);
+	file.root_hash[0] ^= 0xff;
+	TESTEQUAL(crypto_emit_file(cmd_fd, NULL, file.name, &file.id, file.size,
+				   file.root_hash, file.sig.add_data),
+		  0);
+	TEST(filename = concat_file_name(mount_dir, file.name), filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTEQUAL(ioctl_test_last_error(cmd_fd, NULL, 0, 0), 0);
+	TESTEQUAL(sysfs_test_value("reads_failed_timed_out", 0), 0);
+	TESTEQUAL(read(fd, NULL, 1), -1);
+	TESTEQUAL(ioctl_test_last_error(cmd_fd, &file.id, 0, -ETIME), 0);
+	TESTEQUAL(sysfs_test_value("reads_failed_timed_out", 2), 0);
+
+	TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+	TESTEQUAL(sysfs_test_value("reads_failed_hash_verification", 0), 0);
+	TESTEQUAL(read(fd, NULL, 1), -1);
+	TESTEQUAL(sysfs_test_value("reads_failed_hash_verification", 1), 0);
+	TESTSYSCALL(close(fd));
+	fd = -1;
+
+	TESTSYSCALL(unlink(filename));
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "read_timeout_ms=10000,sysfs_name=test_node",
+			       true),
+		  0);
+	TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size, NULL),
+		  0);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTSYSCALL(fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC));
+	TEST(pid = fork(), pid != -1);
+	if (pid == 0) {
+		TESTEQUAL(read(fd, buffer, sizeof(buffer)), sizeof(buffer));
+		exit(0);
+	}
+	sleep(1);
+	TESTEQUAL(sysfs_test_value("reads_delayed_pending", 0), 0);
+	TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+	TESTNE(wait(&status), -1);
+	TESTEQUAL(status, 0);
+	TESTEQUAL(sysfs_test_value("reads_delayed_pending", 1), 0);
+	/* Allow +/- 10% */
+	TESTEQUAL(sysfs_test_value_range("reads_delayed_pending_us", 900000, 1100000),
+		  0);
+
+	TESTSYSCALL(close(fd));
+	fd = -1;
+
+	TESTSYSCALL(unlink(filename));
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+	TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size, NULL),
+		  0);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTEQUAL(sysfs_test_value("reads_delayed_min", 0), 0);
+	TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+	TESTEQUAL(read(fd, buffer, sizeof(buffer)), sizeof(buffer));
+	TESTEQUAL(sysfs_test_value("reads_delayed_min", 1), 0);
+	/* This should be exact */
+	TESTEQUAL(sysfs_test_value("reads_delayed_min_us", 1000000), 0);
+
+	TESTSYSCALL(close(fd));
+	fd = -1;
+
+	TESTSYSCALL(unlink(filename));
+	TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size, NULL),
+		  0);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTSYSCALL(fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC));
+	TEST(pid = fork(), pid != -1);
+	if (pid == 0) {
+		TESTEQUAL(read(fd, buffer, sizeof(buffer)), sizeof(buffer));
+		exit(0);
+	}
+	usleep(500000);
+	TESTEQUAL(sysfs_test_value("reads_delayed_pending", 1), 0);
+	TESTEQUAL(sysfs_test_value("reads_delayed_min", 1), 0);
+	TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+	TESTNE(wait(&status), -1);
+	TESTEQUAL(status, 0);
+	TESTEQUAL(sysfs_test_value("reads_delayed_pending", 2), 0);
+	TESTEQUAL(sysfs_test_value("reads_delayed_min", 2), 0);
+	/* Exact 1000000 plus 500000 +/- 10% */
+	TESTEQUAL(sysfs_test_value_range("reads_delayed_min_us", 1450000, 1550000), 0);
+	/* Allow +/- 10% */
+	TESTEQUAL(sysfs_test_value_range("reads_delayed_pending_us", 1350000, 1650000),
+		  0);
+
+	result = TEST_SUCCESS;
+out:
+	if (pid == 0)
+		exit(result);
+	free(file.mtree);
+	free(filename);
+	close(fd);
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+static int sysfs_test_directories(bool one_present, bool two_present)
+{
+	int result = TEST_FAILURE;
+	struct stat st;
+
+	TESTEQUAL(stat("/sys/fs/incremental-fs/instances/1", &st),
+		  one_present ? 0 : -1);
+	if (one_present)
+		TESTCOND(S_ISDIR(st.st_mode));
+	else
+		TESTEQUAL(errno, ENOENT);
+	TESTEQUAL(stat("/sys/fs/incremental-fs/instances/2", &st),
+		  two_present ? 0 : -1);
+	if (two_present)
+		TESTCOND(S_ISDIR(st.st_mode));
+	else
+		TESTEQUAL(errno, ENOENT);
+
+	result = TEST_SUCCESS;
+out:
+	return result;
+}
+
+static int sysfs_rename_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL;
+	char *mount_dir2 = NULL;
+	int fd = -1;
+	char c;
+
+	/* Mount with no node */
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+	TESTEQUAL(sysfs_test_directories(false, false), 0);
+
+	/* Remount with node */
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "sysfs_name=1", true),
+		  0);
+	TESTEQUAL(sysfs_test_directories(true, false), 0);
+	TEST(fd = open("/sys/fs/incremental-fs/instances/1/reads_delayed_min",
+		       O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTEQUAL(pread(fd, &c, 1, 0), 1);
+	TESTEQUAL(c, '0');
+	TESTEQUAL(pread(fd, &c, 1, 0), 1);
+	TESTEQUAL(c, '0');
+
+	/* Rename node */
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "sysfs_name=2", true),
+		  0);
+	TESTEQUAL(sysfs_test_directories(false, true), 0);
+	TESTEQUAL(pread(fd, &c, 1, 0), -1);
+
+	/* Try mounting another instance with same node name */
+	TEST(mount_dir2 = concat_file_name(backing_dir, "incfs-mount-dir2"),
+	     mount_dir2);
+	rmdir(mount_dir2); /* In case we crashed before */
+	TESTSYSCALL(mkdir(mount_dir2, 0777));
+	TESTEQUAL(mount_fs_opt(mount_dir2, backing_dir, "sysfs_name=2", false),
+		  -1);
+
+	/* Try mounting another instance then remounting with existing name */
+	TESTEQUAL(mount_fs(mount_dir2, backing_dir, 0), 0);
+	TESTEQUAL(mount_fs_opt(mount_dir2, backing_dir, "sysfs_name=2", true),
+		  -1);
+
+	/* Remount with no node */
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "", true),
+		  0);
+	TESTEQUAL(sysfs_test_directories(false, false), 0);
+
+	result = TEST_SUCCESS;
+out:
+	umount(mount_dir2);
+	rmdir(mount_dir2);
+	free(mount_dir2);
+	close(fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+static char *setup_mount_dir(void)
+{
+	struct stat st;
+	char *current_dir = getcwd(NULL, 0);
+	char *mount_dir = concat_file_name(current_dir, "incfs-mount-dir");
+
+	free(current_dir);
+	if (stat(mount_dir, &st) == 0) {
+		if (S_ISDIR(st.st_mode))
+			return mount_dir;
+
+		ksft_print_msg("%s is a file, not a dir.\n", mount_dir);
+		return NULL;
+	}
+
+	if (mkdir(mount_dir, 0777)) {
+		print_error("Can't create mount dir.");
+		return NULL;
+	}
+
+	return mount_dir;
+}
+
+int parse_options(int argc, char *const *argv)
+{
+	int c;
+
+	while ((c = getopt(argc, argv, "f:t:v")) != -1)
+		switch (c) {
+		case 'f':
+			options.file = strtol(optarg, NULL, 10);
+			break;
+
+		case 't':
+			options.test = strtol(optarg, NULL, 10);
+			break;
+
+		case 'v':
+			options.verbose = true;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+	return 0;
+}
+
+struct test_case {
+	int (*pfunc)(const char *dir);
+	const char *name;
+};
+
+void run_one_test(const char *mount_dir, struct test_case *test_case)
+{
+	ksft_print_msg("Running %s\n", test_case->name);
+	if (test_case->pfunc(mount_dir) == TEST_SUCCESS)
+		ksft_test_result_pass("%s\n", test_case->name);
+	else
+		ksft_test_result_fail("%s\n", test_case->name);
+}
+
+int main(int argc, char *argv[])
+{
+	char *mount_dir = NULL;
+	int i;
+	int fd, count;
+
+	if (parse_options(argc, argv))
+		ksft_exit_fail_msg("Bad options\n");
+
+	/*
+	 * Seed the randomness pool for testing on QEMU.
+	 * NOTE - this abuses the concept of randomness - do *not* ever do this
+	 * on a machine for production use - the device will think it has good
+	 * randomness when it does not.
+	 */
+	fd = open("/dev/urandom", O_WRONLY | O_CLOEXEC);
+	if (fd != -1) {
+		count = 4096;
+		for (i = 0; i < 128; ++i)
+			ioctl(fd, RNDADDTOENTCNT, &count);
+		close(fd);
+	}
+
+	ksft_print_header();
+
+	if (geteuid() != 0)
+		ksft_print_msg("Not running as root; mounts may fail.\n");
+
+	mount_dir = setup_mount_dir();
+	if (mount_dir == NULL)
+		ksft_exit_fail_msg("Can't create a mount dir\n");
+
+#define MAKE_TEST(test)                                                        \
+	{                                                                      \
+		test, #test                                                    \
+	}
+	struct test_case cases[] = {
+		MAKE_TEST(basic_file_ops_test),
+		MAKE_TEST(cant_touch_index_test),
+		MAKE_TEST(dynamic_files_and_data_test),
+		MAKE_TEST(concurrent_reads_and_writes_test),
+		MAKE_TEST(attribute_test),
+		MAKE_TEST(work_after_remount_test),
+		MAKE_TEST(child_procs_waiting_for_data_test),
+		MAKE_TEST(multiple_providers_test),
+		MAKE_TEST(hash_tree_test),
+		MAKE_TEST(read_log_test),
+		MAKE_TEST(get_blocks_test),
+		MAKE_TEST(get_hash_blocks_test),
+		MAKE_TEST(large_file_test),
+		MAKE_TEST(mapped_file_test),
+		MAKE_TEST(compatibility_test),
+		MAKE_TEST(data_block_count_test),
+		MAKE_TEST(hash_block_count_test),
+		MAKE_TEST(per_uid_read_timeouts_test),
+		MAKE_TEST(inotify_test),
+		MAKE_TEST(verity_test),
+		MAKE_TEST(enable_verity_test),
+		MAKE_TEST(mmap_test),
+		MAKE_TEST(truncate_test),
+		MAKE_TEST(stat_test),
+		MAKE_TEST(sysfs_test),
+		MAKE_TEST(sysfs_rename_test),
+	};
+#undef MAKE_TEST
+
+	if (options.test) {
+		if (options.test <= 0 || options.test > ARRAY_SIZE(cases))
+			ksft_exit_fail_msg("Invalid test\n");
+
+		ksft_set_plan(1);
+		run_one_test(mount_dir, &cases[options.test - 1]);
+	} else {
+		ksft_set_plan(ARRAY_SIZE(cases));
+		for (i = 0; i < ARRAY_SIZE(cases); ++i)
+			run_one_test(mount_dir, &cases[i]);
+	}
+
+	umount2(mount_dir, MNT_FORCE);
+	rmdir(mount_dir);
+	return !ksft_get_fail_cnt() ? ksft_exit_pass() : ksft_exit_fail();
+}
diff --git a/tools/testing/selftests/filesystems/incfs/utils.c b/tools/testing/selftests/filesystems/incfs/utils.c
new file mode 100644
index 0000000..e60b0d3
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/utils.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <openssl/sha.h>
+#include <openssl/md5.h>
+
+#include "utils.h"
+
+#ifndef __S_IFREG
+#define __S_IFREG S_IFREG
+#endif
+
+unsigned int rnd(unsigned int max, unsigned int *seed)
+{
+	/* Uniformly scale rand_r() output into [0, max] */
+	return rand_r(seed) * ((uint64_t)max + 1) / ((uint64_t)RAND_MAX + 1);
+}
+
+int remove_dir(const char *dir)
+{
+	int err = rmdir(dir);
+
+	if (err && errno == ENOTEMPTY) {
+		err = delete_dir_tree(dir);
+		if (err)
+			return err;
+		return 0;
+	}
+
+	if (err && errno != ENOENT)
+		return -errno;
+
+	return 0;
+}
+
+int drop_caches(void)
+{
+	int drop_caches =
+		open("/proc/sys/vm/drop_caches", O_WRONLY | O_CLOEXEC);
+	int i;
+
+	if (drop_caches == -1)
+		return -errno;
+	i = write(drop_caches, "3", 1);
+	close(drop_caches);
+
+	if (i != 1)
+		return -errno;
+
+	return 0;
+}
+
+int mount_fs(const char *mount_dir, const char *backing_dir,
+	     int read_timeout_ms)
+{
+	static const char fs_name[] = INCFS_NAME;
+	char mount_options[512];
+	int result;
+
+	snprintf(mount_options, ARRAY_SIZE(mount_options),
+		 "read_timeout_ms=%u",
+		  read_timeout_ms);
+
+	result = mount(backing_dir, mount_dir, fs_name, 0, mount_options);
+	if (result != 0)
+		perror("Error mounting fs.");
+	return result;
+}
+
+int mount_fs_opt(const char *mount_dir, const char *backing_dir,
+		 const char *opt, bool remount)
+{
+	static const char fs_name[] = INCFS_NAME;
+	int result;
+
+	result = mount(backing_dir, mount_dir, fs_name,
+		       remount ? MS_REMOUNT : 0, opt);
+	if (result != 0)
+		perror("Error mounting fs.");
+	return result;
+}
+
+struct hash_section {
+	uint32_t algorithm;
+	uint8_t log2_blocksize;
+	uint32_t salt_size;
+	/* no salt */
+	uint32_t hash_size;
+	uint8_t hash[SHA256_DIGEST_SIZE];
+} __packed;
+
+struct signature_blob {
+	uint32_t version;
+	uint32_t hash_section_size;
+	struct hash_section hash_section;
+	uint32_t signing_section_size;
+	uint8_t signing_section[];
+} __packed;
+
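+/*
+ * Build the incfs signature blob (a hash section describing a SHA-256 tree
+ * with 4K blocks, plus the caller's additional signing data) passed to
+ * INCFS_IOC_CREATE_FILE. Returns the blob size, or 0 on allocation failure.
+ */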
+size_t format_signature(void **buf, const char *root_hash, const char *add_data)
+{
+	size_t size = sizeof(struct signature_blob) + strlen(add_data) + 1;
+	struct signature_blob *sb = malloc(size);
+
+	if (!sb) {
+		*buf = NULL;
+		return 0;
+	}
+
+	*sb = (struct signature_blob){
+		.version = INCFS_SIGNATURE_VERSION,
+		.hash_section_size = sizeof(struct hash_section),
+		.hash_section =
+			(struct hash_section){
+				.algorithm = INCFS_HASH_TREE_SHA256,
+				.log2_blocksize = 12,
+				.salt_size = 0,
+				.hash_size = SHA256_DIGEST_SIZE,
+			},
+		.signing_section_size = sizeof(uint32_t) + strlen(add_data) + 1,
+	};
+
+	memcpy(sb->hash_section.hash, root_hash, SHA256_DIGEST_SIZE);
+	memcpy((char *)sb->signing_section, add_data, strlen(add_data) + 1);
+	*buf = sb;
+	return size;
+}
+
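+/*
+ * Create an incfs file whose signature blob is built from root_hash and
+ * add_data.  The file ID is derived from the MD5 of the file name, so
+ * tests can recompute it and locate files deterministically.
+ */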
+int crypto_emit_file(int fd, const char *dir, const char *filename,
+		     incfs_uuid_t *id_out, size_t size, const char *root_hash,
+		     const char *add_data)
+{
+	int mode = __S_IFREG | 0555;
+	void *signature;
+	int error = 0;
+
+	struct incfs_new_file_args args = {
+			.size = size,
+			.mode = mode,
+			.file_name = ptr_to_u64(filename),
+			.directory_path = ptr_to_u64(dir),
+			.file_attr = 0,
+			.file_attr_len = 0
+	};
+
+	args.signature_size = format_signature(&signature, root_hash, add_data);
+	args.signature_info = ptr_to_u64(signature);
+
+	md5(filename, strlen(filename), (char *)args.file_id.bytes);
+
+	if (ioctl(fd, INCFS_IOC_CREATE_FILE, &args) != 0) {
+		error = -errno;
+		goto out;
+	}
+
+	*id_out = args.file_id;
+
+out:
+	free(signature);
+	return error;
+}
+
+int emit_file(int fd, const char *dir, const char *filename,
+	      incfs_uuid_t *id_out, size_t size, const char *attr)
+{
+	int mode = __S_IFREG | 0555;
+	struct incfs_new_file_args args = { .size = size,
+					    .mode = mode,
+					    .file_name = ptr_to_u64(filename),
+					    .directory_path = ptr_to_u64(dir),
+					    .signature_info = ptr_to_u64(NULL),
+					    .signature_size = 0,
+					    .file_attr = ptr_to_u64(attr),
+					    .file_attr_len =
+						    attr ? strlen(attr) : 0 };
+
+	md5(filename, strlen(filename), (char *)args.file_id.bytes);
+
+	if (ioctl(fd, INCFS_IOC_CREATE_FILE, &args) != 0)
+		return -errno;
+
+	*id_out = args.file_id;
+	return 0;
+}
+
+/* Stub: ignores its arguments and always returns 0 (no bmap support). */
+int get_file_bmap(int cmd_fd, int ino, unsigned char *buf, int buf_size)
+{
+	return 0;
+}
+
+int get_file_signature(int fd, unsigned char *buf, int buf_size)
+{
+	struct incfs_get_file_sig_args args = {
+		.file_signature = ptr_to_u64(buf),
+		.file_signature_buf_size = buf_size
+	};
+
+	if (ioctl(fd, INCFS_IOC_READ_FILE_SIGNATURE, &args) == 0)
+		return args.file_signature_len_out;
+	return -errno;
+}
+
+loff_t get_file_size(const char *name)
+{
+	struct stat st;
+
+	if (stat(name, &st) == 0)
+		return st.st_size;
+	return -ENOENT;
+}
+
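+/*
+ * The pending_reads pseudo-file doubles as the "commands" file: tests
+ * read pending-read records from it and issue INCFS_IOC_CREATE_FILE
+ * ioctls against the same descriptor.
+ */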
+int open_commands_file(const char *mount_dir)
+{
+	char cmd_file[255];
+	int cmd_fd;
+
+	snprintf(cmd_file, ARRAY_SIZE(cmd_file),
+			"%s/%s", mount_dir, INCFS_PENDING_READS_FILENAME);
+	cmd_fd = open(cmd_file, O_RDONLY | O_CLOEXEC);
+
+	if (cmd_fd < 0)
+		perror("Can't open commands file");
+	return cmd_fd;
+}
+
+int open_log_file(const char *mount_dir)
+{
+	char file[255];
+	int fd;
+
+	snprintf(file, ARRAY_SIZE(file), "%s/.log", mount_dir);
+	fd = open(file, O_RDWR | O_CLOEXEC);
+	if (fd < 0)
+		perror("Can't open log file");
+	return fd;
+}
+
+int open_blocks_written_file(const char *mount_dir)
+{
+	char file[255];
+	int fd;
+
+	snprintf(file, ARRAY_SIZE(file),
+			"%s/%s", mount_dir, INCFS_BLOCKS_WRITTEN_FILENAME);
+	fd = open(file, O_RDONLY | O_CLOEXEC);
+
+	if (fd < 0)
+		perror("Can't open blocks_written file");
+	return fd;
+}
+
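+/*
+ * Wait up to timeout_ms (if positive) for pending reads to appear on fd,
+ * then read at most prs_count records.  Returns the number of records
+ * read, 0 on timeout, or -errno on failure.
+ */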
+int wait_for_pending_reads(int fd, int timeout_ms,
+	struct incfs_pending_read_info *prs, int prs_count)
+{
+	ssize_t read_res = 0;
+
+	if (timeout_ms > 0) {
+		int poll_res = 0;
+		struct pollfd pollfd = {
+			.fd = fd,
+			.events = POLLIN
+		};
+
+		poll_res = poll(&pollfd, 1, timeout_ms);
+		if (poll_res < 0)
+			return -errno;
+		if (poll_res == 0)
+			return 0;
+		if (!(pollfd.revents & POLLIN))
+			return 0;
+	}
+
+	read_res = read(fd, prs, prs_count * sizeof(*prs));
+	if (read_res < 0)
+		return -errno;
+
+	return read_res / sizeof(*prs);
+}
+
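+/* Same as wait_for_pending_reads(), but for the extended v2 records. */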
+int wait_for_pending_reads2(int fd, int timeout_ms,
+	struct incfs_pending_read_info2 *prs, int prs_count)
+{
+	ssize_t read_res = 0;
+
+	if (timeout_ms > 0) {
+		int poll_res = 0;
+		struct pollfd pollfd = {
+			.fd = fd,
+			.events = POLLIN
+		};
+
+		poll_res = poll(&pollfd, 1, timeout_ms);
+		if (poll_res < 0)
+			return -errno;
+		if (poll_res == 0)
+			return 0;
+		if (!(pollfd.revents & POLLIN))
+			return 0;
+	}
+
+	read_res = read(fd, prs, prs_count * sizeof(*prs));
+	if (read_res < 0)
+		return -errno;
+
+	return read_res / sizeof(*prs);
+}
+
+char *concat_file_name(const char *dir, const char *file)
+{
+	char full_name[FILENAME_MAX] = "";
+
+	if (snprintf(full_name, ARRAY_SIZE(full_name), "%s/%s", dir, file) < 0)
+		return NULL;
+	return strdup(full_name);
+}
+
+int delete_dir_tree(const char *dir_path)
+{
+	DIR *dir = NULL;
+	struct dirent *dp;
+	int result = 0;
+
+	dir = opendir(dir_path);
+	if (!dir) {
+		result = -errno;
+		goto out;
+	}
+
+	while ((dp = readdir(dir))) {
+		char *full_path;
+
+		if (!strcmp(dp->d_name, ".") || !strcmp(dp->d_name, ".."))
+			continue;
+
+		full_path = concat_file_name(dir_path, dp->d_name);
+		if (!full_path) {
+			result = -ENOMEM;
+			goto out;
+		}
+		if (dp->d_type == DT_DIR)
+			result = delete_dir_tree(full_path);
+		else
+			result = unlink(full_path);
+		free(full_path);
+		if (result)
+			goto out;
+	}
+
+out:
+	if (dir)
+		closedir(dir);
+	if (!result)
+		rmdir(dir_path);
+	return result;
+}
+
+void sha256(const char *data, size_t dsize, char *hash)
+{
+	SHA256_CTX ctx;
+
+	SHA256_Init(&ctx);
+	SHA256_Update(&ctx, data, dsize);
+	SHA256_Final((unsigned char *)hash, &ctx);
+}
+
+void md5(const char *data, size_t dsize, char *hash)
+{
+	MD5_CTX ctx;
+
+	MD5_Init(&ctx);
+	MD5_Update(&ctx, data, dsize);
+	MD5_Final((unsigned char *)hash, &ctx);
+}
diff --git a/tools/testing/selftests/filesystems/incfs/utils.h b/tools/testing/selftests/filesystems/incfs/utils.h
new file mode 100644
index 0000000..f5ed8dc
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/utils.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+#include <stdbool.h>
+#include <sys/stat.h>
+
+#include <include/uapi/linux/incrementalfs.h>
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
+
+#define __packed __attribute__((__packed__))
+
+#ifdef __LP64__
+#define ptr_to_u64(p) ((__u64)(p))
+#else
+#define ptr_to_u64(p) ((__u64)(__u32)(p))
+#endif
+
+#define SHA256_DIGEST_SIZE 32
+#define INCFS_MAX_MTREE_LEVELS 8
+
+unsigned int rnd(unsigned int max, unsigned int *seed);
+
+int remove_dir(const char *dir);
+
+int drop_caches(void);
+
+int mount_fs(const char *mount_dir, const char *backing_dir,
+	     int read_timeout_ms);
+
+int mount_fs_opt(const char *mount_dir, const char *backing_dir,
+		 const char *opt, bool remount);
+
+int get_file_bmap(int cmd_fd, int ino, unsigned char *buf, int buf_size);
+
+int get_file_signature(int fd, unsigned char *buf, int buf_size);
+
+int emit_node(int fd, char *filename, int *ino_out, int parent_ino,
+		size_t size, mode_t mode, char *attr);
+
+int emit_file(int fd, const char *dir, const char *filename,
+	      incfs_uuid_t *id_out, size_t size, const char *attr);
+
+int crypto_emit_file(int fd, const char *dir, const char *filename,
+		     incfs_uuid_t *id_out, size_t size, const char *root_hash,
+		     const char *add_data);
+
+loff_t get_file_size(const char *name);
+
+int open_commands_file(const char *mount_dir);
+
+int open_log_file(const char *mount_dir);
+
+int open_blocks_written_file(const char *mount_dir);
+
+int wait_for_pending_reads(int fd, int timeout_ms,
+	struct incfs_pending_read_info *prs, int prs_count);
+
+int wait_for_pending_reads2(int fd, int timeout_ms,
+	struct incfs_pending_read_info2 *prs, int prs_count);
+
+char *concat_file_name(const char *dir, const char *file);
+
+void sha256(const char *data, size_t dsize, char *hash);
+
+void md5(const char *data, size_t dsize, char *hash);
+
+int delete_dir_tree(const char *path);
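+
+/*
+ * Typical flow, as an illustrative sketch only (paths are placeholders,
+ * error handling is omitted, and passing a NULL directory to emit_file
+ * is an assumption of this example, not a documented contract):
+ *
+ *	mount_fs("/mnt/incfs", "/data/backing", 1000);
+ *	int cmd_fd = open_commands_file("/mnt/incfs");
+ *	incfs_uuid_t id;
+ *	emit_file(cmd_fd, NULL, "file1", &id, 4096, NULL);
+ *	struct incfs_pending_read_info prs[8];
+ *	int n = wait_for_pending_reads(cmd_fd, 1000, prs, ARRAY_SIZE(prs));
+ */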