gpu: update r32p1 gpu driver [2/14]

PD#SWPL-56978

Problem:
mali driver is not an official release for Android S

Solution:
update to r32p1 beta

Verify:
full verification tests on ohm

Change-Id: Id6cd1282e9f8f0743602224c763808700e56e9f9
Signed-off-by: binqi zhang <binqi.zhang@amlogic.com>
diff --git a/0001-update-r32p1-gpu-driver.patch b/0001-update-r32p1-gpu-driver.patch
new file mode 100644
index 0000000..ca10415
--- /dev/null
+++ b/0001-update-r32p1-gpu-driver.patch
@@ -0,0 +1,94611 @@
+From 39f791b03c5e534884f13ecfb9f86306fe7324dd Mon Sep 17 00:00:00 2001
+From: binqi zhang <binqi.zhang@amlogic.com>
+Date: Thu, 12 Aug 2021 16:34:29 +0800
+Subject: [PATCH] update r32p1 gpu driver
+
+Change-Id: Ic8672b57c42e98ed97c65b48925280fbb38de0ee
+---
+ .../ABI/testing/sysfs-device-mali             |  293 +
+ .../devicetree/bindings/arm/mali-midgard.txt  |   19 +-
+ .../bindings/arm/memory_group_manager.txt     |    7 +-
+ .../bindings/arm/priority_control_manager.txt |   48 +
+ .../arm/protected_memory_allocator.txt        |    7 +-
+ .../devicetree/bindings/power/mali-opp.txt    |    9 +-
+ .../Documentation/dma-buf-test-exporter.txt   |   10 +-
+ dvalin/kernel/Mconfig                         |   31 +-
+ dvalin/kernel/build.bp                        |   65 +-
+ dvalin/kernel/drivers/base/arm/Kbuild         |   34 +
+ dvalin/kernel/drivers/base/arm/Kconfig        |   64 +
+ dvalin/kernel/drivers/base/arm/Makefile       |   98 +
+ dvalin/kernel/drivers/base/arm/Mconfig        |   64 +
+ .../base/{ => arm}/dma_buf_lock/src/Kbuild    |    9 +-
+ .../base/{ => arm}/dma_buf_lock/src/Makefile  |   17 +-
+ .../{ => arm}/dma_buf_lock/src/dma_buf_lock.c |  132 +-
+ .../{ => arm}/dma_buf_lock/src/dma_buf_lock.h |    7 +-
+ .../base/arm/dma_buf_test_exporter/Kbuild     |   23 +
+ .../base/arm/dma_buf_test_exporter/build.bp   |   36 +
+ .../dma-buf-test-exporter.c                   |  106 +-
+ .../memory_group_manager}/Kbuild              |   11 +-
+ .../base/arm/memory_group_manager/build.bp    |   36 +
+ .../memory_group_manager.c                    |   14 +-
+ .../arm/protected_memory_allocator/Kbuild     |   23 +
+ .../arm/protected_memory_allocator/build.bp   |   36 +
+ .../protected_memory_allocator.c              |  551 ++
+ .../base/dma_buf_test_exporter/Kconfig        |   26 -
+ .../base/dma_buf_test_exporter/Makefile       |   36 -
+ .../base/dma_buf_test_exporter/build.bp       |   26 -
+ .../base/memory_group_manager/Makefile        |   35 -
+ .../base/memory_group_manager/build.bp        |   22 -
+ .../base/protected_memory_allocator/Makefile  |   35 -
+ .../base/protected_memory_allocator/build.bp  |   26 -
+ .../protected_memory_allocator.c              |  308 -
+ dvalin/kernel/drivers/gpu/arm/Kbuild          |    8 +-
+ dvalin/kernel/drivers/gpu/arm/Kconfig         |    8 +-
+ .../Kbuild => gpu/arm/Makefile}               |    9 +-
+ dvalin/kernel/drivers/gpu/arm/midgard/Kbuild  |  367 +-
+ dvalin/kernel/drivers/gpu/arm/midgard/Kconfig |  357 +-
+ .../kernel/drivers/gpu/arm/midgard/Makefile   |  201 +-
+ dvalin/kernel/drivers/gpu/arm/midgard/Mconfig |  288 +-
+ .../drivers/gpu/arm/midgard/arbiter/Kbuild    |   11 +-
+ .../arm/midgard/arbiter/mali_kbase_arbif.c    |  209 +-
+ .../arm/midgard/arbiter/mali_kbase_arbif.h    |   44 +-
+ .../midgard/arbiter/mali_kbase_arbiter_defs.h |   34 +-
+ .../arbiter/mali_kbase_arbiter_interface.h    |   70 +-
+ .../midgard/arbiter/mali_kbase_arbiter_pm.c   |  614 +-
+ .../midgard/arbiter/mali_kbase_arbiter_pm.h   |   87 +-
+ .../gpu/arm/midgard/backend/gpu/Kbuild        |   70 +-
+ .../backend/gpu/mali_kbase_backend_config.h   |    7 +-
+ .../gpu/mali_kbase_cache_policy_backend.c     |    9 +-
+ .../gpu/mali_kbase_cache_policy_backend.h     |   10 +-
+ .../gpu/mali_kbase_clk_rate_trace_mgr.c       |  325 ++
+ .../gpu/mali_kbase_clk_rate_trace_mgr.h       |  154 +
+ .../gpu/mali_kbase_debug_job_fault_backend.c  |   11 +-
+ .../midgard/backend/gpu/mali_kbase_devfreq.c  |  207 +-
+ .../midgard/backend/gpu/mali_kbase_devfreq.h  |   23 +-
+ .../backend/gpu/mali_kbase_device_hw.c        |  388 --
+ .../backend/gpu/mali_kbase_device_internal.h  |  127 -
+ .../backend/gpu/mali_kbase_gpuprops_backend.c |   75 +-
+ .../backend/gpu/mali_kbase_instr_backend.c    |  130 +-
+ .../backend/gpu/mali_kbase_instr_defs.h       |   18 +-
+ .../backend/gpu/mali_kbase_instr_internal.h   |    9 +-
+ .../backend/gpu/mali_kbase_irq_internal.h     |    7 +-
+ .../backend/gpu/mali_kbase_irq_linux.c        |   37 +-
+ .../midgard/backend/gpu/mali_kbase_jm_as.c    |   14 +-
+ .../midgard/backend/gpu/mali_kbase_jm_defs.h  |   20 +-
+ .../midgard/backend/gpu/mali_kbase_jm_hw.c    |  236 +-
+ .../backend/gpu/mali_kbase_jm_internal.h      |   17 +-
+ .../midgard/backend/gpu/mali_kbase_jm_rb.c    |  145 +-
+ .../midgard/backend/gpu/mali_kbase_jm_rb.h    |    8 +-
+ .../backend/gpu/mali_kbase_js_backend.c       |   61 +-
+ .../backend/gpu/mali_kbase_js_internal.h      |    8 +-
+ .../backend/gpu/mali_kbase_l2_mmu_config.c    |   47 +-
+ .../backend/gpu/mali_kbase_l2_mmu_config.h    |   25 +-
+ .../backend/gpu/mali_kbase_pm_always_on.c     |   13 +-
+ .../backend/gpu/mali_kbase_pm_always_on.h     |    9 +-
+ .../backend/gpu/mali_kbase_pm_backend.c       |  243 +-
+ .../midgard/backend/gpu/mali_kbase_pm_ca.c    |   44 +-
+ .../midgard/backend/gpu/mali_kbase_pm_ca.h    |    7 +-
+ .../backend/gpu/mali_kbase_pm_ca_devfreq.h    |    7 +-
+ .../backend/gpu/mali_kbase_pm_coarse_demand.c |   13 +-
+ .../backend/gpu/mali_kbase_pm_coarse_demand.h |    9 +-
+ .../midgard/backend/gpu/mali_kbase_pm_defs.h  |  244 +-
+ .../backend/gpu/mali_kbase_pm_driver.c        |  907 ++-
+ .../backend/gpu/mali_kbase_pm_internal.h      |  143 +-
+ .../backend/gpu/mali_kbase_pm_l2_states.h     |   20 +-
+ .../backend/gpu/mali_kbase_pm_mcu_states.h    |   63 +
+ .../backend/gpu/mali_kbase_pm_metrics.c       |  271 +-
+ .../backend/gpu/mali_kbase_pm_policy.c        |  204 +-
+ .../backend/gpu/mali_kbase_pm_policy.h        |    7 +-
+ .../backend/gpu/mali_kbase_pm_shader_states.h |   44 +-
+ .../arm/midgard/backend/gpu/mali_kbase_time.c |   57 +-
+ .../kernel/drivers/gpu/arm/midgard/build.bp   |  203 +-
+ .../arm/midgard/context/Kbuild}               |   19 +-
+ .../context/backend/mali_kbase_context_csf.c  |  201 +
+ .../context/backend/mali_kbase_context_jm.c   |  138 +-
+ .../arm/midgard/context/mali_kbase_context.c  |  205 +-
+ .../arm/midgard/context/mali_kbase_context.h  |   35 +-
+ .../context/mali_kbase_context_internal.h     |   18 +-
+ .../kernel/drivers/gpu/arm/midgard/csf/Kbuild |   47 +
+ .../ipa_control/Kbuild}                       |   11 +-
+ .../ipa_control/mali_kbase_csf_ipa_control.c  |  925 +++
+ .../ipa_control/mali_kbase_csf_ipa_control.h  |  244 +
+ .../gpu/arm/midgard/csf/mali_kbase_csf.c      | 3069 ++++++++++
+ .../gpu/arm/midgard/csf/mali_kbase_csf.h      |  564 ++
+ .../csf/mali_kbase_csf_cpu_queue_debugfs.c    |  191 +
+ .../csf/mali_kbase_csf_cpu_queue_debugfs.h    |   90 +
+ .../midgard/csf/mali_kbase_csf_csg_debugfs.c  |  591 ++
+ .../midgard/csf/mali_kbase_csf_csg_debugfs.h  |   47 +
+ .../gpu/arm/midgard/csf/mali_kbase_csf_defs.h | 1254 ++++
+ .../arm/midgard/csf/mali_kbase_csf_firmware.c | 2337 ++++++++
+ .../arm/midgard/csf/mali_kbase_csf_firmware.h |  811 +++
+ .../midgard/csf/mali_kbase_csf_firmware_cfg.c |  327 ++
+ .../midgard/csf/mali_kbase_csf_firmware_cfg.h |   74 +
+ .../csf/mali_kbase_csf_firmware_no_mali.c     | 1389 +++++
+ .../csf/mali_kbase_csf_heap_context_alloc.c   |  195 +
+ .../csf/mali_kbase_csf_heap_context_alloc.h   |   75 +
+ .../gpu/arm/midgard/csf/mali_kbase_csf_kcpu.c | 2258 ++++++++
+ .../gpu/arm/midgard/csf/mali_kbase_csf_kcpu.h |  356 ++
+ .../midgard/csf/mali_kbase_csf_kcpu_debugfs.c |  197 +
+ .../midgard/csf/mali_kbase_csf_kcpu_debugfs.h |   37 +
+ .../csf/mali_kbase_csf_protected_memory.c     |  119 +
+ .../csf/mali_kbase_csf_protected_memory.h     |   71 +
+ .../midgard/csf/mali_kbase_csf_reset_gpu.c    |  629 ++
+ .../midgard/csf/mali_kbase_csf_scheduler.c    | 5063 +++++++++++++++++
+ .../midgard/csf/mali_kbase_csf_scheduler.h    |  494 ++
+ .../midgard/csf/mali_kbase_csf_tiler_heap.c   |  611 ++
+ .../midgard/csf/mali_kbase_csf_tiler_heap.h   |  115 +
+ .../csf/mali_kbase_csf_tiler_heap_debugfs.c   |  106 +
+ .../csf/mali_kbase_csf_tiler_heap_debugfs.h   |   37 +
+ .../csf/mali_kbase_csf_tiler_heap_def.h       |  114 +
+ .../arm/midgard/csf/mali_kbase_csf_timeout.c  |  178 +
+ .../arm/midgard/csf/mali_kbase_csf_timeout.h  |   66 +
+ .../midgard/csf/mali_kbase_csf_tl_reader.c    |  534 ++
+ .../midgard/csf/mali_kbase_csf_tl_reader.h    |  185 +
+ .../midgard/csf/mali_kbase_csf_trace_buffer.c |  688 +++
+ .../midgard/csf/mali_kbase_csf_trace_buffer.h |  182 +
+ .../drivers/gpu/arm/midgard/debug/Kbuild      |   27 +
+ .../mali_kbase_debug_ktrace_codes_csf.h       |  278 +
+ .../mali_kbase_debug_ktrace_codes_jm.h        |   10 +-
+ .../backend/mali_kbase_debug_ktrace_csf.c     |  193 +
+ .../backend/mali_kbase_debug_ktrace_csf.h     |  203 +
+ .../mali_kbase_debug_ktrace_defs_csf.h        |  116 +
+ .../backend/mali_kbase_debug_ktrace_defs_jm.h |  100 +-
+ .../backend/mali_kbase_debug_ktrace_jm.c      |   50 +-
+ .../backend/mali_kbase_debug_ktrace_jm.h      |  118 +-
+ .../mali_kbase_debug_linux_ktrace_csf.h       |  241 +
+ .../mali_kbase_debug_linux_ktrace_jm.h        |   52 +-
+ .../midgard/debug/mali_kbase_debug_ktrace.c   |   55 +-
+ .../midgard/debug/mali_kbase_debug_ktrace.h   |   20 +-
+ .../debug/mali_kbase_debug_ktrace_codes.h     |   24 +-
+ .../debug/mali_kbase_debug_ktrace_defs.h      |   83 +-
+ .../debug/mali_kbase_debug_ktrace_internal.h  |    7 +-
+ .../debug/mali_kbase_debug_linux_ktrace.h     |   40 +-
+ .../{tests/kutf/Makefile => device/Kbuild}    |   30 +-
+ .../device/backend/mali_kbase_device_csf.c    |  464 ++
+ .../device/backend/mali_kbase_device_hw_csf.c |  163 +
+ .../device/backend/mali_kbase_device_hw_jm.c  |   98 +
+ .../device/backend/mali_kbase_device_jm.c     |  181 +-
+ .../arm/midgard/device/mali_kbase_device.c    |  233 +-
+ .../arm/midgard/device/mali_kbase_device.h    |  126 +-
+ .../arm/midgard/device/mali_kbase_device_hw.c |  182 +
+ .../device/mali_kbase_device_internal.h       |   24 +-
+ .../{tests/kutf/Kconfig => gpu/Kbuild}        |   19 +-
+ .../gpu/backend/mali_kbase_gpu_fault_csf.c    |  104 +
+ .../gpu/backend/mali_kbase_gpu_fault_jm.c     |   13 +-
+ .../gpu/arm/midgard/gpu/mali_kbase_gpu.c      |    8 +-
+ .../arm/midgard/gpu/mali_kbase_gpu_fault.h    |   23 +-
+ .../arm/midgard/gpu/mali_kbase_gpu_regmap.h   |  416 +-
+ .../kernel/drivers/gpu/arm/midgard/ipa/Kbuild |   25 +-
+ .../mali_kbase_ipa_counter_common_csf.c       |  457 ++
+ .../mali_kbase_ipa_counter_common_csf.h       |  159 +
+ .../mali_kbase_ipa_counter_common_jm.c}       |   20 +-
+ .../mali_kbase_ipa_counter_common_jm.h}       |   40 +-
+ .../ipa/backend/mali_kbase_ipa_counter_csf.c  |  171 +
+ .../mali_kbase_ipa_counter_jm.c}              |  113 +-
+ .../gpu/arm/midgard/ipa/mali_kbase_ipa.c      |  287 +-
+ .../gpu/arm/midgard/ipa/mali_kbase_ipa.h      |   90 +-
+ .../arm/midgard/ipa/mali_kbase_ipa_debugfs.c  |   12 +-
+ .../arm/midgard/ipa/mali_kbase_ipa_debugfs.h  |   12 +-
+ .../arm/midgard/ipa/mali_kbase_ipa_simple.c   |   53 +-
+ .../arm/midgard/ipa/mali_kbase_ipa_simple.h   |    7 +-
+ .../gpu/arm/midgard/jm/mali_kbase_jm_defs.h   |   76 +-
+ .../gpu/arm/midgard/jm/mali_kbase_jm_js.h     |   89 +-
+ .../gpu/arm/midgard/jm/mali_kbase_js_defs.h   |  411 +-
+ .../arm/midgard/mali_base_hwconfig_features.h |   62 +-
+ .../arm/midgard/mali_base_hwconfig_issues.h   |   62 +-
+ .../drivers/gpu/arm/midgard/mali_kbase.h      |  165 +-
+ .../arm/midgard/mali_kbase_as_fault_debugfs.c |   11 +-
+ .../arm/midgard/mali_kbase_as_fault_debugfs.h |    9 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_bits.h |   16 +-
+ .../gpu/arm/midgard/mali_kbase_cache_policy.c |   12 +-
+ .../gpu/arm/midgard/mali_kbase_cache_policy.h |   11 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_caps.h |   61 +
+ .../gpu/arm/midgard/mali_kbase_ccswe.c        |  100 +
+ .../gpu/arm/midgard/mali_kbase_ccswe.h        |   96 +
+ .../gpu/arm/midgard/mali_kbase_config.c       |   68 +-
+ .../gpu/arm/midgard/mali_kbase_config.h       |  294 +-
+ .../arm/midgard/mali_kbase_config_defaults.h  |   26 +-
+ .../gpu/arm/midgard/mali_kbase_core_linux.c   | 1969 +++++--
+ .../arm/midgard/mali_kbase_cs_experimental.h  |   20 +-
+ .../gpu/arm/midgard/mali_kbase_ctx_sched.c    |   84 +-
+ .../gpu/arm/midgard/mali_kbase_ctx_sched.h    |   44 +-
+ .../gpu/arm/midgard/mali_kbase_debug.c        |    9 +-
+ .../gpu/arm/midgard/mali_kbase_debug.h        |   79 +-
+ .../arm/midgard/mali_kbase_debug_job_fault.c  |   36 +-
+ .../arm/midgard/mali_kbase_debug_job_fault.h  |   10 +-
+ .../arm/midgard/mali_kbase_debug_mem_view.c   |   26 +-
+ .../arm/midgard/mali_kbase_debug_mem_view.h   |    7 +-
+ .../arm/midgard/mali_kbase_debugfs_helper.c   |  104 +-
+ .../arm/midgard/mali_kbase_debugfs_helper.h   |   53 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_defs.h |  573 +-
+ .../arm/midgard/mali_kbase_disjoint_events.c  |    7 +-
+ .../gpu/arm/midgard/mali_kbase_dma_fence.c    |   59 +-
+ .../gpu/arm/midgard/mali_kbase_dma_fence.h    |   26 +-
+ .../gpu/arm/midgard/mali_kbase_dummy_job_wa.c |   15 +-
+ .../gpu/arm/midgard/mali_kbase_dummy_job_wa.h |   36 +-
+ .../gpu/arm/midgard/mali_kbase_dvfs_debugfs.c |   98 +
+ .../gpu/arm/midgard/mali_kbase_dvfs_debugfs.h |   35 +
+ .../gpu/arm/midgard/mali_kbase_event.c        |   30 +-
+ .../gpu/arm/midgard/mali_kbase_fence.c        |   73 +-
+ .../gpu/arm/midgard/mali_kbase_fence.h        |   17 +-
+ .../gpu/arm/midgard/mali_kbase_fence_defs.h   |   15 +-
+ .../gpu/arm/midgard/mali_kbase_fence_ops.c    |   83 +
+ .../gpu/arm/midgard/mali_kbase_gator.h        |    7 +-
+ .../midgard/mali_kbase_gpu_memory_debugfs.c   |   22 +-
+ .../midgard/mali_kbase_gpu_memory_debugfs.h   |   26 +-
+ .../gpu/arm/midgard/mali_kbase_gpuprops.c     |  315 +-
+ .../gpu/arm/midgard/mali_kbase_gpuprops.h     |   67 +-
+ .../arm/midgard/mali_kbase_gpuprops_types.h   |   89 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_gwt.c  |   11 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_gwt.h  |    9 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_hw.c   |  230 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_hw.h   |   23 +-
+ .../arm/midgard/mali_kbase_hwaccess_backend.h |    8 +-
+ .../arm/midgard/mali_kbase_hwaccess_defs.h    |   15 +-
+ .../midgard/mali_kbase_hwaccess_gpuprops.h    |   39 +-
+ .../arm/midgard/mali_kbase_hwaccess_instr.h   |   22 +-
+ .../gpu/arm/midgard/mali_kbase_hwaccess_jm.h  |   12 +-
+ .../gpu/arm/midgard/mali_kbase_hwaccess_pm.h  |   75 +-
+ .../arm/midgard/mali_kbase_hwaccess_time.h    |   30 +-
+ .../gpu/arm/midgard/mali_kbase_hwcnt.c        |  103 +-
+ .../midgard/mali_kbase_hwcnt_accumulator.h    |    7 +-
+ .../arm/midgard/mali_kbase_hwcnt_backend.h    |  102 +-
+ .../midgard/mali_kbase_hwcnt_backend_csf.c    | 1864 ++++++
+ .../midgard/mali_kbase_hwcnt_backend_csf.h    |  162 +
+ .../midgard/mali_kbase_hwcnt_backend_csf_if.h |  311 +
+ .../mali_kbase_hwcnt_backend_csf_if_fw.c      |  787 +++
+ .../mali_kbase_hwcnt_backend_csf_if_fw.h      |   50 +
+ .../midgard/mali_kbase_hwcnt_backend_gpu.c    |  510 --
+ .../arm/midgard/mali_kbase_hwcnt_backend_jm.c |  793 +++
+ ...nd_gpu.h => mali_kbase_hwcnt_backend_jm.h} |   23 +-
+ .../arm/midgard/mali_kbase_hwcnt_context.h    |   46 +-
+ .../gpu/arm/midgard/mali_kbase_hwcnt_gpu.c    |  760 +--
+ .../gpu/arm/midgard/mali_kbase_hwcnt_gpu.h    |  314 +-
+ .../gpu/arm/midgard/mali_kbase_hwcnt_legacy.c |   11 +-
+ .../gpu/arm/midgard/mali_kbase_hwcnt_legacy.h |    7 +-
+ .../gpu/arm/midgard/mali_kbase_hwcnt_types.c  |  129 +-
+ .../gpu/arm/midgard/mali_kbase_hwcnt_types.h  |  125 +-
+ .../midgard/mali_kbase_hwcnt_virtualizer.c    |   26 +-
+ .../midgard/mali_kbase_hwcnt_virtualizer.h    |   23 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_jd.c   |  331 +-
+ .../gpu/arm/midgard/mali_kbase_jd_debugfs.c   |   56 +-
+ .../gpu/arm/midgard/mali_kbase_jd_debugfs.h   |   12 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_jm.c   |   16 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_jm.h   |   12 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_js.c   |  422 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_js.h   |   12 +-
+ .../gpu/arm/midgard/mali_kbase_js_ctx_attr.c  |   39 +-
+ .../gpu/arm/midgard/mali_kbase_js_ctx_attr.h  |   47 +-
+ .../gpu/arm/midgard/mali_kbase_kinstr_jm.c    |  894 +++
+ .../gpu/arm/midgard/mali_kbase_kinstr_jm.h    |  275 +
+ .../gpu/arm/midgard/mali_kbase_linux.h        |   12 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_mem.c  | 1160 ++--
+ .../drivers/gpu/arm/midgard/mali_kbase_mem.h  |  518 +-
+ .../gpu/arm/midgard/mali_kbase_mem_linux.c    |  683 ++-
+ .../gpu/arm/midgard/mali_kbase_mem_linux.h    |   36 +-
+ .../gpu/arm/midgard/mali_kbase_mem_lowlevel.h |   13 +-
+ .../gpu/arm/midgard/mali_kbase_mem_pool.c     |   44 +-
+ .../arm/midgard/mali_kbase_mem_pool_debugfs.c |    7 +-
+ .../arm/midgard/mali_kbase_mem_pool_debugfs.h |    7 +-
+ .../arm/midgard/mali_kbase_mem_pool_group.c   |    7 +-
+ .../arm/midgard/mali_kbase_mem_pool_group.h   |    7 +-
+ .../midgard/mali_kbase_mem_profile_debugfs.c  |   26 +-
+ .../midgard/mali_kbase_mem_profile_debugfs.h  |   23 +-
+ .../mali_kbase_mem_profile_debugfs_buf_size.h |    8 +-
+ .../arm/midgard/mali_kbase_mipe_gen_header.h  |   40 +-
+ .../gpu/arm/midgard/mali_kbase_mipe_proto.h   |    7 +-
+ .../gpu/arm/midgard/mali_kbase_native_mgm.c   |    7 +-
+ .../gpu/arm/midgard/mali_kbase_native_mgm.h   |    9 +-
+ .../arm/midgard/mali_kbase_platform_fake.c    |   15 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_pm.c   |   55 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_pm.h   |   34 +-
+ .../midgard/mali_kbase_regs_history_debugfs.c |  138 +-
+ .../midgard/mali_kbase_regs_history_debugfs.h |   40 +-
+ .../gpu/arm/midgard/mali_kbase_reset_gpu.h    |  162 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_smc.c  |    9 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_smc.h  |   11 +-
+ .../gpu/arm/midgard/mali_kbase_softjobs.c     |  167 +-
+ .../gpu/arm/midgard/mali_kbase_strings.c      |   10 +-
+ .../gpu/arm/midgard/mali_kbase_strings.h      |    7 +-
+ .../drivers/gpu/arm/midgard/mali_kbase_sync.h |   28 +-
+ .../gpu/arm/midgard/mali_kbase_sync_android.c |   67 +-
+ .../gpu/arm/midgard/mali_kbase_sync_common.c  |   11 +-
+ .../gpu/arm/midgard/mali_kbase_sync_file.c    |   36 +-
+ .../arm/midgard/mali_kbase_trace_gpu_mem.c    |  221 +
+ .../arm/midgard/mali_kbase_trace_gpu_mem.h    |  100 +
+ .../gpu/arm/midgard/mali_kbase_utility.h      |   11 +-
+ .../gpu/arm/midgard/mali_kbase_vinstr.c       |  311 +-
+ .../gpu/arm/midgard/mali_kbase_vinstr.h       |    7 +-
+ .../gpu/arm/midgard/mali_linux_trace.h        |   47 +-
+ .../drivers/gpu/arm/midgard/mali_malisw.h     |   60 +-
+ ...gpu.h => mali_power_gpu_frequency_trace.c} |   21 +-
+ .../midgard/mali_power_gpu_frequency_trace.h  |   68 +
+ .../Kconfig => gpu/arm/midgard/mmu/Kbuild}    |   22 +-
+ .../midgard/mmu/backend/mali_kbase_mmu_csf.c  |  565 ++
+ .../midgard/mmu/backend/mali_kbase_mmu_jm.c   |   90 +-
+ .../gpu/arm/midgard/mmu/mali_kbase_mmu.c      |  220 +-
+ .../gpu/arm/midgard/mmu/mali_kbase_mmu.h      |   45 +-
+ .../gpu/arm/midgard/mmu/mali_kbase_mmu_hw.h   |   12 +-
+ .../midgard/mmu/mali_kbase_mmu_hw_direct.c    |   67 +-
+ .../arm/midgard/mmu/mali_kbase_mmu_internal.h |   49 +-
+ .../midgard/mmu/mali_kbase_mmu_mode_aarch64.c |   25 +-
+ .../midgard/mmu/mali_kbase_mmu_mode_lpae.c    |  215 -
+ .../drivers/gpu/arm/midgard/platform/Kconfig  |   10 +-
+ .../arm/midgard/platform/devicetree/Kbuild    |    7 +-
+ .../midgard/platform/devicetree/Kbuild.rej    |   17 +
+ .../devicetree/mali_kbase_clk_rate_trace.c    |  105 +
+ .../devicetree/mali_kbase_config_devicetree.c |   39 +-
+ .../devicetree/mali_kbase_config_platform.c   |   43 +
+ .../devicetree/mali_kbase_config_platform.h   |   54 +-
+ .../mali_kbase_config_platform.h.rej          |   42 +
+ .../devicetree/mali_kbase_runtime_pm.c        |   26 +-
+ .../gpu/arm/midgard/platform/vexpress/Kbuild  |   11 +-
+ .../vexpress/mali_kbase_config_platform.h     |    7 +-
+ .../vexpress/mali_kbase_config_vexpress.c     |   22 +-
+ .../midgard/platform/vexpress_1xv7_a57/Kbuild |   11 +-
+ .../mali_kbase_config_platform.h              |    7 +-
+ .../mali_kbase_config_vexpress.c              |   20 +-
+ .../platform/vexpress_6xvirtex7_10mhz/Kbuild  |   13 +-
+ .../mali_kbase_config_platform.h              |    7 +-
+ .../mali_kbase_config_vexpress.c              |   22 +-
+ .../gpu/arm/midgard/protected_mode_switcher.h |   31 +-
+ .../drivers/gpu/arm/midgard/tests/Kbuild      |   17 +-
+ .../drivers/gpu/arm/midgard/tests/Kconfig     |   46 +-
+ .../drivers/gpu/arm/midgard/tests/Mconfig     |   81 +-
+ .../drivers/gpu/arm/midgard/tests/build.bp    |   40 +
+ .../midgard/tests/include/kutf/kutf_helpers.h |   15 +-
+ .../tests/include/kutf/kutf_helpers_user.h    |   25 +-
+ .../arm/midgard/tests/include/kutf/kutf_mem.h |    7 +-
+ .../tests/include/kutf/kutf_resultset.h       |    7 +-
+ .../midgard/tests/include/kutf/kutf_suite.h   |   29 +-
+ .../midgard/tests/include/kutf/kutf_utils.h   |    7 +-
+ .../drivers/gpu/arm/midgard/tests/kutf/Kbuild |   21 +-
+ .../gpu/arm/midgard/tests/kutf/build.bp       |   24 +-
+ .../gpu/arm/midgard/tests/kutf/kutf_helpers.c |   13 +-
+ .../midgard/tests/kutf/kutf_helpers_user.c    |   28 +-
+ .../gpu/arm/midgard/tests/kutf/kutf_mem.c     |    7 +-
+ .../arm/midgard/tests/kutf/kutf_resultset.c   |    7 +-
+ .../gpu/arm/midgard/tests/kutf/kutf_suite.c   |   23 +-
+ .../gpu/arm/midgard/tests/kutf/kutf_utils.c   |    7 +-
+ .../mali_kutf_clk_rate_trace/kernel/Kbuild    |   25 +
+ .../mali_kutf_clk_rate_trace/kernel/build.bp  |   43 +
+ .../kernel/mali_kutf_clk_rate_trace_test.c    |  957 ++++
+ .../mali_kutf_clk_rate_trace_test.h           |  151 +
+ .../midgard/tests/mali_kutf_irq_test/Kbuild   |   13 +-
+ .../midgard/tests/mali_kutf_irq_test/Kconfig  |   29 -
+ .../midgard/tests/mali_kutf_irq_test/Makefile |   51 -
+ .../midgard/tests/mali_kutf_irq_test/build.bp |   21 +-
+ .../mali_kutf_irq_test_main.c                 |   15 +-
+ .../arm/midgard/thirdparty}/Kbuild            |    9 +-
+ .../arm/midgard/thirdparty/mali_kbase_mmap.c  |   77 +-
+ .../kernel/drivers/gpu/arm/midgard/tl/Kbuild  |   32 +
+ .../tl/backend/mali_kbase_timeline_csf.c      |  171 +
+ .../tl/backend/mali_kbase_timeline_jm.c       |   23 +-
+ .../gpu/arm/midgard/tl/mali_kbase_timeline.c  |  142 +-
+ .../gpu/arm/midgard/tl/mali_kbase_timeline.h  |   57 +-
+ .../arm/midgard/tl/mali_kbase_timeline_io.c   |  177 +-
+ .../arm/midgard/tl/mali_kbase_timeline_priv.h |   26 +-
+ .../arm/midgard/tl/mali_kbase_tl_serialize.h  |    7 +-
+ .../gpu/arm/midgard/tl/mali_kbase_tlstream.c  |   43 +-
+ .../gpu/arm/midgard/tl/mali_kbase_tlstream.h  |   22 +-
+ .../arm/midgard/tl/mali_kbase_tracepoints.c   |  585 +-
+ .../arm/midgard/tl/mali_kbase_tracepoints.h   | 1159 +++-
+ .../include/linux/dma-buf-test-exporter.h     |    8 +-
+ .../include/linux/memory_group_manager.h      |    7 +-
+ .../include/linux/priority_control_manager.h  |   77 +
+ .../linux/protected_memory_allocator.h        |    7 +-
+ .../include/linux/protected_mode_switcher.h   |    7 +-
+ .../arm/midgard/csf/mali_base_csf_kernel.h    |  765 +++
+ .../csf/mali_gpu_csf_control_registers.h      |   32 +
+ .../arm/midgard/csf/mali_gpu_csf_registers.h  | 1488 +++++
+ .../arm/midgard/csf/mali_kbase_csf_ioctl.h    |  433 ++
+ .../gpu/backend/mali_kbase_gpu_regmap_csf.h   |  335 ++
+ .../gpu/backend/mali_kbase_gpu_regmap_jm.h    |   47 +-
+ .../midgard/gpu/mali_kbase_gpu_coherency.h    |   13 +-
+ .../gpu/arm/midgard/gpu/mali_kbase_gpu_id.h   |   64 +-
+ .../arm/midgard/gpu/mali_kbase_gpu_regmap.h   |  434 ++
+ .../gpu/arm/midgard/jm/mali_base_jm_kernel.h  |  285 +-
+ .../gpu/arm/midgard/jm/mali_kbase_jm_ioctl.h  |  107 +-
+ .../uapi}/gpu/arm/midgard/mali_base_kernel.h  |  461 +-
+ .../gpu/arm/midgard/mali_base_mem_priv.h      |   23 +-
+ .../gpu/arm/midgard/mali_kbase_hwcnt_reader.h |   76 +-
+ .../uapi}/gpu/arm/midgard/mali_kbase_ioctl.h  |  212 +-
+ .../arm/midgard/mali_kbase_kinstr_jm_reader.h |   69 +
+ .../uapi}/gpu/arm/midgard/mali_uk.h           |   46 +-
+ 407 files changed, 58537 insertions(+), 10409 deletions(-)
+ create mode 100644 dvalin/kernel/Documentation/ABI/testing/sysfs-device-mali
+ create mode 100644 dvalin/kernel/Documentation/devicetree/bindings/arm/priority_control_manager.txt
+ create mode 100644 dvalin/kernel/drivers/base/arm/Kbuild
+ create mode 100644 dvalin/kernel/drivers/base/arm/Kconfig
+ create mode 100644 dvalin/kernel/drivers/base/arm/Makefile
+ create mode 100644 dvalin/kernel/drivers/base/arm/Mconfig
+ rename dvalin/kernel/drivers/base/{ => arm}/dma_buf_lock/src/Kbuild (78%)
+ rename dvalin/kernel/drivers/base/{ => arm}/dma_buf_lock/src/Makefile (71%)
+ rename dvalin/kernel/drivers/base/{ => arm}/dma_buf_lock/src/dma_buf_lock.c (90%)
+ rename dvalin/kernel/drivers/base/{ => arm}/dma_buf_lock/src/dma_buf_lock.h (88%)
+ create mode 100644 dvalin/kernel/drivers/base/arm/dma_buf_test_exporter/Kbuild
+ create mode 100644 dvalin/kernel/drivers/base/arm/dma_buf_test_exporter/build.bp
+ rename dvalin/kernel/drivers/base/{ => arm}/dma_buf_test_exporter/dma-buf-test-exporter.c (89%)
+ rename dvalin/kernel/drivers/base/{dma_buf_test_exporter => arm/memory_group_manager}/Kbuild (74%)
+ create mode 100644 dvalin/kernel/drivers/base/arm/memory_group_manager/build.bp
+ rename dvalin/kernel/drivers/base/{ => arm}/memory_group_manager/memory_group_manager.c (98%)
+ create mode 100644 dvalin/kernel/drivers/base/arm/protected_memory_allocator/Kbuild
+ create mode 100644 dvalin/kernel/drivers/base/arm/protected_memory_allocator/build.bp
+ create mode 100644 dvalin/kernel/drivers/base/arm/protected_memory_allocator/protected_memory_allocator.c
+ delete mode 100644 dvalin/kernel/drivers/base/dma_buf_test_exporter/Kconfig
+ delete mode 100644 dvalin/kernel/drivers/base/dma_buf_test_exporter/Makefile
+ delete mode 100644 dvalin/kernel/drivers/base/dma_buf_test_exporter/build.bp
+ delete mode 100644 dvalin/kernel/drivers/base/memory_group_manager/Makefile
+ delete mode 100644 dvalin/kernel/drivers/base/memory_group_manager/build.bp
+ delete mode 100644 dvalin/kernel/drivers/base/protected_memory_allocator/Makefile
+ delete mode 100644 dvalin/kernel/drivers/base/protected_memory_allocator/build.bp
+ delete mode 100644 dvalin/kernel/drivers/base/protected_memory_allocator/protected_memory_allocator.c
+ rename dvalin/kernel/drivers/{base/protected_memory_allocator/Kbuild => gpu/arm/Makefile} (77%)
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_clk_rate_trace_mgr.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_clk_rate_trace_mgr.h
+ delete mode 100755 dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
+ delete mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_mcu_states.h
+ rename dvalin/kernel/drivers/{base/memory_group_manager/Kconfig => gpu/arm/midgard/context/Kbuild} (63%)
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/context/backend/mali_kbase_context_csf.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/Kbuild
+ rename dvalin/kernel/drivers/gpu/arm/midgard/{Makefile.kbase => csf/ipa_control/Kbuild} (75%)
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/ipa_control/mali_kbase_csf_ipa_control.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/ipa_control/mali_kbase_csf_ipa_control.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_cpu_queue_debugfs.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_cpu_queue_debugfs.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_csg_debugfs.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_csg_debugfs.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_defs.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_firmware.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_firmware.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_firmware_cfg.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_firmware_cfg.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_firmware_no_mali.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_heap_context_alloc.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_heap_context_alloc.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_kcpu.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_kcpu.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_kcpu_debugfs.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_kcpu_debugfs.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_protected_memory.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_protected_memory.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_reset_gpu.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_scheduler.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_scheduler.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_tiler_heap.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_tiler_heap.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_tiler_heap_debugfs.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_tiler_heap_debugfs.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_tiler_heap_def.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_timeout.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_timeout.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_tl_reader.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_tl_reader.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_trace_buffer.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/csf/mali_kbase_csf_trace_buffer.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/debug/Kbuild
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/debug/backend/mali_kbase_debug_ktrace_codes_csf.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/debug/backend/mali_kbase_debug_ktrace_csf.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/debug/backend/mali_kbase_debug_ktrace_csf.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/debug/backend/mali_kbase_debug_ktrace_defs_csf.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/debug/backend/mali_kbase_debug_linux_ktrace_csf.h
+ rename dvalin/kernel/drivers/gpu/arm/midgard/{tests/kutf/Makefile => device/Kbuild} (56%)
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/device/backend/mali_kbase_device_csf.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/device/backend/mali_kbase_device_hw_csf.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/device/backend/mali_kbase_device_hw_jm.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/device/mali_kbase_device_hw.c
+ rename dvalin/kernel/drivers/gpu/arm/midgard/{tests/kutf/Kconfig => gpu/Kbuild} (65%)
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/gpu/backend/mali_kbase_gpu_fault_csf.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/ipa/backend/mali_kbase_ipa_counter_common_csf.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/ipa/backend/mali_kbase_ipa_counter_common_csf.h
+ rename dvalin/kernel/drivers/gpu/arm/midgard/ipa/{mali_kbase_ipa_vinstr_common.c => backend/mali_kbase_ipa_counter_common_jm.c} (95%)
+ rename dvalin/kernel/drivers/gpu/arm/midgard/ipa/{mali_kbase_ipa_vinstr_common.h => backend/mali_kbase_ipa_counter_common_jm.h} (85%)
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/ipa/backend/mali_kbase_ipa_counter_csf.c
+ rename dvalin/kernel/drivers/gpu/arm/midgard/ipa/{mali_kbase_ipa_vinstr_g7x.c => backend/mali_kbase_ipa_counter_jm.c} (83%)
+ mode change 100644 => 100755 dvalin/kernel/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_caps.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_ccswe.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_ccswe.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_dvfs_debugfs.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_dvfs_debugfs.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_fence_ops.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_csf.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_csf.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_csf_if.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_csf_if_fw.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_csf_if_fw.h
+ delete mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_gpu.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_hwcnt_backend_jm.c
+ rename dvalin/kernel/drivers/gpu/arm/midgard/{mali_kbase_hwcnt_backend_gpu.h => mali_kbase_hwcnt_backend_jm.h} (75%)
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_kinstr_jm.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_kinstr_jm.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_gpu_mem.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_kbase_trace_gpu_mem.h
+ rename dvalin/kernel/drivers/gpu/arm/midgard/{gpu/mali_kbase_gpu.h => mali_power_gpu_frequency_trace.c} (67%)
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mali_power_gpu_frequency_trace.h
+ rename dvalin/kernel/drivers/{base/protected_memory_allocator/Kconfig => gpu/arm/midgard/mmu/Kbuild} (61%)
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mmu/backend/mali_kbase_mmu_csf.c
+ delete mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/mmu/mali_kbase_mmu_mode_lpae.c
+ mode change 100644 => 100755 dvalin/kernel/drivers/gpu/arm/midgard/platform/devicetree/Kbuild
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/platform/devicetree/Kbuild.rej
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_clk_rate_trace.c
+ mode change 100644 => 100755 dvalin/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_devicetree.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.c
+ mode change 100644 => 100755 dvalin/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_config_platform.h.rej
+ mode change 100644 => 100755 dvalin/kernel/drivers/gpu/arm/midgard/platform/devicetree/mali_kbase_runtime_pm.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/tests/build.bp
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/tests/mali_kutf_clk_rate_trace/kernel/Kbuild
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/tests/mali_kutf_clk_rate_trace/kernel/build.bp
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/tests/mali_kutf_clk_rate_trace/kernel/mali_kutf_clk_rate_trace_test.c
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/tests/mali_kutf_clk_rate_trace/mali_kutf_clk_rate_trace_test.h
+ delete mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Kconfig
+ delete mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile
+ rename dvalin/kernel/drivers/{base/memory_group_manager => gpu/arm/midgard/thirdparty}/Kbuild (82%)
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/tl/Kbuild
+ create mode 100644 dvalin/kernel/drivers/gpu/arm/midgard/tl/backend/mali_kbase_timeline_csf.c
+ create mode 100644 dvalin/kernel/include/linux/priority_control_manager.h
+ create mode 100644 dvalin/kernel/include/uapi/gpu/arm/midgard/csf/mali_base_csf_kernel.h
+ create mode 100644 dvalin/kernel/include/uapi/gpu/arm/midgard/csf/mali_gpu_csf_control_registers.h
+ create mode 100644 dvalin/kernel/include/uapi/gpu/arm/midgard/csf/mali_gpu_csf_registers.h
+ create mode 100644 dvalin/kernel/include/uapi/gpu/arm/midgard/csf/mali_kbase_csf_ioctl.h
+ create mode 100644 dvalin/kernel/include/uapi/gpu/arm/midgard/gpu/backend/mali_kbase_gpu_regmap_csf.h
+ rename dvalin/kernel/{drivers => include/uapi}/gpu/arm/midgard/gpu/backend/mali_kbase_gpu_regmap_jm.h (89%)
+ rename dvalin/kernel/{drivers => include/uapi}/gpu/arm/midgard/gpu/mali_kbase_gpu_coherency.h (75%)
+ rename dvalin/kernel/{drivers => include/uapi}/gpu/arm/midgard/gpu/mali_kbase_gpu_id.h (74%)
+ create mode 100644 dvalin/kernel/include/uapi/gpu/arm/midgard/gpu/mali_kbase_gpu_regmap.h
+ rename dvalin/kernel/{drivers => include/uapi}/gpu/arm/midgard/jm/mali_base_jm_kernel.h (75%)
+ rename dvalin/kernel/{drivers => include/uapi}/gpu/arm/midgard/jm/mali_kbase_jm_ioctl.h (57%)
+ rename dvalin/kernel/{drivers => include/uapi}/gpu/arm/midgard/mali_base_kernel.h (72%)
+ rename dvalin/kernel/{drivers => include/uapi}/gpu/arm/midgard/mali_base_mem_priv.h (80%)
+ rename dvalin/kernel/{drivers => include/uapi}/gpu/arm/midgard/mali_kbase_hwcnt_reader.h (52%)
+ rename dvalin/kernel/{drivers => include/uapi}/gpu/arm/midgard/mali_kbase_ioctl.h (83%)
+ create mode 100644 dvalin/kernel/include/uapi/gpu/arm/midgard/mali_kbase_kinstr_jm_reader.h
+ rename dvalin/kernel/{drivers => include/uapi}/gpu/arm/midgard/mali_uk.h (69%)
+
+diff --git a/dvalin/kernel/Documentation/ABI/testing/sysfs-device-mali b/dvalin/kernel/Documentation/ABI/testing/sysfs-device-mali
+new file mode 100644
+index 0000000..99f8ae5
+--- /dev/null
++++ b/dvalin/kernel/Documentation/ABI/testing/sysfs-device-mali
+@@ -0,0 +1,293 @@
++/*
++ *
++ * (C) COPYRIGHT 2020 ARM Limited. All rights reserved.
++ *
++ * This program is free software and is provided to you under the terms of the
++ * GNU General Public License version 2 as published by the Free Software
++ * Foundation, and any use by you of this program is subject to the terms
++ * of such GNU licence.
++ *
++ * A copy of the licence is included with the program, and can also be obtained
++ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
++ * Boston, MA  02110-1301, USA.
++ *
++ */
++
++What:		/sys/class/misc/mali%u/device/core_mask
++Description:
++		This attribute is used to restrict the set of shader cores
++		available in this instance, which is useful for debugging
++		purposes. Reading this attribute provides the mask of all
++		available cores. Writing to it sets the current core mask; it is
++		not possible to disable all the cores present in this instance.
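++
++		Example (illustrative; assumes the available core mask read
++		back from this attribute is 0xF):
++		echo 0x7 > /sys/class/misc/mali0/device/core_mask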
++
++What:		/sys/class/misc/mali%u/device/debug_command
++Description:
++		This attribute is used to issue debug commands supported by
++		the driver. Reading it provides the list of debug commands
++		that are supported, and writing one of those commands back
++		will enable that debug option.
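++
++		Example (the available command names are driver-specific,
++		so list them first):
++		cat /sys/class/misc/mali0/device/debug_command
++		echo <one_of_the_listed_commands> > /sys/class/misc/mali0/device/debug_command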
++
++What:		/sys/class/misc/mali%u/device/dvfs_period
++Description:
++		This attribute is used to set the DVFS sampling period used
++		by the driver. Reading it provides the current DVFS sampling
++		period; writing a value sets a new sampling period.
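++
++		Example (an illustrative sampling period, in milliseconds):
++		echo 100 > /sys/class/misc/mali0/device/dvfs_period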
++
++What:		/sys/class/misc/mali%u/device/dummy_job_wa_info
++Description:
++		This attribute is available only with a platform device that
++		supports a Job Manager based GPU requiring a workaround that
++		executes a dummy fragment job on all shader cores to work
++		around a hang issue.
++
++		It is a read-only attribute; reading it gives details of the
++		options used with the dummy-job workaround.
++
++What:		/sys/class/misc/mali%u/device/fw_timeout
++Description:
++		This attribute is available only with a Mali platform
++		device driver that supports a CSF GPU. This attribute is
++		used to set the timeout, in milliseconds, for waiting for
++		a GPU status change request to be acknowledged by the
++		firmware.
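++
++		Example (illustrative value, not a recommended default):
++		echo 2000 > /sys/class/misc/mali0/device/fw_timeout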
++
++What:		/sys/class/misc/mali%u/device/gpuinfo
++Description:
++		This attribute provides a description of the present Mali GPU.
++		It is a read-only attribute providing details such as the GPU family,
++		the number of cores, the hardware version and the raw product id.
++
++What:		/sys/class/misc/mali%u/device/idle_hysteresis_time
++Description:
++		This attribute is available only with a Mali platform
++		device driver that supports a CSF GPU. This attribute is
++		used to set, in milliseconds, the hysteresis duration
++		used for GPU idle detection.
++
++What:		/sys/class/misc/mali%u/device/js_ctx_scheduling_mode
++Description:
++		This attribute is available only with a platform device that
++		supports a Job Manager based GPU. This attribute is used to set
++		the context scheduling priority for a job slot.
++
++		Reading it provides the currently set job slot context
++		priority.
++
++		Writing 0 to this attribute sets it to the mode where
++		higher priority atoms will be scheduled first, regardless of
++		the context they belong to. Newly-runnable higher priority atoms
++		can preempt lower priority atoms currently running on the GPU,
++		even if they belong to a different context.
++
++		Writing 1 to this attribute sets it to the mode where the
++		highest-priority atom will be chosen from each context in turn
++		using a round-robin algorithm, so priority only has an effect
++		within the context an atom belongs to. Newly-runnable higher
++		priority atoms can preempt the lower priority atoms currently
++		running on the GPU, but only if they belong to the same context.
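++
++		Example (selects the per-context round-robin mode described
++		above):
++		echo 1 > /sys/class/misc/mali0/device/js_ctx_scheduling_mode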
++
++What:		/sys/class/misc/mali%u/device/js_scheduling_period
++Description:
++		This attribute is available only with a platform device that
++		supports a Job Manager based GPU. It is used to set the job
++		scheduler tick period in nanoseconds. The Job Scheduler
++		determines which jobs run on the GPU and for how long; it
++		makes decisions at a regular time interval determined by the
++		value in js_scheduling_period.
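++
++		Example (an illustrative value only; see the units noted
++		above):
++		echo 100 > /sys/class/misc/mali0/device/js_scheduling_period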
++
++What:		/sys/class/misc/mali%u/device/js_softstop_always
++Description:
++		This attribute is available only with a platform device that
++		supports a Job Manager based GPU. Soft-stops are normally
++		disabled when only a single context is present; this attribute
++		enables soft-stop in that case as well, which can be used for
++		debug and unit-testing purposes.
++
++What:		/sys/class/misc/mali%u/device/js_timeouts
++Description:
++		This attribute is available only with a platform device that
++		supports a Job Manager based GPU. It is used to set the soft
++		stop and hard stop times for the job scheduler.
++
++		Writing 0 to a field causes no change; writing -1 restores
++		the default timeout.
++
++		The format used to set js_timeouts is
++		"<soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss>
++		<hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss>
++		<reset_ms_cl> <reset_ms_dumping>"
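++
++		Example (illustrative values for the eight fields, in the
++		order given above):
++		echo 400 400 450 450 400 500 500 500 > /sys/class/misc/mali0/device/js_timeouts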
++
++
++What:		/sys/class/misc/mali%u/device/lp_mem_pool_max_size
++Description:
++		This attribute is used to set the maximum number of large pages
++		that the driver's memory pools can contain. Large pages are
++		2MB in size. Reading it displays the maximum size of all memory
++		pools; it can also be used to modify each individual pool.
++
++What:		/sys/class/misc/mali%u/device/lp_mem_pool_size
++Description:
++		This attribute is used to set the number of large memory pages
++		which should be populated. Changing this value may cause
++		existing pages to be removed from the pool, or new pages to be
++		created and then added to the pool. Reading it provides the
++		pool size for all available pools; individual pools can be
++		modified by writing.
++
++What:		/sys/class/misc/mali%u/device/mem_pool_max_size
++Description:
++		This attribute is used to set the maximum number of small pages
++		that the driver's memory pools can contain. Here small pages
++		are 4KB in size. Reading it displays the maximum size of all
++		available pools; it also allows the maximum size of individual
++		pools to be set.
++
++What:		/sys/class/misc/mali%u/device/mem_pool_size
++Description:
++		This attribute is used to set the number of small memory pages
++		which should be populated. Changing this value may cause
++		existing pages to be removed from the pool, or new pages to
++		be created and then added to the pool. Reading it provides the
++		pool size for all available pools; individual pools can be
++		modified by writing.
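++
++		Example (illustrative; populates 1024 small pages):
++		echo 1024 > /sys/class/misc/mali0/device/mem_pool_size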
++
++What:		/sys/class/misc/mali%u/device/device/mempool/ctx_default_max_size
++Description:
++		This attribute is used to set the maximum memory pool size for
++		all the memory pools, so that the maximum amount of free memory
++		that each pool can hold is identical.
++
++What:		/sys/class/misc/mali%u/device/device/mempool/lp_max_size
++Description:
++		This attribute is used to set the maximum number of large pages
++		for all memory pools that the driver can contain.
++		Large pages are of size 2MB.
++
++What:		/sys/class/misc/mali%u/device/device/mempool/max_size
++Description:
++		This attribute is used to set the maximum number of small pages
++		for all the memory pools that the driver can contain.
++		Here small pages are of size 4KB.
++
++What:		/sys/class/misc/mali%u/device/pm_poweroff
++Description:
++		This attribute contains the current values, represented as the
++		following space-separated integers:
++		• PM_GPU_POWEROFF_TICK_NS.
++		• PM_POWEROFF_TICK_SHADER.
++		• PM_POWEROFF_TICK_GPU.
++
++		Example:
++		echo 100000 4 4 > /sys/class/misc/mali0/device/pm_poweroff
++
++		Sets the following new values: 100,000ns tick, four ticks
++		for shader power down, and four ticks for GPU power down.
++
++What:		/sys/class/misc/mali%u/device/power_policy
++Description:
++		This attribute is used to find the power policy currently in
++		use. Reading it lists the available power policies, with the
++		currently selected one enclosed in square brackets.
++
++		Example:
++		cat /sys/class/misc/mali0/device/power_policy
++		[demand] coarse_demand always_on
++
++		To switch to a different policy at runtime write the valid entry
++		name back to the attribute.
++
++		Example:
++		echo "coarse_demand" > /sys/class/misc/mali0/device/power_policy
++
++What:		/sys/class/misc/mali%u/device/progress_timeout
++Description:
++		This attribute is available only with a Mali platform
++		device driver that supports a CSF GPU. This attribute
++		is used to set the progress timeout value and read the current
++		progress timeout value.
++
++		The progress timeout value is the maximum number of GPU cycles
++		allowed to elapse without forward progress before a GPU command
++		queue group is terminated.
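++
++		Example (an illustrative cycle count only):
++		echo 500000000 > /sys/class/misc/mali0/device/progress_timeout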
++
++What:		/sys/class/misc/mali%u/device/reset_timeout
++Description:
++		This attribute is used to set the number of milliseconds to
++		wait for the soft stop of GPU jobs to complete before
++		proceeding with the GPU reset.
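++
++		Example (an illustrative 500ms wait):
++		echo 500 > /sys/class/misc/mali0/device/reset_timeout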
++
++What:		/sys/class/misc/mali%u/device/soft_job_timeout
++Description:
++		This attribute is available only with a platform device that
++		supports a Job Manager based GPU. It is used to set the timeout
++		value for waiting for any soft event to complete.
++
++What:		/sys/class/misc/mali%u/device/scheduling/serialize_jobs
++Description:
++		This attribute is available only with a platform device that
++		supports a Job Manager based GPU.
++
++		The options available under this attribute are:
++		• none - disables serialization.
++		• intra-slot - serializes atoms within a slot; only one
++				atom per job slot.
++		• inter-slot - serializes atoms between slots; only one
++				job slot running at any time.
++		• full - a combination of both inter- and intra-slot
++				serialization, so only one atom and one job
++				slot running at any time.
++		• full-reset - full serialization, plus a GPU reset after
++				each atom completes.
++
++		These options are useful for debugging and investigating
++		failures and GPU hangs, to narrow down which atoms could be
++		causing trouble.
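++
++		Example (enables full serialization; write "none" to disable
++		it again):
++		echo full > /sys/class/misc/mali0/device/scheduling/serialize_jobs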
++
++What:		/sys/class/misc/mali%u/device/firmware_config/Compute iterator count/*
++Description:
++		This attribute is available only with a Mali platform
++		device driver that supports a CSF GPU. It is a read-only
++		attribute which indicates the maximum number of Compute
++		iterators supported by the GPU.
++
++What:		/sys/class/misc/mali%u/device/firmware_config/CSHWIF count/*
++Description:
++		This attribute is available only with a Mali platform
++		device driver that supports a CSF GPU. It is a read-only
++		attribute which indicates the maximum number of CSHWIFs
++		supported by the GPU.
++
++What:		/sys/class/misc/mali%u/device/firmware_config/Fragment iterator count/*
++Description:
++		This attribute is available only with a Mali platform
++		device driver that supports a CSF GPU. It is a read-only
++		attribute which indicates the maximum number of
++		Fragment iterators supported by the GPU.
++
++What:		/sys/class/misc/mali%u/device/firmware_config/Scoreboard set count/*
++Description:
++		This attribute is available only with a Mali platform
++		device driver that supports a CSF GPU. It is a read-only
++		attribute which indicates the maximum number of
++		Scoreboard sets supported by the GPU.
++
++What:		/sys/class/misc/mali%u/device/firmware_config/Tiler iterator count/*
++Description:
++		This attribute is available only with a Mali platform
++		device driver that supports a CSF GPU. It is a read-only
++		attribute which indicates the maximum number of Tiler
++		iterators supported by the GPU.
++
++What:		/sys/class/misc/mali%u/device/firmware_config/Log verbosity/*
++Description:
++		This attribute is available only with a Mali platform
++		device driver that supports a CSF GPU.
++
++		It is used to enable firmware logs. The valid logging levels
++		are indicated by the 'min' and 'max' attribute values, which
++		are read-only.
++
++		The log level can be set using the 'cur' read/write attribute:
++		writing a valid log level value within the min/max range sets
++		the desired log level for firmware logs.
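++
++		Example (illustrative; assumes 2 lies within the advertised
++		min/max range):
++		echo 2 > "/sys/class/misc/mali0/device/firmware_config/Log verbosity/cur"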
+diff --git a/dvalin/kernel/Documentation/devicetree/bindings/arm/mali-midgard.txt b/dvalin/kernel/Documentation/devicetree/bindings/arm/mali-midgard.txt
+index dd8f733..a74d569 100644
+--- a/dvalin/kernel/Documentation/devicetree/bindings/arm/mali-midgard.txt
++++ b/dvalin/kernel/Documentation/devicetree/bindings/arm/mali-midgard.txt
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2013-2020 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2013-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,8 +16,6 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+-#
+ #
+ 
+ * ARM Mali Midgard / Bifrost devices
+@@ -46,12 +45,12 @@ Documentation/devicetree/bindings/regulator/regulator.txt for details.
+                        This is optional.
+ - operating-points-v2 : Refer to Documentation/devicetree/bindings/power/mali-opp.txt
+ for details.
+-- quirks_jm : Used to write to the JM_CONFIG register or equivalent.
++- quirks_gpu : Used to write to the JM_CONFIG or CSF_CONFIG register.
+ 	  Should be used with care. Options passed here are used to override
+ 	  certain default behavior. Note: This will override 'idvs-group-size'
+ 	  field in devicetree and module param 'corestack_driver_control',
+-	  therefore if 'quirks_jm' is used then 'idvs-group-size' and
+-	  'corestack_driver_control' value should be incorporated into 'quirks_jm'.
++	  therefore if 'quirks_gpu' is used then 'idvs-group-size' and
++	  'corestack_driver_control' value should be incorporated into 'quirks_gpu'.
+ - quirks_sc : Used to write to the SHADER_CONFIG register.
+ 	  Should be used with care. Options passed here are used to override
+ 	  certain default behavior.
+@@ -64,8 +63,8 @@ for details.
+ - power_model : Sets the power model parameters. Defined power models include:
+ 	  "mali-simple-power-model", "mali-g51-power-model", "mali-g52-power-model",
+ 	  "mali-g52_r1-power-model", "mali-g71-power-model", "mali-g72-power-model",
+-	  "mali-g76-power-model", "mali-g77-power-model", "mali-tnax-power-model"
+-	  and "mali-tbex-power-model".
++	  "mali-g76-power-model", "mali-g77-power-model", "mali-tnax-power-model",
++	  "mali-tbex-power-model" and "mali-tbax-power-model".
+ 	- mali-simple-power-model: this model derives the GPU power usage based
+ 	  on the GPU voltage scaled by the system temperature. Note: it was
+ 	  designed for the Juno platform, and may not be suitable for others.
+@@ -98,6 +97,8 @@ for details.
+ 	  are used at different points so care should be taken to configure
+ 	  both power models in the device tree (specifically dynamic-coefficient,
+ 	  static-coefficient and scale) to best match the platform.
++- power_policy : Sets the GPU power policy at probe time. Available options are
++                 "coarse_demand" and "always_on". If not set, then "coarse_demand" is used.
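++                 An illustrative snippet for a gpu node:
++                     power_policy = "always_on";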
+ - system-coherency : Sets the coherency protocol to be used for coherent
+ 		     accesses made from the GPU.
+ 		     If not set then no coherency is used.
+diff --git a/dvalin/kernel/Documentation/devicetree/bindings/arm/memory_group_manager.txt b/dvalin/kernel/Documentation/devicetree/bindings/arm/memory_group_manager.txt
+index fda8f00..634973f 100644
+--- a/dvalin/kernel/Documentation/devicetree/bindings/arm/memory_group_manager.txt
++++ b/dvalin/kernel/Documentation/devicetree/bindings/arm/memory_group_manager.txt
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,8 +16,6 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+-#
+ #
+ 
+ * Arm memory group manager for Mali GPU device drivers
+diff --git a/dvalin/kernel/Documentation/devicetree/bindings/arm/priority_control_manager.txt b/dvalin/kernel/Documentation/devicetree/bindings/arm/priority_control_manager.txt
+new file mode 100644
+index 0000000..c7dd14f
+--- /dev/null
++++ b/dvalin/kernel/Documentation/devicetree/bindings/arm/priority_control_manager.txt
+@@ -0,0 +1,48 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
++#
++# (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
++#
++# This program is free software and is provided to you under the terms of the
++# GNU General Public License version 2 as published by the Free Software
++# Foundation, and any use by you of this program is subject to the terms
++# of such GNU license.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, you can access it online at
++# http://www.gnu.org/licenses/gpl-2.0.html.
++#
++#
++
++* Arm priority control manager for Mali GPU device drivers
++
++Required properties:
++
++- compatible: Must be "arm,priority-control-manager"
++
++An example node:
++
++        gpu_priority_control_manager: priority-control-manager {
++                compatible = "arm,priority-control-manager";
++        };
++
++It must also be referenced by the GPU node via its priority-control-manager property:
++
++	gpu: gpu@0x6e000000 {
++		compatible = "arm,mali-midgard";
++		reg = <0x0 0x6e000000 0x0 0x200000>;
++		interrupts = <0 168 4>, <0 168 4>, <0 168 4>;
++		interrupt-names = "JOB", "MMU", "GPU";
++		clocks = <&scpi_dvfs 2>;
++		clock-names = "clk_mali";
++		system-coherency = <31>;
++		priority-control-manager = <&gpu_priority_control_manager>;
++		operating-points = <
++			/* KHz uV */
++			50000 820000
++		>;
++	};
+diff --git a/dvalin/kernel/Documentation/devicetree/bindings/arm/protected_memory_allocator.txt b/dvalin/kernel/Documentation/devicetree/bindings/arm/protected_memory_allocator.txt
+index f054348..89a3cc7 100644
+--- a/dvalin/kernel/Documentation/devicetree/bindings/arm/protected_memory_allocator.txt
++++ b/dvalin/kernel/Documentation/devicetree/bindings/arm/protected_memory_allocator.txt
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,8 +16,6 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+-#
+ #
+ 
+ * Arm protected memory allocator for Mali GPU device drivers
+diff --git a/dvalin/kernel/Documentation/devicetree/bindings/power/mali-opp.txt b/dvalin/kernel/Documentation/devicetree/bindings/power/mali-opp.txt
+index 49ed773..b9c0743 100644
+--- a/dvalin/kernel/Documentation/devicetree/bindings/power/mali-opp.txt
++++ b/dvalin/kernel/Documentation/devicetree/bindings/power/mali-opp.txt
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2017, 2019 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2017, 2019-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,8 +16,6 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+-#
+ #
+ 
+ * ARM Mali Midgard OPP
+@@ -54,7 +53,7 @@ Optional properties:
+ 
+ - opp-core-count: Number of cores to use for this OPP. If this is present then
+   the driver will build a core mask using the available core mask provided by
+-  the GPU hardware.
++  the GPU hardware. An opp-core-count value of 0 is not permitted.
+ 
+   If neither this nor opp-core-mask are present then all shader cores will be
+   used for this OPP.
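+
+As an illustrative sketch (not part of the binding text), an OPP entry that
+restricts an operating point to two shader cores might read as follows; the
+frequency and voltage values are placeholders:
+
+	opp@450000000 {
+		opp-hz = /bits/ 64 <450000000>;
+		opp-microvolt = <820000>;
+		opp-core-count = <2>;
+	};
+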
+diff --git a/dvalin/kernel/Documentation/dma-buf-test-exporter.txt b/dvalin/kernel/Documentation/dma-buf-test-exporter.txt
+index 8d8cbc9..b01020c 100644
+--- a/dvalin/kernel/Documentation/dma-buf-test-exporter.txt
++++ b/dvalin/kernel/Documentation/dma-buf-test-exporter.txt
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2012-2013, 2020-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,10 +16,7 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+ #
+-#
+-
+ 
+ =====================
+ dma-buf-test-exporter
+@@ -42,5 +40,3 @@ It supports being compiled as a module both in-tree and out-of-tree.
+ 
+ See include/linux/dma-buf-test-exporter.h for the ioctl interface.
+ See Documentation/dma-buf-sharing.txt for details on dma_buf.
+-
+-
+diff --git a/dvalin/kernel/Mconfig b/dvalin/kernel/Mconfig
+index e451591..217715c 100644
+--- a/dvalin/kernel/Mconfig
++++ b/dvalin/kernel/Mconfig
+@@ -1,27 +1,26 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-#
+-# (C) COPYRIGHT 2017-2020 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2017-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
+ #
+-# A copy of the licence is included with the program, and can also be obtained
+-# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+-# Boston, MA 02110-1301, USA.
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, you can access it online at
++# http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+ #
+ 
+-source "kernel/drivers/gpu/arm/midgard/Mconfig"
+-source "kernel/drivers/gpu/arm/midgard/arbitration/Mconfig"
++menu "Kernel menu"
+ 
+-config DMA_BUF_SYNC_IOCTL_SUPPORTED
+-	bool "Kernel DMA buffers support DMA_BUF_IOCTL_SYNC"
+-	depends on BACKEND_KERNEL
+-	default y
++source "kernel/drivers/base/arm/Mconfig"
++source "kernel/drivers/gpu/arm/midgard/Mconfig"
+ 
+-config BUILD_CSF_ONLY_MODULE
+-	bool "Build CSF GPU specific kernel modules"
+-	depends on BUILD_KERNEL_MODULES && GPU_HAS_CSF
+-	default y
++endmenu
+diff --git a/dvalin/kernel/build.bp b/dvalin/kernel/build.bp
+index 2bc725f..c97da2c 100644
+--- a/dvalin/kernel/build.bp
++++ b/dvalin/kernel/build.bp
+@@ -1,15 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2016-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2016-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+- * A copy of the licence is included with the program, and can also be obtained
+- * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+- * Boston, MA 02110-1301, USA.
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, you can access it online at
++ * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+  */
+ 
+@@ -25,6 +31,7 @@ bob_install_group {
+ 
+ bob_defaults {
+     name: "kernel_defaults",
++    build_by_default: false,
+     enabled: false,
+     exclude_srcs: [
+         "**/*.mod.c",
+@@ -33,6 +40,7 @@ bob_defaults {
+         "include",
+     ],
+     build_kernel_modules: {
++        build_by_default: true,
+         enabled: true,
+         kernel_dir: "{{.kernel_dir}}",
+         kernel_cross_compile: "{{.kernel_compiler}}",
+@@ -42,6 +50,8 @@ bob_defaults {
+         kernel_ld: "{{.kernel_ld}}",
+     },
+     install_group: "IG_kernel_modules",
++    add_to_alias: ["kernel"],
++    owner: "{{.android_module_owner}}",
+     cflags: [
+         "-Wall",
+     ],
+@@ -54,49 +64,12 @@ bob_defaults {
+             "optional",
+         ],
+     },
+-    kbuild_options: [
+-        // Start of CS experimental features definitions.
+-        // If there is nothing below, definition should be added as follows:
+-        // "MALI_EXPERIMENTAL_FEATURE={{.experimental_feature}}"
+-        // experimental_feature above comes from Mconfig in
+-        // <ddk_root>/product/base/
+-        // However, in Mconfig, experimental_feature should be looked up (for
+-        // similar explanation to this one) as ALLCAPS, i.e.
+-        // EXPERIMENTAL_FEATURE.
+-        //
+-        // IMPORTANT: MALI_CS_EXPERIMENTAL should NEVER be defined below as it
+-        // is an umbrella feature that would be open for inappropriate use
+-        // (catch-all for experimental CS code without separating it into
+-        // different features).
+-        "MALI_JIT_PRESSURE_LIMIT={{.jit_pressure_limit}}",
+-        "MALI_INCREMENTAL_RENDERING={{.incremental_rendering}}",
+-    ],
+-}
+-
+-bob_defaults {
+-    name: "kutf_includes",
+-    local_include_dirs: [
+-        "drivers/gpu/arm/midgard/tests/include",
+-    ],
+-}
+-
+-bob_defaults {
+-    name: "kernel_test_includes",
+-    defaults: ["kutf_includes"],
+-    local_include_dirs: [
+-        "drivers/gpu/arm",
+-        "drivers/gpu/arm/midgard",
+-        "drivers/gpu/arm/midgard/backend/gpu",
+-        "drivers/gpu/arm/midgard/debug",
+-        "drivers/gpu/arm/midgard/debug/backend",
+-    ],
+ }
+ 
++// Alias for all kernel modules. `kernel_defaults` uses `add_to_alias` to
++// ensure that any module using those defaults is included in this alias;
++// any bob_kernel_module that does not use them must set `add_to_alias`
++// itself or be listed here explicitly.
+ bob_alias {
+     name: "kernel",
+-    srcs: [
+-        "dma-buf-test-exporter",
+-        "memory_group_manager",
+-        "mali_kbase",
+-    ],
+ }
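+
+As a hypothetical example of the rule described in the comment above, a
+bob_kernel_module that does not use kernel_defaults would have to opt into
+the alias itself (the module and file names here are invented):
+
+	bob_kernel_module {
+	    name: "my_extra_module",
+	    srcs: ["Kbuild", "my_extra_module.c"],
+	    add_to_alias: ["kernel"],
+	}
+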
+diff --git a/dvalin/kernel/drivers/base/arm/Kbuild b/dvalin/kernel/drivers/base/arm/Kbuild
+new file mode 100644
+index 0000000..b0fbf93
+--- /dev/null
++++ b/dvalin/kernel/drivers/base/arm/Kbuild
+@@ -0,0 +1,34 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
++#
++# (C) COPYRIGHT 2021 ARM Limited. All rights reserved.
++#
++# This program is free software and is provided to you under the terms of the
++# GNU General Public License version 2 as published by the Free Software
++# Foundation, and any use by you of this program is subject to the terms
++# of such GNU license.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, you can access it online at
++# http://www.gnu.org/licenses/gpl-2.0.html.
++#
++#
++
++#
++# ccflags
++#
++ccflags-y += -I$(src)/../../../include
++
++subdir-ccflags-y += $(ccflags-y)
++
++#
++# Kernel modules
++#
++obj-$(CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER) += dma_buf_test_exporter/
++obj-$(CONFIG_MALI_MEMORY_GROUP_MANAGER) += memory_group_manager/
++obj-$(CONFIG_MALI_PROTECTED_MEMORY_ALLOCATOR) += protected_memory_allocator/
++
+diff --git a/dvalin/kernel/drivers/base/arm/Kconfig b/dvalin/kernel/drivers/base/arm/Kconfig
+new file mode 100644
+index 0000000..75d5434
+--- /dev/null
++++ b/dvalin/kernel/drivers/base/arm/Kconfig
+@@ -0,0 +1,64 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
++#
++# (C) COPYRIGHT 2021 ARM Limited. All rights reserved.
++#
++# This program is free software and is provided to you under the terms of the
++# GNU General Public License version 2 as published by the Free Software
++# Foundation, and any use by you of this program is subject to the terms
++# of such GNU license.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, you can access it online at
++# http://www.gnu.org/licenses/gpl-2.0.html.
++#
++#
++
++menuconfig MALI_BASE_MODULES
++	bool "Mali Base extra modules"
++	default n
++	help
++	  Enable this option to build support for the Arm Mali base modules.
++	  These modules provide extra features or debug interfaces and are
++	  optional for the use of the Mali GPU modules.
++
++config DMA_SHARED_BUFFER_TEST_EXPORTER
++	bool "Build dma-buf framework test exporter module"
++	depends on MALI_BASE_MODULES && DMA_SHARED_BUFFER
++	default y
++	help
++	  This option will build the dma-buf framework test exporter module.
++	  It can be used to help test dma-buf importers.
++
++	  Modules:
++	    - dma-buf-test-exporter.ko
++
++config MALI_MEMORY_GROUP_MANAGER
++	bool "Build Mali Memory Group Manager module"
++	depends on MALI_BASE_MODULES
++	default y
++	help
++	  This option will build the memory group manager module.
++	  This is an example implementation for allocation and release of pages
++	  for memory pools managed by Mali GPU device drivers.
++
++	  Modules:
++	    - memory_group_manager.ko
++
++config MALI_PROTECTED_MEMORY_ALLOCATOR
++	bool "Build Mali Protected Memory Allocator module"
++	depends on MALI_BASE_MODULES && MALI_CSF_SUPPORT
++	default y
++	help
++	  This option will build the protected memory allocator module.
++	  This is an example implementation for allocation and release of pages
++	  of secure memory intended to be used by the firmware
++	  of Mali GPU device drivers.
++
++	  Modules:
++	    - protected_memory_allocator.ko
++
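+
+For instance, a config fragment that enables all three optional modules could
+look like the following (illustrative; the test exporter additionally depends
+on DMA_SHARED_BUFFER and the protected memory allocator on MALI_CSF_SUPPORT):
+
+	CONFIG_MALI_BASE_MODULES=y
+	CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER=y
+	CONFIG_MALI_MEMORY_GROUP_MANAGER=y
+	CONFIG_MALI_PROTECTED_MEMORY_ALLOCATOR=y
+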
+diff --git a/dvalin/kernel/drivers/base/arm/Makefile b/dvalin/kernel/drivers/base/arm/Makefile
+new file mode 100644
+index 0000000..0bd6ab5
+--- /dev/null
++++ b/dvalin/kernel/drivers/base/arm/Makefile
+@@ -0,0 +1,98 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
++#
++# (C) COPYRIGHT 2021 ARM Limited. All rights reserved.
++#
++# This program is free software and is provided to you under the terms of the
++# GNU General Public License version 2 as published by the Free Software
++# Foundation, and any use by you of this program is subject to the terms
++# of such GNU license.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, you can access it online at
++# http://www.gnu.org/licenses/gpl-2.0.html.
++#
++#
++
++#
++# Paths
++#
++KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
++KDIR ?= $(KERNEL_SRC)
++
++ifeq ($(KDIR),)
++    $(error Must specify KDIR to point to the kernel to target)
++endif
++
++vars :=
++#
++# Default configuration values
++#
++CONFIG_MALI_BASE_MODULES ?= n
++
++ifeq ($(CONFIG_MALI_BASE_MODULES),y)
++    CONFIG_MALI_CSF_SUPPORT ?= n
++
++    ifneq ($(CONFIG_DMA_SHARED_BUFFER),n)
++        CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER ?= y
++    else
++        # Prevent misuse when CONFIG_DMA_SHARED_BUFFER=n
++        CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER = n
++    endif
++
++    CONFIG_MALI_MEMORY_GROUP_MANAGER ?= y
++
++    ifneq ($(CONFIG_MALI_CSF_SUPPORT), n)
++        CONFIG_MALI_PROTECTED_MEMORY_ALLOCATOR ?= y
++    endif
++
++else
++    # Prevent misuse when CONFIG_MALI_BASE_MODULES=n
++    CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER = n
++    CONFIG_MALI_MEMORY_GROUP_MANAGER = n
++    CONFIG_MALI_PROTECTED_MEMORY_ALLOCATOR = n
++
++endif
++
++CONFIGS := \
++    CONFIG_MALI_BASE_MODULES \
++    CONFIG_MALI_CSF_SUPPORT \
++    CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER \
++    CONFIG_MALI_MEMORY_GROUP_MANAGER \
++    CONFIG_MALI_PROTECTED_MEMORY_ALLOCATOR
++
++
++#
++# MAKE_ARGS to pass the custom CONFIGs to an out-of-tree build
++#
++# Generate the list of CONFIGs and values.
++# $(value config) is the name of the CONFIG option.
++# $(value $(value config)) is its value (y, m).
++# When the CONFIG is not set to y or m, it defaults to n.
++MAKE_ARGS := $(foreach config,$(CONFIGS), \
++                    $(if $(filter y m,$(value $(value config))), \
++                        $(value config)=$(value $(value config)), \
++                        $(value config)=n))
++
++#
++# EXTRA_CFLAGS to define the custom CONFIGs for an out-of-tree build
++#
++# Generate the list of CONFIGs defines with values from CONFIGS.
++# $(value config) is the name of the CONFIG option.
++# When set to y or m, the CONFIG gets defined to 1.
++EXTRA_CFLAGS := $(foreach config,$(CONFIGS), \
++                    $(if $(filter y m,$(value $(value config))), \
++                        -D$(value config)=1))
++
++all:
++	$(MAKE) -C $(KDIR) M=$(CURDIR) $(MAKE_ARGS) EXTRA_CFLAGS="$(EXTRA_CFLAGS)" KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules
++
++modules_install:
++	$(MAKE) -C $(KDIR) M=$(CURDIR) $(MAKE_ARGS) modules_install
++
++clean:
++	$(MAKE) -C $(KDIR) M=$(CURDIR) $(MAKE_ARGS) clean
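+
+As a usage sketch (paths illustrative), the modules can then be built out of
+tree by pointing KDIR at a kernel build directory and enabling the umbrella
+option on the command line:
+
+	make KDIR=/lib/modules/$(uname -r)/build CONFIG_MALI_BASE_MODULES=y
+
+With that setting, MAKE_ARGS expands to entries such as
+CONFIG_MALI_MEMORY_GROUP_MANAGER=y, and EXTRA_CFLAGS gains the matching
+-DCONFIG_MALI_MEMORY_GROUP_MANAGER=1 define.
+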
+diff --git a/dvalin/kernel/drivers/base/arm/Mconfig b/dvalin/kernel/drivers/base/arm/Mconfig
+new file mode 100644
+index 0000000..d21a72e
+--- /dev/null
++++ b/dvalin/kernel/drivers/base/arm/Mconfig
+@@ -0,0 +1,64 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
++#
++# (C) COPYRIGHT 2021 ARM Limited. All rights reserved.
++#
++# This program is free software and is provided to you under the terms of the
++# GNU General Public License version 2 as published by the Free Software
++# Foundation, and any use by you of this program is subject to the terms
++# of such GNU license.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, you can access it online at
++# http://www.gnu.org/licenses/gpl-2.0.html.
++#
++#
++
++menuconfig MALI_BASE_MODULES
++	bool "Mali Base extra modules"
++	default y if BACKEND_KERNEL
++	help
++	  Enable this option to build support for the Arm Mali base modules.
++	  These modules provide extra features or debug interfaces and are
++	  optional for the use of the Mali GPU modules.
++
++config DMA_SHARED_BUFFER_TEST_EXPORTER
++	bool "Build dma-buf framework test exporter module"
++	depends on MALI_BASE_MODULES
++	default y
++	help
++	  This option will build the dma-buf framework test exporter module.
++	  It can be used to help test dma-buf importers.
++
++	  Modules:
++	    - dma-buf-test-exporter.ko
++
++config MALI_MEMORY_GROUP_MANAGER
++	bool "Build Mali Memory Group Manager module"
++	depends on MALI_BASE_MODULES
++	default y
++	help
++	  This option will build the memory group manager module.
++	  This is an example implementation for allocation and release of pages
++	  for memory pools managed by Mali GPU device drivers.
++
++	  Modules:
++	    - memory_group_manager.ko
++
++config MALI_PROTECTED_MEMORY_ALLOCATOR
++	bool "Build Mali Protected Memory Allocator module"
++	depends on MALI_BASE_MODULES && GPU_HAS_CSF
++	default y
++	help
++	  This option will build the protected memory allocator module.
++	  This is an example implementation for allocation and release of pages
++	  of secure memory intended to be used by the firmware
++	  of Mali GPU device drivers.
++
++	  Modules:
++	    - protected_memory_allocator.ko
++
+diff --git a/dvalin/kernel/drivers/base/dma_buf_lock/src/Kbuild b/dvalin/kernel/drivers/base/arm/dma_buf_lock/src/Kbuild
+similarity index 78%
+rename from dvalin/kernel/drivers/base/dma_buf_lock/src/Kbuild
+rename to dvalin/kernel/drivers/base/arm/dma_buf_lock/src/Kbuild
+index ddf1bb5..c7ae332 100644
+--- a/dvalin/kernel/drivers/base/dma_buf_lock/src/Kbuild
++++ b/dvalin/kernel/drivers/base/arm/dma_buf_lock/src/Kbuild
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2012, 2020-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,10 +16,8 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+-#
+ #
+ 
+-ifneq ($(CONFIG_DMA_SHARED_BUFFER),)
++ifeq ($(CONFIG_DMA_SHARED_BUFFER), y)
+ obj-m := dma_buf_lock.o
+ endif
+diff --git a/dvalin/kernel/drivers/base/dma_buf_lock/src/Makefile b/dvalin/kernel/drivers/base/arm/dma_buf_lock/src/Makefile
+similarity index 71%
+rename from dvalin/kernel/drivers/base/dma_buf_lock/src/Makefile
+rename to dvalin/kernel/drivers/base/arm/dma_buf_lock/src/Makefile
+index 3b10406..451d2d7 100644
+--- a/dvalin/kernel/drivers/base/dma_buf_lock/src/Makefile
++++ b/dvalin/kernel/drivers/base/arm/dma_buf_lock/src/Makefile
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2012, 2020-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,8 +16,6 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+-#
+ #
+ 
+ # linux build system bootstrap for out-of-tree module
+@@ -24,15 +23,17 @@
+ # default to building for the host
+ ARCH ?= $(shell uname -m)
+ 
+-ifeq ($(KDIR),)
+-$(error Must specify KDIR to point to the kernel to target))
+-endif
++# Handle Android Common Kernel source naming
++KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
++KDIR ?= $(KERNEL_SRC)
+ 
+ all: dma_buf_lock
+ 
+ dma_buf_lock:
+-	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include"
++	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../../include"
+ 
+ clean:
+ 	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+ 
++modules_install:
++	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules_install
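+
+With the KERNEL_SRC/KDIR defaulting above, the module now builds against the
+running kernel when no path is given; a build against an Android common
+kernel checkout might be invoked as (path illustrative):
+
+	make KERNEL_SRC=/path/to/android/common dma_buf_lock
+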
+diff --git a/dvalin/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.c b/dvalin/kernel/drivers/base/arm/dma_buf_lock/src/dma_buf_lock.c
+similarity index 90%
+rename from dvalin/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.c
+rename to dvalin/kernel/drivers/base/arm/dma_buf_lock/src/dma_buf_lock.c
+index 529ce71..f5ab1ed 100644
+--- a/dvalin/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.c
++++ b/dvalin/kernel/drivers/base/arm/dma_buf_lock/src/dma_buf_lock.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2012-2013, 2017-2018 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2012-2014, 2017-2018, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ #include <linux/version.h>
+@@ -29,7 +28,11 @@
+ #include <linux/device.h>
+ #include <linux/slab.h>
+ #include <linux/atomic.h>
++#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
+ #include <linux/reservation.h>
++#else
++#include <linux/dma-resv.h>
++#endif
+ #include <linux/dma-buf.h>
+ #include <linux/wait.h>
+ #include <linux/sched.h>
+@@ -37,7 +40,7 @@
+ #include <linux/anon_inodes.h>
+ #include <linux/file.h>
+ 
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
++#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+ 
+ #include <linux/fence.h>
+ 
+@@ -60,7 +63,7 @@
+ 
+ #include <linux/dma-fence.h>
+ 
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
++#if (KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE)
+ #define dma_fence_get_status(a) (dma_fence_is_signaled(a) ? \
+ 	(a)->status ?: 1 \
+ 	: 0)
+@@ -101,7 +104,7 @@ static struct file_operations dma_buf_lock_fops =
+ 
+ typedef struct dma_buf_lock_resource
+ {
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
++#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+ 	struct fence fence;
+ #else
+ 	struct dma_fence fence;
+@@ -127,7 +130,7 @@ typedef struct dma_buf_lock_resource
+  * @node:     List head for linking this callback to the lock resource
+  */
+ struct dma_buf_lock_fence_cb {
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
++#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+ 	struct fence_cb fence_cb;
+ 	struct fence *fence;
+ #else
+@@ -151,7 +154,7 @@ static void dma_buf_lock_dounlock(struct kref *ref);
+ static DEFINE_SPINLOCK(dma_buf_lock_fence_lock);
+ 
+ static const char *
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
++#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+ dma_buf_lock_fence_get_driver_name(struct fence *fence)
+ #else
+ dma_buf_lock_fence_get_driver_name(struct dma_fence *fence)
+@@ -161,7 +164,7 @@ dma_buf_lock_fence_get_driver_name(struct dma_fence *fence)
+ }
+ 
+ static const char *
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
++#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+ dma_buf_lock_fence_get_timeline_name(struct fence *fence)
+ #else
+ dma_buf_lock_fence_get_timeline_name(struct dma_fence *fence)
+@@ -171,7 +174,7 @@ dma_buf_lock_fence_get_timeline_name(struct dma_fence *fence)
+ }
+ 
+ static bool
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
++#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+ dma_buf_lock_fence_enable_signaling(struct fence *fence)
+ #else
+ dma_buf_lock_fence_enable_signaling(struct dma_fence *fence)
+@@ -180,7 +183,7 @@ dma_buf_lock_fence_enable_signaling(struct dma_fence *fence)
+ 	return true;
+ }
+ 
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
++#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+ const struct fence_ops dma_buf_lock_fence_ops = {
+ 	.wait = fence_default_wait,
+ #else
+@@ -235,7 +238,7 @@ dma_buf_lock_fence_work(struct work_struct *pwork)
+ }
+ 
+ static void
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
++#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+ dma_buf_lock_fence_callback(struct fence *fence, struct fence_cb *cb)
+ #else
+ dma_buf_lock_fence_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
+@@ -256,14 +259,13 @@ dma_buf_lock_fence_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
+ 		atomic_set(&resource->locked, 1);
+ 		wake_up(&resource->wait);
+ 
+-		if (resource->exclusive) {
++		if (resource->exclusive)
+ 			/* Warn if the work was already queued */
+ 			WARN_ON(!schedule_work(&resource->work));
+-		}
+ 	}
+ }
+ 
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
++#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+ static int
+ dma_buf_lock_fence_add_callback(dma_buf_lock_resource *resource,
+ 				struct fence *fence,
+@@ -317,12 +319,19 @@ dma_buf_lock_fence_add_callback(dma_buf_lock_resource *resource,
+ 	return err;
+ }
+ 
++#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
+ static int
+ dma_buf_lock_add_fence_reservation_callback(dma_buf_lock_resource *resource,
+ 					    struct reservation_object *resv,
+ 					    bool exclusive)
++#else
++static int
++dma_buf_lock_add_fence_reservation_callback(dma_buf_lock_resource *resource,
++					    struct dma_resv *resv,
++					    bool exclusive)
++#endif
+ {
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
++#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
+ 	struct fence *excl_fence = NULL;
+ 	struct fence **shared_fences = NULL;
+ #else
+@@ -332,7 +341,12 @@ dma_buf_lock_add_fence_reservation_callback(dma_buf_lock_resource *resource,
+ 	unsigned int shared_count = 0;
+ 	int err, i;
+ 
+-	err = reservation_object_get_fences_rcu(resv,
++#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
++	err = reservation_object_get_fences_rcu(
++#else
++	err = dma_resv_get_fences_rcu(
++#endif
++						resv,
+ 						&excl_fence,
+ 						&shared_count,
+ 						&shared_fences);
+@@ -393,7 +407,11 @@ static int
+ dma_buf_lock_acquire_fence_reservation(dma_buf_lock_resource *resource,
+ 				       struct ww_acquire_ctx *ctx)
+ {
++#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
+ 	struct reservation_object *content_resv = NULL;
++#else
++	struct dma_resv *content_resv = NULL;
++#endif
+ 	unsigned int content_resv_idx = 0;
+ 	unsigned int r;
+ 	int err = 0;
+@@ -472,21 +490,16 @@ static unsigned int dma_buf_lock_handle_poll(struct file *file,
+ #if DMA_BUF_LOCK_DEBUG
+ 	printk("dma_buf_lock_handle_poll\n");
+ #endif
+-	if (1 == atomic_read(&resource->locked))
+-	{
++	if (atomic_read(&resource->locked) == 1) {
+ 		/* Resources have been locked */
+ 		ret = POLLIN | POLLRDNORM;
+ 		if (resource->exclusive)
+-		{
+ 			ret |=  POLLOUT | POLLWRNORM;
+-		}
+ 	}
+ 	else
+ 	{
+-		if (!poll_does_not_wait(wait)) 
+-		{
++		if (!poll_does_not_wait(wait))
+ 			poll_wait(file, &resource->wait, wait);
+-		}
+ 	}
+ #if DMA_BUF_LOCK_DEBUG
+ 	printk("dma_buf_lock_handle_poll : return %i\n", ret);
+@@ -525,29 +538,19 @@ static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
+ 	int i;
+ 	int ret;
+ 
+-	if (NULL == request->list_of_dma_buf_fds)
+-	{
++	if (request->list_of_dma_buf_fds == NULL)
+ 		return -EINVAL;
+-	}
+ 	if (request->count <= 0)
+-	{
+ 		return -EINVAL;
+-	}
+ 	if (request->count > DMA_BUF_LOCK_BUF_MAX)
+-	{
+ 		return -EINVAL;
+-	}
+ 	if (request->exclusive != DMA_BUF_LOCK_NONEXCLUSIVE &&
+ 	    request->exclusive != DMA_BUF_LOCK_EXCLUSIVE)
+-	{
+ 		return -EINVAL;
+-	}
+ 
+ 	resource = kzalloc(sizeof(dma_buf_lock_resource), GFP_KERNEL);
+-	if (NULL == resource)
+-	{
++	if (resource == NULL)
+ 		return -ENOMEM;
+-	}
+ 
+ 	atomic_set(&resource->locked, 0);
+ 	kref_init(&resource->refcount);
+@@ -559,8 +562,7 @@ static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
+ 	size = request->count * sizeof(int);
+ 	resource->list_of_dma_buf_fds = kmalloc(size, GFP_KERNEL);
+ 
+-	if (NULL == resource->list_of_dma_buf_fds)
+-	{
++	if (resource->list_of_dma_buf_fds == NULL) {
+ 		kfree(resource);
+ 		return -ENOMEM;
+ 	}
+@@ -569,8 +571,7 @@ static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
+ 	size = sizeof(struct dma_buf *) * request->count;
+ 	resource->dma_bufs = kmalloc(size, GFP_KERNEL);
+ 
+-	if (NULL == resource->dma_bufs)
+-	{
++	if (resource->dma_bufs == NULL) {
+ 		kfree(resource->list_of_dma_buf_fds);
+ 		kfree(resource);
+ 		return -ENOMEM;
+@@ -578,8 +579,9 @@ static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
+ 
+ 	/* Copy requested list of dma_buf_fds from user space */
+ 	size = request->count * sizeof(int);
+-	if (0 != copy_from_user(resource->list_of_dma_buf_fds, (void __user *)request->list_of_dma_buf_fds, size))
+-	{
++	if (copy_from_user(resource->list_of_dma_buf_fds,
++			   (void __user *)request->list_of_dma_buf_fds,
++			   size) != 0) {
+ 		kfree(resource->list_of_dma_buf_fds);
+ 		kfree(resource->dma_bufs);
+ 		kfree(resource);
+@@ -587,9 +589,7 @@ static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
+ 	}
+ #if DMA_BUF_LOCK_DEBUG
+ 	for (i = 0; i < request->count; i++)
+-	{
+ 		printk("dma_buf %i = %X\n", i, resource->list_of_dma_buf_fds[i]);
+-	}
+ #endif
+ 
+ 	/* Initialize the fence associated with dma_buf_lock resource */
+@@ -620,8 +620,7 @@ static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
+ 		}
+ 
+ 		/*Check the reservation object associated with dma_buf */
+-		if (NULL == resource->dma_bufs[i]->resv)
+-		{
++		if (resource->dma_bufs[i]->resv == NULL) {
+ 			mutex_lock(&dma_buf_lock_mutex);
+ 			kref_put(&resource->refcount, dma_buf_lock_dounlock);
+ 			mutex_unlock(&dma_buf_lock_mutex);
+@@ -680,10 +679,18 @@ static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
+ 		kref_get(&resource->refcount);
+ 
+ 	for (i = 0; i < request->count; i++) {
++#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
+ 		struct reservation_object *resv = resource->dma_bufs[i]->resv;
+-
++#else
++		struct dma_resv *resv = resource->dma_bufs[i]->resv;
++#endif
+ 		if (!test_bit(i, &resource->exclusive)) {
++
++#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
+ 			ret = reservation_object_reserve_shared(resv);
++#else
++			ret = dma_resv_reserve_shared(resv, 0);
++#endif
+ 			if (ret) {
+ #if DMA_BUF_LOCK_DEBUG
+ 				printk(KERN_DEBUG "dma_buf_lock_dolock : Error %d reserving space for shared fence.\n", ret);
+@@ -701,7 +708,11 @@ static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
+ 				break;
+ 			}
+ 
++#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
+ 			reservation_object_add_shared_fence(resv, &resource->fence);
++#else
++			dma_resv_add_shared_fence(resv, &resource->fence);
++#endif
+ 		} else {
+ 			ret = dma_buf_lock_add_fence_reservation_callback(resource,
+ 									  resv,
+@@ -713,7 +724,11 @@ static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
+ 				break;
+ 			}
+ 
++#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
+ 			reservation_object_add_excl_fence(resv, &resource->fence);
++#else
++			dma_resv_add_excl_fence(resv, &resource->fence);
++#endif
+ 		}
+ 	}
+ 
+@@ -783,27 +798,21 @@ static int __init dma_buf_lock_init(void)
+ #endif
+ 	err = alloc_chrdev_region(&dma_buf_lock_dev, 0, 1, dma_buf_lock_dev_name);
+ 
+-	if (0 == err)
+-	{
++	if (err == 0) {
+ 		cdev_init(&dma_buf_lock_cdev, &dma_buf_lock_fops);
+ 
+ 		err = cdev_add(&dma_buf_lock_cdev, dma_buf_lock_dev, 1);
+ 
+-		if (0 == err)
+-		{
++		if (err == 0) {
+ 			dma_buf_lock_class = class_create(THIS_MODULE, dma_buf_lock_dev_name);
+ 			if (IS_ERR(dma_buf_lock_class))
+-			{
+ 				err = PTR_ERR(dma_buf_lock_class);
+-			}
+ 			else
+ 			{
+ 				struct device *mdev;
+ 				mdev = device_create(dma_buf_lock_class, NULL, dma_buf_lock_dev, NULL, dma_buf_lock_dev_name);
+ 				if (!IS_ERR(mdev))
+-				{
+ 					return 0;
+-				}
+ 
+ 				err = PTR_ERR(mdev);
+ 				class_destroy(dma_buf_lock_class);
+@@ -836,7 +845,7 @@ static void __exit dma_buf_lock_exit(void)
+ 		}
+ 		else
+ 		{
+-			dma_buf_lock_resource *resource = list_entry(dma_buf_lock_resource_list.next, 
++			dma_buf_lock_resource *resource = list_entry(dma_buf_lock_resource_list.next,
+ 			                                             dma_buf_lock_resource, link);
+ 			kref_put(&resource->refcount, dma_buf_lock_dounlock);
+ 			mutex_unlock(&dma_buf_lock_mutex);
+@@ -862,26 +871,17 @@ static int dma_buf_lock_ioctl(struct inode *inode, struct file *filp, unsigned i
+ 	int size = _IOC_SIZE(cmd);
+ 
+ 	if (_IOC_TYPE(cmd) != DMA_BUF_LOCK_IOC_MAGIC)
+-	{
+ 		return -ENOTTY;
+-
+-	}
+ 	if ((_IOC_NR(cmd) < DMA_BUF_LOCK_IOC_MINNR) || (_IOC_NR(cmd) > DMA_BUF_LOCK_IOC_MAXNR))
+-	{
+ 		return -ENOTTY;
+-	}
+ 
+ 	switch (cmd)
+ 	{
+ 		case DMA_BUF_LOCK_FUNC_LOCK_ASYNC:
+ 			if (size != sizeof(dma_buf_lock_k_request))
+-			{
+ 				return -ENOTTY;
+-			}
+ 			if (copy_from_user(&request, (void __user *)arg, size))
+-			{
+ 				return -EFAULT;
+-			}
+ #if DMA_BUF_LOCK_DEBUG
+ 			printk("DMA_BUF_LOCK_FUNC_LOCK_ASYNC - %i\n", request.count);
+ #endif
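+
+The version checks added throughout this file follow a single idiom: select
+the pre-5.4 reservation_object API or the dma_resv API that replaced it at
+compile time. A minimal sketch of that idiom, simplified from what the driver
+does (the helper name is invented):
+
+	#include <linux/version.h>
+	#include <linux/dma-buf.h>
+	#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
+	#include <linux/reservation.h>
+	#else
+	#include <linux/dma-resv.h>
+	#endif
+
+	/* Reserve one shared-fence slot on a dma-buf's reservation object,
+	 * spanning the 5.4 rename from reservation_object to dma_resv.
+	 */
+	static int example_reserve_shared_slot(struct dma_buf *buf)
+	{
+	#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
+		return reservation_object_reserve_shared(buf->resv);
+	#else
+		return dma_resv_reserve_shared(buf->resv, 1);
+	#endif
+	}
+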
+diff --git a/dvalin/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.h b/dvalin/kernel/drivers/base/arm/dma_buf_lock/src/dma_buf_lock.h
+similarity index 88%
+rename from dvalin/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.h
+rename to dvalin/kernel/drivers/base/arm/dma_buf_lock/src/dma_buf_lock.h
+index f2ae575..104af1f 100644
+--- a/dvalin/kernel/drivers/base/dma_buf_lock/src/dma_buf_lock.h
++++ b/dvalin/kernel/drivers/base/arm/dma_buf_lock/src/dma_buf_lock.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2012, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ #ifndef _DMA_BUF_LOCK_H
+diff --git a/dvalin/kernel/drivers/base/arm/dma_buf_test_exporter/Kbuild b/dvalin/kernel/drivers/base/arm/dma_buf_test_exporter/Kbuild
+new file mode 100644
+index 0000000..0e20cb4
+--- /dev/null
++++ b/dvalin/kernel/drivers/base/arm/dma_buf_test_exporter/Kbuild
+@@ -0,0 +1,23 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
++#
++# (C) COPYRIGHT 2012, 2020-2021 ARM Limited. All rights reserved.
++#
++# This program is free software and is provided to you under the terms of the
++# GNU General Public License version 2 as published by the Free Software
++# Foundation, and any use by you of this program is subject to the terms
++# of such GNU license.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, you can access it online at
++# http://www.gnu.org/licenses/gpl-2.0.html.
++#
++#
++
++ifeq ($(CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER), y)
++obj-m += dma-buf-test-exporter.o
++endif
+diff --git a/dvalin/kernel/drivers/base/arm/dma_buf_test_exporter/build.bp b/dvalin/kernel/drivers/base/arm/dma_buf_test_exporter/build.bp
+new file mode 100644
+index 0000000..a49fb81
+--- /dev/null
++++ b/dvalin/kernel/drivers/base/arm/dma_buf_test_exporter/build.bp
+@@ -0,0 +1,36 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++/*
++ *
++ * (C) COPYRIGHT 2017, 2020-2021 ARM Limited. All rights reserved.
++ *
++ * This program is free software and is provided to you under the terms of the
++ * GNU General Public License version 2 as published by the Free Software
++ * Foundation, and any use by you of this program is subject to the terms
++ * of such GNU license.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, you can access it online at
++ * http://www.gnu.org/licenses/gpl-2.0.html.
++ *
++ */
++
++bob_kernel_module {
++    name: "dma-buf-test-exporter",
++    defaults: [
++        "kernel_defaults"
++    ],
++    srcs: [
++        "Kbuild",
++        "dma-buf-test-exporter.c",
++    ],
++    enabled: false,
++    dma_shared_buffer_test_exporter: {
++        kbuild_options: ["CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER=y"],
++        enabled: true,
++    },
++}
+diff --git a/dvalin/kernel/drivers/base/dma_buf_test_exporter/dma-buf-test-exporter.c b/dvalin/kernel/drivers/base/arm/dma_buf_test_exporter/dma-buf-test-exporter.c
+similarity index 89%
+rename from dvalin/kernel/drivers/base/dma_buf_test_exporter/dma-buf-test-exporter.c
+rename to dvalin/kernel/drivers/base/arm/dma_buf_test_exporter/dma-buf-test-exporter.c
+index 3eb34c0..ccf73cc 100644
+--- a/dvalin/kernel/drivers/base/dma_buf_test_exporter/dma-buf-test-exporter.c
++++ b/dvalin/kernel/drivers/base/arm/dma_buf_test_exporter/dma-buf-test-exporter.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2012-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2012-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ #include <linux/dma-buf-test-exporter.h>
+@@ -31,18 +30,16 @@
+ #include <linux/atomic.h>
+ #include <linux/mm.h>
+ #include <linux/highmem.h>
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
++#if (KERNEL_VERSION(4, 8, 0) > LINUX_VERSION_CODE)
+ #include <linux/dma-attrs.h>
+ #endif
+ #include <linux/dma-mapping.h>
+-#endif
+ 
+ /* Maximum size allowed in a single DMA_BUF_TE_ALLOC call */
+ #define DMA_BUF_TE_ALLOC_MAX_SIZE ((8ull << 30) >> PAGE_SHIFT) /* 8 GB */
+ 
+ /* Since kernel version 5.0 CONFIG_ARCH_NO_SG_CHAIN replaced CONFIG_ARCH_HAS_SG_CHAIN */
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
++#if KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE
+ #if (!defined(ARCH_HAS_SG_CHAIN) && !defined(CONFIG_ARCH_HAS_SG_CHAIN))
+ #define NO_SG_CHAIN
+ #endif
+@@ -77,7 +74,7 @@ struct dma_buf_te_attachment {
+ 
+ static struct miscdevice te_device;
+ 
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
++#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
+ static int dma_buf_te_attach(struct dma_buf *buf, struct device *dev, struct dma_buf_attachment *attachment)
+ #else
+ static int dma_buf_te_attach(struct dma_buf *buf, struct dma_buf_attachment *attachment)
+@@ -206,30 +203,29 @@ static void dma_buf_te_release(struct dma_buf *buf)
+ 	/* no need for locking */
+ 
+ 	if (alloc->contiguous) {
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
++#if (KERNEL_VERSION(4, 8, 0) <= LINUX_VERSION_CODE)
+ 		dma_free_attrs(te_device.this_device,
+ 						alloc->nr_pages * PAGE_SIZE,
+ 						alloc->contig_cpu_addr,
+ 						alloc->contig_dma_addr,
+ 						DMA_ATTR_WRITE_COMBINE);
+-
+-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
++#else
+ 		DEFINE_DMA_ATTRS(attrs);
+ 
+ 		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ 		dma_free_attrs(te_device.this_device,
+ 						alloc->nr_pages * PAGE_SIZE,
+ 						alloc->contig_cpu_addr, alloc->contig_dma_addr, &attrs);
+-#else
+-		dma_free_writecombine(te_device.this_device,
+-								alloc->nr_pages * PAGE_SIZE,
+-								alloc->contig_cpu_addr, alloc->contig_dma_addr);
+ #endif
+ 	} else {
+ 		for (i = 0; i < alloc->nr_pages; i++)
+ 			__free_page(alloc->pages[i]);
+ 	}
++#if (KERNEL_VERSION(4, 12, 0) <= LINUX_VERSION_CODE)
++	kvfree(alloc->pages);
++#else
+ 	kfree(alloc->pages);
++#endif
+ 	kfree(alloc);
+ }
+ 
+@@ -316,7 +312,7 @@ static void dma_buf_te_mmap_close(struct vm_area_struct *vma)
+ 	mutex_unlock(&dma_buf->lock);
+ }
+ 
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+ static int dma_buf_te_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ #elif KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE
+ static int dma_buf_te_mmap_fault(struct vm_fault *vmf)
+@@ -328,7 +324,7 @@ static vm_fault_t dma_buf_te_mmap_fault(struct vm_fault *vmf)
+ 	struct dma_buf *dmabuf;
+ 	struct page *pageptr;
+ 
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+ 	dmabuf = vma->vm_private_data;
+ #else
+ 	dmabuf = vmf->vma->vm_private_data;
+@@ -362,11 +358,7 @@ static int dma_buf_te_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+ 	if (alloc->fail_mmap)
+ 		return -ENOMEM;
+ 
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+-#else
+-	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTEXPAND;
+-#endif
+ 	vma->vm_ops = &dma_buf_te_vm_ops;
+ 	vma->vm_private_data = dmabuf;
+ 
+@@ -378,7 +370,7 @@ static int dma_buf_te_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+ 	return 0;
+ }
+ 
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
++#if KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE
+ static void *dma_buf_te_kmap_atomic(struct dma_buf *buf, unsigned long page_num)
+ {
+ 	/* IGNORE */
+@@ -419,19 +411,19 @@ static struct dma_buf_ops dma_buf_te_ops = {
+ 	.mmap = dma_buf_te_mmap,
+ 	.begin_cpu_access = dma_buf_te_begin_cpu_access,
+ 	.end_cpu_access = dma_buf_te_end_cpu_access,
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
++#if KERNEL_VERSION(4, 12, 0) > LINUX_VERSION_CODE
+ 	.kmap = dma_buf_te_kmap,
+ 	.kunmap = dma_buf_te_kunmap,
+ 
+ 	/* nop handlers for mandatory functions we ignore */
+ 	.kmap_atomic = dma_buf_te_kmap_atomic
+ #else
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
++#if KERNEL_VERSION(5, 6, 0) > LINUX_VERSION_CODE
+ 	.map = dma_buf_te_kmap,
+ 	.unmap = dma_buf_te_kunmap,
+ #endif
+ 
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
++#if KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE
+ 	/* nop handlers for mandatory functions we ignore */
+ 	.map_atomic = dma_buf_te_kmap_atomic
+ #endif
+@@ -481,7 +473,8 @@ static int do_dma_buf_te_ioctl_alloc(struct dma_buf_te_ioctl_alloc __user *buf,
+ 	/* Whilst it is possible to allocate larger buffer, we won't be able to
+ 	 * map it during actual usage (mmap() still succeeds). We fail here so
+ 	 * userspace code can deal with it early than having driver failure
+-	 * later on. */
++	 * later on.
++	 */
+ 	if (max_nr_pages > SG_MAX_SINGLE_ALLOC)
+ 		max_nr_pages = SG_MAX_SINGLE_ALLOC;
+ #endif /* NO_SG_CHAIN */
+@@ -493,7 +486,7 @@ static int do_dma_buf_te_ioctl_alloc(struct dma_buf_te_ioctl_alloc __user *buf,
+ 	}
+ 
+ 	alloc = kzalloc(sizeof(struct dma_buf_te_alloc), GFP_KERNEL);
+-	if (NULL == alloc) {
++	if (alloc == NULL) {
+ 		dev_err(te_device.this_device, "%s: couldn't alloc object", __func__);
+ 		goto no_alloc_object;
+ 	}
+@@ -501,7 +494,12 @@ static int do_dma_buf_te_ioctl_alloc(struct dma_buf_te_ioctl_alloc __user *buf,
+ 	alloc->nr_pages = alloc_req.size;
+ 	alloc->contiguous = contiguous;
+ 
++#if (KERNEL_VERSION(4, 12, 0) <= LINUX_VERSION_CODE)
++	alloc->pages = kvzalloc(sizeof(struct page *) * alloc->nr_pages, GFP_KERNEL);
++#else
+ 	alloc->pages = kzalloc(sizeof(struct page *) * alloc->nr_pages, GFP_KERNEL);
++#endif
++
+ 	if (!alloc->pages) {
+ 		dev_err(te_device.this_device,
+ 				"%s: couldn't alloc %zu page structures",
+@@ -512,14 +510,13 @@ static int do_dma_buf_te_ioctl_alloc(struct dma_buf_te_ioctl_alloc __user *buf,
+ 	if (contiguous) {
+ 		dma_addr_t dma_aux;
+ 
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
++#if (KERNEL_VERSION(4, 8, 0) <= LINUX_VERSION_CODE)
+ 		alloc->contig_cpu_addr = dma_alloc_attrs(te_device.this_device,
+ 				alloc->nr_pages * PAGE_SIZE,
+ 				&alloc->contig_dma_addr,
+ 				GFP_KERNEL | __GFP_ZERO,
+ 				DMA_ATTR_WRITE_COMBINE);
+-
+-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
++#else
+ 		DEFINE_DMA_ATTRS(attrs);
+ 
+ 		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+@@ -527,11 +524,6 @@ static int do_dma_buf_te_ioctl_alloc(struct dma_buf_te_ioctl_alloc __user *buf,
+ 				alloc->nr_pages * PAGE_SIZE,
+ 				&alloc->contig_dma_addr,
+ 				GFP_KERNEL | __GFP_ZERO, &attrs);
+-#else
+-		alloc->contig_cpu_addr = dma_alloc_writecombine(te_device.this_device,
+-				alloc->nr_pages * PAGE_SIZE,
+-				&alloc->contig_dma_addr,
+-				GFP_KERNEL | __GFP_ZERO);
+ #endif
+ 		if (!alloc->contig_cpu_addr) {
+ 			dev_err(te_device.this_device, "%s: couldn't alloc contiguous buffer %zu pages",
+@@ -546,7 +538,7 @@ static int do_dma_buf_te_ioctl_alloc(struct dma_buf_te_ioctl_alloc __user *buf,
+ 	} else {
+ 		for (i = 0; i < alloc->nr_pages; i++) {
+ 			alloc->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+-			if (NULL == alloc->pages[i]) {
++			if (alloc->pages[i] == NULL) {
+ 				dev_err(te_device.this_device, "%s: couldn't alloc page", __func__);
+ 				goto no_page;
+ 			}
+@@ -554,13 +546,10 @@ static int do_dma_buf_te_ioctl_alloc(struct dma_buf_te_ioctl_alloc __user *buf,
+ 	}
+ 
+ 	/* alloc ready, let's export it */
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ 	{
+ 		struct dma_buf_export_info export_info = {
+ 			.exp_name = "dma_buf_te",
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+ 			.owner = THIS_MODULE,
+-#endif
+ 			.ops = &dma_buf_te_ops,
+ 			.size = alloc->nr_pages << PAGE_SHIFT,
+ 			.flags = O_CLOEXEC | O_RDWR,
+@@ -569,13 +558,6 @@ static int do_dma_buf_te_ioctl_alloc(struct dma_buf_te_ioctl_alloc __user *buf,
+ 
+ 		dma_buf = dma_buf_export(&export_info);
+ 	}
+-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
+-	dma_buf = dma_buf_export(alloc, &dma_buf_te_ops,
+-			alloc->nr_pages << PAGE_SHIFT, O_CLOEXEC|O_RDWR, NULL);
+-#else
+-	dma_buf = dma_buf_export(alloc, &dma_buf_te_ops,
+-			alloc->nr_pages << PAGE_SHIFT, O_CLOEXEC|O_RDWR);
+-#endif
+ 
+ 	if (IS_ERR_OR_NULL(dma_buf)) {
+ 		dev_err(te_device.this_device, "%s: couldn't export dma_buf", __func__);
+@@ -598,32 +580,30 @@ no_export:
+ 	/* i still valid */
+ no_page:
+ 	if (contiguous) {
+-
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
++#if (KERNEL_VERSION(4, 8, 0) <= LINUX_VERSION_CODE)
+ 		dma_free_attrs(te_device.this_device,
+ 						alloc->nr_pages * PAGE_SIZE,
+ 						alloc->contig_cpu_addr,
+ 						alloc->contig_dma_addr,
+ 						DMA_ATTR_WRITE_COMBINE);
+-
+-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
++#else
+ 		DEFINE_DMA_ATTRS(attrs);
+ 
+ 		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ 		dma_free_attrs(te_device.this_device,
+ 						alloc->nr_pages * PAGE_SIZE,
+ 						alloc->contig_cpu_addr, alloc->contig_dma_addr, &attrs);
+-#else
+-		dma_free_writecombine(te_device.this_device,
+-								alloc->nr_pages * PAGE_SIZE,
+-								alloc->contig_cpu_addr, alloc->contig_dma_addr);
+ #endif
+ 	} else {
+ 		while (i-- > 0)
+ 			__free_page(alloc->pages[i]);
+ 	}
+ free_page_struct:
++#if (KERNEL_VERSION(4, 12, 0) <= LINUX_VERSION_CODE)
++	kvfree(alloc->pages);
++#else
+ 	kfree(alloc->pages);
++#endif
+ free_alloc_object:
+ 	kfree(alloc);
+ no_alloc_object:
+@@ -727,17 +707,17 @@ static u32 dma_te_buf_fill(struct dma_buf *dma_buf, unsigned int value)
+ 	}
+ 
+ 	ret = dma_buf_begin_cpu_access(dma_buf,
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+-			0, dma_buf->size,
++#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE
++				       0, dma_buf->size,
+ #endif
+-			DMA_BIDIRECTIONAL);
++				       DMA_BIDIRECTIONAL);
+ 	if (ret)
+ 		goto no_cpu_access;
+ 
+ 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ 		for (i = 0; i < sg_dma_len(sg); i = i + PAGE_SIZE) {
+ 			void *addr = NULL;
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
++#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+ 			addr = dma_buf_te_kmap(dma_buf, i >> PAGE_SHIFT);
+ #else
+ 			addr = dma_buf_kmap(dma_buf, i >> PAGE_SHIFT);
+@@ -747,7 +727,7 @@ static u32 dma_te_buf_fill(struct dma_buf *dma_buf, unsigned int value)
+ 				goto no_kmap;
+ 			}
+ 			memset(addr, value, PAGE_SIZE);
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
++#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+ 			dma_buf_te_kunmap(dma_buf, i >> PAGE_SHIFT, addr);
+ #else
+ 			dma_buf_kunmap(dma_buf, i >> PAGE_SHIFT, addr);
+@@ -758,10 +738,10 @@ static u32 dma_te_buf_fill(struct dma_buf *dma_buf, unsigned int value)
+ 
+ no_kmap:
+ 	dma_buf_end_cpu_access(dma_buf,
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+-			0, dma_buf->size,
++#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE
++			       0, dma_buf->size,
+ #endif
+-			DMA_BIDIRECTIONAL);
++			       DMA_BIDIRECTIONAL);
+ no_cpu_access:
+ 	dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
+ no_import:
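+
+The allocation changes above pair kvzalloc() with kvfree() where available
+(4.12 onwards) so that large page-pointer arrays may fall back to vmalloc
+space, keeping kzalloc()/kfree() for older kernels. A condensed sketch of the
+pattern (helper names invented):
+
+	#include <linux/version.h>
+	#include <linux/mm.h>
+	#include <linux/slab.h>
+
+	static struct page **example_alloc_page_array(size_t nr_pages)
+	{
+	#if (KERNEL_VERSION(4, 12, 0) <= LINUX_VERSION_CODE)
+		/* May be vmalloc-backed for large arrays. */
+		return kvzalloc(sizeof(struct page *) * nr_pages, GFP_KERNEL);
+	#else
+		return kzalloc(sizeof(struct page *) * nr_pages, GFP_KERNEL);
+	#endif
+	}
+
+	static void example_free_page_array(struct page **pages)
+	{
+	#if (KERNEL_VERSION(4, 12, 0) <= LINUX_VERSION_CODE)
+		kvfree(pages);
+	#else
+		kfree(pages);
+	#endif
+	}
+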
+diff --git a/dvalin/kernel/drivers/base/dma_buf_test_exporter/Kbuild b/dvalin/kernel/drivers/base/arm/memory_group_manager/Kbuild
+similarity index 74%
+rename from dvalin/kernel/drivers/base/dma_buf_test_exporter/Kbuild
+rename to dvalin/kernel/drivers/base/arm/memory_group_manager/Kbuild
+index c382b79..99ce311 100644
+--- a/dvalin/kernel/drivers/base/dma_buf_test_exporter/Kbuild
++++ b/dvalin/kernel/drivers/base/arm/memory_group_manager/Kbuild
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,10 +16,8 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+-#
+ #
+ 
+-ifneq ($(CONFIG_DMA_SHARED_BUFFER),)
+-obj-$(CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER) += dma-buf-test-exporter.o
++ifeq ($(CONFIG_MALI_MEMORY_GROUP_MANAGER), y)
++obj-m := memory_group_manager.o
+ endif
+diff --git a/dvalin/kernel/drivers/base/arm/memory_group_manager/build.bp b/dvalin/kernel/drivers/base/arm/memory_group_manager/build.bp
+new file mode 100644
+index 0000000..23db183
+--- /dev/null
++++ b/dvalin/kernel/drivers/base/arm/memory_group_manager/build.bp
+@@ -0,0 +1,36 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++/*
++ *
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
++ *
++ * This program is free software and is provided to you under the terms of the
++ * GNU General Public License version 2 as published by the Free Software
++ * Foundation, and any use by you of this program is subject to the terms
++ * of such GNU license.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, you can access it online at
++ * http://www.gnu.org/licenses/gpl-2.0.html.
++ *
++ */
++
++bob_kernel_module {
++    name: "memory_group_manager",
++    defaults: [
++        "kernel_defaults"
++    ],
++    srcs: [
++        "Kbuild",
++        "memory_group_manager.c",
++    ],
++    enabled: false,
++    mali_memory_group_manager: {
++        kbuild_options: ["CONFIG_MALI_MEMORY_GROUP_MANAGER=y"],
++        enabled: true,
++    },
++}
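
As read from the build.bp block above: the module is disabled by default (enabled: false) and is only built when the Bob build configuration enables the mali_memory_group_manager feature, which also injects CONFIG_MALI_MEMORY_GROUP_MANAGER=y into the Kbuild invocation, satisfying the ifeq guard in the renamed Kbuild file above.
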
+diff --git a/dvalin/kernel/drivers/base/memory_group_manager/memory_group_manager.c b/dvalin/kernel/drivers/base/arm/memory_group_manager/memory_group_manager.c
+similarity index 98%
+rename from dvalin/kernel/drivers/base/memory_group_manager/memory_group_manager.c
+rename to dvalin/kernel/drivers/base/arm/memory_group_manager/memory_group_manager.c
+index 44f848a..a70fe70 100644
+--- a/dvalin/kernel/drivers/base/memory_group_manager/memory_group_manager.c
++++ b/dvalin/kernel/drivers/base/arm/memory_group_manager/memory_group_manager.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ #include <linux/fs.h>
+@@ -26,7 +25,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/version.h>
+ #include <linux/module.h>
+-#ifdef CONFIG_DEBUG_FS
++#if IS_ENABLED(CONFIG_DEBUG_FS)
+ #include <linux/debugfs.h>
+ #endif
+ #include <linux/mm.h>
+@@ -92,12 +91,12 @@ struct mgm_group {
+ struct mgm_groups {
+ 	struct mgm_group groups[MEMORY_GROUP_MANAGER_NR_GROUPS];
+ 	struct device *dev;
+-#ifdef CONFIG_DEBUG_FS
++#if IS_ENABLED(CONFIG_DEBUG_FS)
+ 	struct dentry *mgm_debugfs_root;
+ #endif
+ };
+ 
+-#ifdef CONFIG_DEBUG_FS
++#if IS_ENABLED(CONFIG_DEBUG_FS)
+ 
+ static int mgm_size_get(void *data, u64 *val)
+ {
+@@ -475,7 +474,6 @@ static struct platform_driver memory_group_manager_driver = {
+ 	.remove = memory_group_manager_remove,
+ 	.driver = {
+ 		.name = "physical-memory-group-manager",
+-		.owner = THIS_MODULE,
+ 		.of_match_table = of_match_ptr(memory_group_manager_dt_ids),
+ 		/*
+ 		 * Prevent the mgm_dev from being unbound and freed, as other's
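
A note on the #ifdef CONFIG_DEBUG_FS to #if IS_ENABLED(CONFIG_DEBUG_FS) conversions above: IS_ENABLED() evaluates true for both built-in (=y) and modular (=m) options, while a plain #ifdef only matches =y. DEBUG_FS itself is a bool symbol, so the two forms behave identically here and the change is primarily a move to the preferred kernel idiom. A minimal comparison (CONFIG_FOO is a stand-in symbol):

    #include <linux/kconfig.h>

    #ifdef CONFIG_FOO              /* defined only when CONFIG_FOO=y         */
    #endif
    #if IS_ENABLED(CONFIG_FOO)     /* true when CONFIG_FOO=y or CONFIG_FOO=m */
    #endif
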
+diff --git a/dvalin/kernel/drivers/base/arm/protected_memory_allocator/Kbuild b/dvalin/kernel/drivers/base/arm/protected_memory_allocator/Kbuild
+new file mode 100644
+index 0000000..25295a9
+--- /dev/null
++++ b/dvalin/kernel/drivers/base/arm/protected_memory_allocator/Kbuild
+@@ -0,0 +1,23 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
++#
++# (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
++#
++# This program is free software and is provided to you under the terms of the
++# GNU General Public License version 2 as published by the Free Software
++# Foundation, and any use by you of this program is subject to the terms
++# of such GNU license.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, you can access it online at
++# http://www.gnu.org/licenses/gpl-2.0.html.
++#
++#
++
++ifeq ($(CONFIG_MALI_PROTECTED_MEMORY_ALLOCATOR), y)
++obj-m := protected_memory_allocator.o
++endif
+diff --git a/dvalin/kernel/drivers/base/arm/protected_memory_allocator/build.bp b/dvalin/kernel/drivers/base/arm/protected_memory_allocator/build.bp
+new file mode 100644
+index 0000000..4c56154
+--- /dev/null
++++ b/dvalin/kernel/drivers/base/arm/protected_memory_allocator/build.bp
+@@ -0,0 +1,36 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++/*
++ *
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
++ *
++ * This program is free software and is provided to you under the terms of the
++ * GNU General Public License version 2 as published by the Free Software
++ * Foundation, and any use by you of this program is subject to the terms
++ * of such GNU license.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, you can access it online at
++ * http://www.gnu.org/licenses/gpl-2.0.html.
++ *
++ */
++
++bob_kernel_module {
++    name: "protected_memory_allocator",
++    defaults: [
++        "kernel_defaults"
++    ],
++    srcs: [
++        "Kbuild",
++        "protected_memory_allocator.c",
++    ],
++    enabled: false,
++    mali_protected_memory_allocator: {
++        kbuild_options: ["CONFIG_MALI_PROTECTED_MEMORY_ALLOCATOR=y"],
++        enabled: true,
++    },
++}
+diff --git a/dvalin/kernel/drivers/base/arm/protected_memory_allocator/protected_memory_allocator.c b/dvalin/kernel/drivers/base/arm/protected_memory_allocator/protected_memory_allocator.c
+new file mode 100644
+index 0000000..6684210
+--- /dev/null
++++ b/dvalin/kernel/drivers/base/arm/protected_memory_allocator/protected_memory_allocator.c
+@@ -0,0 +1,551 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
++/*
++ *
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
++ *
++ * This program is free software and is provided to you under the terms of the
++ * GNU General Public License version 2 as published by the Free Software
++ * Foundation, and any use by you of this program is subject to the terms
++ * of such GNU license.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, you can access it online at
++ * http://www.gnu.org/licenses/gpl-2.0.html.
++ *
++ */
++
++#include <linux/version.h>
++#include <linux/of.h>
++#include <linux/of_reserved_mem.h>
++#include <linux/platform_device.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/io.h>
++#include <linux/protected_memory_allocator.h>
++
++/* Size of a bitfield element in bytes */
++#define BITFIELD_ELEM_SIZE sizeof(u64)
++
++/* We can track whether or not 64 pages are currently allocated in a u64 */
++#define PAGES_PER_BITFIELD_ELEM (BITFIELD_ELEM_SIZE * BITS_PER_BYTE)
++
++/* Order 6 (i.e. 64) corresponds to the number of pages held in a bitfield element */
++#define ORDER_OF_PAGES_PER_BITFIELD_ELEM 6
++
++/**
++ * struct simple_pma_device -	Simple implementation of a protected memory
++ *				allocator device
++ *
++ * @pma_dev:			Protected memory allocator device pointer
++ * @dev:			Device pointer
++ * @allocated_pages_bitfield_arr: Status of all the physical memory pages
++ *				within the protected memory region, one bit per page
++ * @rmem_base:			Base address of the reserved memory region
++ * @rmem_size:			Size of the reserved memory region, in pages
++ * @num_free_pages:		Number of free pages in the memory region
++ * @rmem_lock:			Lock to serialize the allocation and freeing of
++ *				physical pages from the protected memory region
++ */
++struct simple_pma_device {
++	struct protected_memory_allocator_device pma_dev;
++	struct device *dev;
++	u64 *allocated_pages_bitfield_arr;
++	phys_addr_t rmem_base;
++	size_t rmem_size;
++	size_t num_free_pages;
++	spinlock_t rmem_lock;
++};
++
++/*
++ * Number of elements in array 'allocated_pages_bitfield_arr'. If the number of
++ * pages required does not divide exactly by PAGES_PER_BITFIELD_ELEM, an extra
++ * element is added for the remainder.
++ */
++#define ALLOC_PAGES_BITFIELD_ARR_SIZE(num_pages) \
++	((PAGES_PER_BITFIELD_ELEM * (0 != (num_pages % PAGES_PER_BITFIELD_ELEM)) + \
++	num_pages) / PAGES_PER_BITFIELD_ELEM)
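
In effect, ALLOC_PAGES_BITFIELD_ARR_SIZE() is a ceiling division: the (0 != num_pages % PAGES_PER_BITFIELD_ELEM) term contributes one extra PAGES_PER_BITFIELD_ELEM exactly when there is a remainder, so

    ARR_SIZE(n) = \lceil n / 64 \rceil

For example, ARR_SIZE(128) = (0 + 128) / 64 = 2, and ARR_SIZE(100) = (64 + 100) / 64 = 2 under integer division.
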
++
++/*
++ * Allocate 2^N pages, where
++ * 0 <= N <= ORDER_OF_PAGES_PER_BITFIELD_ELEM - 1, i.e. up to 32 pages. The
++ * routine fills in a pma structure and sets the appropriate bits in the
++ * allocated-pages bitfield array, assuming the caller has already determined
++ * that those bits are clear.
++ *
++ * This routine always works within a single allocated-pages bitfield element.
++ * It can be thought of as the 'small-granularity' allocator.
++ */
++static void small_granularity_alloc(struct simple_pma_device *const epma_dev,
++				    size_t alloc_bitfield_idx, size_t start_bit,
++				    size_t order,
++				    struct protected_memory_allocation *pma)
++{
++	size_t i;
++	size_t page_idx;
++	u64 *bitfield;
++	size_t alloc_pages_bitfield_size;
++
++	if (WARN_ON(!epma_dev) ||
++	    WARN_ON(!pma))
++		return;
++
++	WARN(epma_dev->rmem_size == 0, "%s: rmem_size is 0", __func__);
++	alloc_pages_bitfield_size = ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size);
++
++	WARN(alloc_bitfield_idx >= alloc_pages_bitfield_size,
++	     "%s: idx>bf_size: %zu %zu", __FUNCTION__,
++	     alloc_bitfield_idx, alloc_pages_bitfield_size);
++
++	WARN((start_bit + (1 << order)) > PAGES_PER_BITFIELD_ELEM,
++	     "%s: start=%zu order=%zu ppbe=%zu",
++	     __FUNCTION__, start_bit, order, PAGES_PER_BITFIELD_ELEM);
++
++	bitfield = &epma_dev->allocated_pages_bitfield_arr[alloc_bitfield_idx];
++
++	for (i = 0; i < (1 << order); i++) {
++		/* Check the pages represented by this bit are actually free */
++		WARN(*bitfield & (1ULL << (start_bit + i)),
++		      "in %s: page not free: %zu %zu %.16llx %zu\n",
++		      __FUNCTION__, i, order, *bitfield, alloc_pages_bitfield_size);
++
++		/* Mark the pages as now allocated */
++		*bitfield |= (1ULL << (start_bit + i));
++	}
++
++	/* Compute the page index */
++	page_idx = (alloc_bitfield_idx * PAGES_PER_BITFIELD_ELEM) + start_bit;
++
++	/* Fill-in the allocation struct for the caller */
++	pma->pa = epma_dev->rmem_base + (page_idx << PAGE_SHIFT);
++	pma->order = order;
++}
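
A worked example with hypothetical inputs: for alloc_bitfield_idx = 2, start_bit = 10 and order = 3, the loop sets bits 10..17 of bitfield element 2, page_idx becomes 2 * 64 + 10 = 138, and pma->pa is rmem_base + (138 << PAGE_SHIFT).
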
++
++/*
++ * Allocate 2^N pages, where N >= ORDER_OF_PAGES_PER_BITFIELD_ELEM, i.e. 64
++ * pages or more. The routine fills in a pma structure and sets the
++ * appropriate bits in the allocated-pages bitfield array, assuming the caller
++ * has already determined that those bits are clear.
++ *
++ * Unlike small_granularity_alloc, this routine can work with multiple 64-page
++ * groups, i.e. multiple elements from the allocated-pages bitfield array.
++ * However, it always works with complete sets of these 64-page groups. It can
++ * therefore be thought of as the 'large-granularity' allocator.
++ */
++static void large_granularity_alloc(struct simple_pma_device *const epma_dev,
++				    size_t start_alloc_bitfield_idx,
++				    size_t order,
++				    struct protected_memory_allocation *pma)
++{
++	size_t i;
++	size_t num_pages_to_alloc = (size_t)1 << order;
++	size_t num_bitfield_elements_needed = num_pages_to_alloc / PAGES_PER_BITFIELD_ELEM;
++	size_t start_page_idx = start_alloc_bitfield_idx * PAGES_PER_BITFIELD_ELEM;
++
++	if (WARN_ON(!epma_dev) ||
++	    WARN_ON(!pma))
++		return;
++
++	/*
++	 * Are there enough bitfield array elements (groups of 64 pages)
++	 * between the start element and the end of the bitfield array
++	 * to fulfill the request?
++	 */
++	WARN((start_alloc_bitfield_idx + num_bitfield_elements_needed) > ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size),
++	     "%s: start=%zu order=%zu ms=%zu",
++	     __FUNCTION__, start_alloc_bitfield_idx, order, epma_dev->rmem_size);
++
++	for (i = 0; i < num_bitfield_elements_needed; i++) {
++		u64 *bitfield = &epma_dev->allocated_pages_bitfield_arr[start_alloc_bitfield_idx + i];
++
++		/* We expect all pages that relate to this bitfield element to be free */
++		WARN((*bitfield != 0),
++		     "in %s: pages not free: i=%zu o=%zu bf=%.16llx\n",
++		     __FUNCTION__, i, order, *bitfield);
++
++		/* Mark all the pages for this element as not free */
++		*bitfield = ~0ULL;
++	}
++
++	/* Fill-in the allocation struct for the caller */
++	pma->pa = epma_dev->rmem_base + (start_page_idx << PAGE_SHIFT);
++	pma->order = order;
++}
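
Again with hypothetical numbers: an order-7 request (128 pages) needs 128 / 64 = 2 whole bitfield elements. Starting at element 4, the loop sets elements 4 and 5 to ~0ULL, and the returned address is rmem_base + ((4 * 64) << PAGE_SHIFT).
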
++
++static struct protected_memory_allocation *simple_pma_alloc_page(
++	struct protected_memory_allocator_device *pma_dev, unsigned int order)
++{
++	struct simple_pma_device *const epma_dev =
++		container_of(pma_dev, struct simple_pma_device, pma_dev);
++	struct protected_memory_allocation *pma;
++	size_t num_pages_to_alloc;
++
++	u64 *bitfields = epma_dev->allocated_pages_bitfield_arr;
++	size_t i;
++	size_t bit;
++	size_t count;
++
++	dev_dbg(epma_dev->dev, "%s(pma_dev=%px, order=%u)\n",
++		__func__, (void *)pma_dev, order);
++
++	/* This is an example function that follows an extremely simple logic
++	 * and is very likely to fail to allocate memory if put under stress.
++	 *
++	 * The simple_pma_device maintains an array of u64s, with one bit used
++	 * to track the status of each page.
++	 *
++	 * In order to create a memory allocation, the allocator looks for an
++	 * adjacent group of cleared bits. This does leave the algorithm open
++	 * to fragmentation issues, but is deemed sufficient for now.
++	 * If successful, the allocator shall mark all the pages as allocated
++	 * and increment the offset accordingly.
++	 *
++	 * Allocations of 64 pages or more (order 6 and above) are made only
++	 * with 64-page alignment, in order to keep the algorithm as simple as
++	 * possible, i.e. starting from bit 0 of any 64-bit page-allocation
++	 * bitfield. For this, the large-granularity allocator is used.
++	 *
++	 * Lower-order allocations are made entirely within the same group of
++	 * 64 pages, with the small-granularity allocator (i.e. always from the
++	 * same 64-bit page-allocation bitfield) - again, to keep things as
++	 * simple as possible, but flexible enough to meet current needs.
++	 */
++
++	num_pages_to_alloc = (size_t)1 << order;
++
++	pma = devm_kzalloc(epma_dev->dev, sizeof(*pma), GFP_KERNEL);
++	if (!pma) {
++		dev_err(epma_dev->dev, "Failed to alloc pma struct");
++		return NULL;
++	}
++
++	spin_lock(&epma_dev->rmem_lock);
++
++	if (epma_dev->num_free_pages < num_pages_to_alloc) {
++		dev_err(epma_dev->dev, "not enough free pages\n");
++		devm_kfree(epma_dev->dev, pma);
++		spin_unlock(&epma_dev->rmem_lock);
++		return NULL;
++	}
++
++	/*
++	 * For order 0-5 (i.e. 1 to 32 pages) we always allocate within the
++	 * same set of 64 pages. Currently, most allocations will be very small
++	 * (1 page), so the more likely path here is
++	 * order < ORDER_OF_PAGES_PER_BITFIELD_ELEM.
++	 */
++	if (likely(order < ORDER_OF_PAGES_PER_BITFIELD_ELEM)) {
++		size_t alloc_pages_bitmap_size = ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size);
++
++		for (i = 0; i < alloc_pages_bitmap_size; i++) {
++			count = 0;
++
++			for (bit = 0; bit < PAGES_PER_BITFIELD_ELEM; bit++) {
++				if (0 == (bitfields[i] & (1ULL << bit))) {
++					if ((count + 1) >= num_pages_to_alloc) {
++						/*
++						 * We've found enough free, consecutive pages with which to
++						 * make an allocation
++						 */
++						small_granularity_alloc(
++							epma_dev, i,
++							bit - count, order,
++							pma);
++
++						epma_dev->num_free_pages -=
++							num_pages_to_alloc;
++
++						spin_unlock(
++							&epma_dev->rmem_lock);
++						return pma;
++					}
++
++					/* So far so good, but we need more clear bits yet */
++					count++;
++				} else {
++					/*
++					 * We found an allocated page, so nothing we've seen so far can be used.
++					 * Keep looking.
++					 */
++					count = 0;
++				}
++			}
++		}
++	} else {
++		/*
++		 * For allocations of order ORDER_OF_PAGES_PER_BITFIELD_ELEM and above (>= 64 pages), we know
++		 * we'll only get allocations for whole groups of 64 pages, which hugely simplifies the task.
++		 */
++		size_t alloc_pages_bitmap_size = ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size);
++
++		/* How many 64-bit bitfield elements will be needed for the allocation? */
++		size_t num_bitfield_elements_needed = num_pages_to_alloc / PAGES_PER_BITFIELD_ELEM;
++
++		count = 0;
++
++		for (i = 0; i < alloc_pages_bitmap_size; i++) {
++			/* Are all the pages free for the i'th u64 bitfield element? */
++			if (bitfields[i] == 0) {
++				count += PAGES_PER_BITFIELD_ELEM;
++
++				if (count >= (1 << order)) {
++					size_t start_idx = (i + 1) - num_bitfield_elements_needed;
++
++					large_granularity_alloc(epma_dev,
++								start_idx,
++								order, pma);
++
++					epma_dev->num_free_pages -= 1 << order;
++					spin_unlock(&epma_dev->rmem_lock);
++					return pma;
++				}
++			} else {
++				count = 0;
++			}
++		}
++	}
++
++	spin_unlock(&epma_dev->rmem_lock);
++	devm_kfree(epma_dev->dev, pma);
++
++	dev_err(epma_dev->dev, "not enough contiguous pages (need %zu), total free pages left %zu\n",
++		num_pages_to_alloc, epma_dev->num_free_pages);
++	return NULL;
++}
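
The inner loop above is a first-fit search for a run of clear bits inside one 64-bit element. A standalone sketch of the same scan, for illustration only (find_zero_run is a hypothetical name, not part of the driver):

    /* Return the start bit of the first run of 'need' consecutive zero
     * bits in a 64-bit word, or -1 if no such run exists. Mirrors the
     * bit/count bookkeeping in simple_pma_alloc_page() above.
     */
    static int find_zero_run(unsigned long long bits, unsigned int need)
    {
        unsigned int bit, count = 0;

        for (bit = 0; bit < 64; bit++) {
            if (!(bits & (1ULL << bit))) {
                if (++count >= need)
                    return (int)(bit + 1 - need);
            } else {
                count = 0;      /* run broken by an allocated page */
            }
        }
        return -1;
    }

For example, find_zero_run(0x0F, 2) returns 4, matching where small_granularity_alloc() would place a 2-page allocation in a word whose low four pages are taken.
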
++
++static phys_addr_t simple_pma_get_phys_addr(
++	struct protected_memory_allocator_device *pma_dev,
++	struct protected_memory_allocation *pma)
++{
++	struct simple_pma_device *const epma_dev =
++		container_of(pma_dev, struct simple_pma_device, pma_dev);
++
++	dev_dbg(epma_dev->dev, "%s(pma_dev=%px, pma=%px, pa=%llx)\n",
++		__func__, (void *)pma_dev, (void *)pma,
++		(unsigned long long)pma->pa);
++
++	return pma->pa;
++}
++
++static void simple_pma_free_page(
++	struct protected_memory_allocator_device *pma_dev,
++	struct protected_memory_allocation *pma)
++{
++	struct simple_pma_device *const epma_dev =
++		container_of(pma_dev, struct simple_pma_device, pma_dev);
++	size_t num_pages_in_allocation;
++	size_t offset;
++	size_t i;
++	size_t bitfield_idx;
++	size_t bitfield_start_bit;
++	size_t page_num;
++	u64 *bitfield;
++	size_t alloc_pages_bitmap_size;
++	size_t num_bitfield_elems_used_by_alloc;
++
++	WARN_ON(pma == NULL);
++
++	dev_dbg(epma_dev->dev, "%s(pma_dev=%px, pma=%px, pa=%llx)\n",
++		__func__, (void *)pma_dev, (void *)pma,
++		(unsigned long long)pma->pa);
++
++	WARN_ON(pma->pa < epma_dev->rmem_base);
++
++	/* This is an example function that follows an extremely simple logic
++	 * and is vulnerable to abuse.
++	 */
++	offset = (pma->pa - epma_dev->rmem_base);
++	num_pages_in_allocation = (size_t)1 << pma->order;
++
++	/* The number of bitfield elements used by the allocation */
++	num_bitfield_elems_used_by_alloc = num_pages_in_allocation / PAGES_PER_BITFIELD_ELEM;
++
++	/* The page number of the first page of the allocation, relative to rmem_base */
++	page_num = offset >> PAGE_SHIFT;
++
++	/* Which u64 bitfield refers to this page? */
++	bitfield_idx = page_num / PAGES_PER_BITFIELD_ELEM;
++
++	alloc_pages_bitmap_size = ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size);
++
++	/* Is the allocation within expected bounds? */
++	WARN_ON((bitfield_idx + num_bitfield_elems_used_by_alloc) >= alloc_pages_bitmap_size);
++
++	spin_lock(&epma_dev->rmem_lock);
++
++	if (pma->order < ORDER_OF_PAGES_PER_BITFIELD_ELEM) {
++		bitfield = &epma_dev->allocated_pages_bitfield_arr[bitfield_idx];
++
++		/* Which bit within that u64 bitfield is the lsb covering this allocation?  */
++		bitfield_start_bit = page_num % PAGES_PER_BITFIELD_ELEM;
++
++		/* Clear the bits for the pages we're now freeing */
++		*bitfield &= ~(((1ULL << num_pages_in_allocation) - 1) << bitfield_start_bit);
++	} else {
++		WARN(page_num % PAGES_PER_BITFIELD_ELEM,
++		     "%s: Expecting allocs of order >= %d to be %zu-page aligned\n",
++		     __FUNCTION__, ORDER_OF_PAGES_PER_BITFIELD_ELEM, PAGES_PER_BITFIELD_ELEM);
++
++		for (i = 0; i < num_bitfield_elems_used_by_alloc; i++) {
++			bitfield = &epma_dev->allocated_pages_bitfield_arr[bitfield_idx + i];
++
++			/* We expect all bits to be set (all pages allocated) */
++			WARN((*bitfield != ~0),
++			     "%s: alloc being freed is not fully allocated: of=%zu np=%zu bf=%.16llx\n",
++			     __FUNCTION__, offset, num_pages_in_allocation, *bitfield);
++
++			/*
++			 * Now clear all the bits in the bitfield element to mark all the pages
++			 * it refers to as free.
++			 */
++			*bitfield = 0ULL;
++		}
++	}
++
++	epma_dev->num_free_pages += num_pages_in_allocation;
++	spin_unlock(&epma_dev->rmem_lock);
++	devm_kfree(epma_dev->dev, pma);
++}
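
The bitmask arithmetic in the small-allocation branch above is worth spelling out. A self-contained check, with illustrative values only:

    #include <assert.h>

    int main(void)
    {
        /* An order-2 allocation (4 pages) starting at bit 4. */
        unsigned long long bitfield = 0xF0ULL;     /* bits 4..7 allocated */
        unsigned long long num_pages = 1ULL << 2;  /* order 2 -> 4 pages  */
        unsigned long long mask = ((1ULL << num_pages) - 1) << 4;

        assert(mask == 0xF0ULL); /* four ones, shifted to the start bit */
        bitfield &= ~mask;       /* same operation as simple_pma_free_page() */
        assert(bitfield == 0);   /* all pages free again */
        return 0;
    }
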
++
++static int protected_memory_allocator_probe(struct platform_device *pdev)
++{
++	struct simple_pma_device *epma_dev;
++	struct device_node *np;
++	phys_addr_t rmem_base;
++	size_t rmem_size;
++	size_t alloc_bitmap_pages_arr_size;
++#if (KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE)
++	struct reserved_mem *rmem;
++#endif
++
++	np = pdev->dev.of_node;
++
++	if (!np) {
++		dev_err(&pdev->dev, "device node pointer not set\n");
++		return -ENODEV;
++	}
++
++	np = of_parse_phandle(np, "memory-region", 0);
++	if (!np) {
++		dev_err(&pdev->dev, "memory-region node not set\n");
++		return -ENODEV;
++	}
++
++#if (KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE)
++	rmem = of_reserved_mem_lookup(np);
++	if (rmem) {
++		rmem_base = rmem->base;
++		rmem_size = rmem->size >> PAGE_SHIFT;
++	} else
++#endif
++	{
++		of_node_put(np);
++		dev_err(&pdev->dev, "could not read reserved memory-region\n");
++		return -ENODEV;
++	}
++
++	of_node_put(np);
++	epma_dev = devm_kzalloc(&pdev->dev, sizeof(*epma_dev), GFP_KERNEL);
++	if (!epma_dev)
++		return -ENOMEM;
++
++	epma_dev->pma_dev.ops.pma_alloc_page = simple_pma_alloc_page;
++	epma_dev->pma_dev.ops.pma_get_phys_addr = simple_pma_get_phys_addr;
++	epma_dev->pma_dev.ops.pma_free_page = simple_pma_free_page;
++	epma_dev->pma_dev.owner = THIS_MODULE;
++	epma_dev->dev = &pdev->dev;
++	epma_dev->rmem_base = rmem_base;
++	epma_dev->rmem_size = rmem_size;
++	epma_dev->num_free_pages = rmem_size;
++	spin_lock_init(&epma_dev->rmem_lock);
++
++	alloc_bitmap_pages_arr_size = ALLOC_PAGES_BITFIELD_ARR_SIZE(epma_dev->rmem_size);
++
++	epma_dev->allocated_pages_bitfield_arr = devm_kzalloc(&pdev->dev,
++		alloc_bitmap_pages_arr_size * BITFIELD_ELEM_SIZE, GFP_KERNEL);
++
++	if (!epma_dev->allocated_pages_bitfield_arr) {
++		dev_err(&pdev->dev, "failed to allocate resources\n");
++		devm_kfree(&pdev->dev, epma_dev);
++		return -ENOMEM;
++	}
++
++	if (epma_dev->rmem_size % PAGES_PER_BITFIELD_ELEM) {
++		size_t extra_pages =
++			alloc_bitmap_pages_arr_size * PAGES_PER_BITFIELD_ELEM -
++			epma_dev->rmem_size;
++		size_t last_bitfield_index = alloc_bitmap_pages_arr_size - 1;
++
++		/* Mark the extra pages (that lie outside the reserved range) as
++		 * always in use.
++		 */
++		epma_dev->allocated_pages_bitfield_arr[last_bitfield_index] =
++			((1ULL << extra_pages) - 1) <<
++			(PAGES_PER_BITFIELD_ELEM - extra_pages);
++	}
++
++	platform_set_drvdata(pdev, &epma_dev->pma_dev);
++	dev_info(&pdev->dev,
++		"Protected memory allocator probed successfully\n");
++	dev_info(&pdev->dev, "Protected memory region: base=%llx num pages=%zu\n",
++		(unsigned long long)rmem_base, rmem_size);
++
++	return 0;
++}
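
A worked example of the tail-marking logic above, with a hypothetical region of rmem_size = 100 pages: ALLOC_PAGES_BITFIELD_ARR_SIZE(100) = 2, so extra_pages = 2 * 64 - 100 = 28. The last bitfield element is then set to ((1ULL << 28) - 1) << 36, i.e. its top 28 bits, so the 28 page slots beyond the end of the reserved region (pages 100..127) can never be handed out.
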
++
++static int protected_memory_allocator_remove(struct platform_device *pdev)
++{
++	struct protected_memory_allocator_device *pma_dev =
++		platform_get_drvdata(pdev);
++	struct simple_pma_device *epma_dev;
++	struct device *dev;
++
++	if (!pma_dev)
++		return -EINVAL;
++
++	epma_dev = container_of(pma_dev, struct simple_pma_device, pma_dev);
++	dev = epma_dev->dev;
++
++	if (epma_dev->num_free_pages < epma_dev->rmem_size) {
++		dev_warn(&pdev->dev, "Leaking %zu pages of protected memory\n",
++			epma_dev->rmem_size - epma_dev->num_free_pages);
++	}
++
++	platform_set_drvdata(pdev, NULL);
++	devm_kfree(dev, epma_dev->allocated_pages_bitfield_arr);
++	devm_kfree(dev, epma_dev);
++
++	dev_info(&pdev->dev,
++		"Protected memory allocator removed successfully\n");
++
++	return 0;
++}
++
++static const struct of_device_id protected_memory_allocator_dt_ids[] = {
++	{ .compatible = "arm,protected-memory-allocator" },
++	{ /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, protected_memory_allocator_dt_ids);
++
++static struct platform_driver protected_memory_allocator_driver = {
++	.probe = protected_memory_allocator_probe,
++	.remove = protected_memory_allocator_remove,
++	.driver = {
++		.name = "simple_protected_memory_allocator",
++		.of_match_table = of_match_ptr(protected_memory_allocator_dt_ids),
++	}
++};
++
++module_platform_driver(protected_memory_allocator_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("ARM Ltd.");
++MODULE_VERSION("1.0");
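
For reference, a minimal sketch of how a client would exercise the three ops registered in probe above, through the generic protected_memory_allocator interface. pma_smoke_test is a hypothetical helper, not part of this patch:

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/protected_memory_allocator.h>

    /* Allocate one protected page (order 0), query its physical
     * address, then free it again.
     */
    static int pma_smoke_test(struct protected_memory_allocator_device *pma_dev)
    {
        struct protected_memory_allocation *pma;
        phys_addr_t pa;

        pma = pma_dev->ops.pma_alloc_page(pma_dev, 0);
        if (!pma)
            return -ENOMEM;

        pa = pma_dev->ops.pma_get_phys_addr(pma_dev, pma);
        pr_debug("protected page at %pa\n", &pa);

        pma_dev->ops.pma_free_page(pma_dev, pma);
        return 0;
    }
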
+diff --git a/dvalin/kernel/drivers/base/dma_buf_test_exporter/Kconfig b/dvalin/kernel/drivers/base/dma_buf_test_exporter/Kconfig
+deleted file mode 100644
+index 66ca1bc..0000000
+--- a/dvalin/kernel/drivers/base/dma_buf_test_exporter/Kconfig
++++ /dev/null
+@@ -1,26 +0,0 @@
+-#
+-# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+-#
+-# This program is free software and is provided to you under the terms of the
+-# GNU General Public License version 2 as published by the Free Software
+-# Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
+-#
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+-# GNU General Public License for more details.
+-#
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, you can access it online at
+-# http://www.gnu.org/licenses/gpl-2.0.html.
+-#
+-# SPDX-License-Identifier: GPL-2.0
+-#
+-#
+-
+-config DMA_SHARED_BUFFER_TEST_EXPORTER
+-	tristate "Test exporter for the dma-buf framework"
+-	depends on DMA_SHARED_BUFFER
+-	help
+-	  This option enables the test exporter usable to help test importerts.
+diff --git a/dvalin/kernel/drivers/base/dma_buf_test_exporter/Makefile b/dvalin/kernel/drivers/base/dma_buf_test_exporter/Makefile
+deleted file mode 100644
+index 528582c..0000000
+--- a/dvalin/kernel/drivers/base/dma_buf_test_exporter/Makefile
++++ /dev/null
+@@ -1,36 +0,0 @@
+-#
+-# (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+-#
+-# This program is free software and is provided to you under the terms of the
+-# GNU General Public License version 2 as published by the Free Software
+-# Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
+-#
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+-# GNU General Public License for more details.
+-#
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, you can access it online at
+-# http://www.gnu.org/licenses/gpl-2.0.html.
+-#
+-# SPDX-License-Identifier: GPL-2.0
+-#
+-#
+-
+-# linux build system bootstrap for out-of-tree module
+-
+-# default to building for the host
+-ARCH ?= $(shell uname -m)
+-
+-ifeq ($(KDIR),)
+-$(error Must specify KDIR to point to the kernel to target))
+-endif
+-
+-all:
+-	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../include" CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER=m
+-
+-clean:
+-	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+-
+diff --git a/dvalin/kernel/drivers/base/dma_buf_test_exporter/build.bp b/dvalin/kernel/drivers/base/dma_buf_test_exporter/build.bp
+deleted file mode 100644
+index 7b0bd5d..0000000
+--- a/dvalin/kernel/drivers/base/dma_buf_test_exporter/build.bp
++++ /dev/null
+@@ -1,26 +0,0 @@
+-/*
+- *
+- * (C) COPYRIGHT 2017, 2020 ARM Limited. All rights reserved.
+- *
+- * This program is free software and is provided to you under the terms of the
+- * GNU General Public License version 2 as published by the Free Software
+- * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
+- *
+- * A copy of the licence is included with the program, and can also be obtained
+- * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+- * Boston, MA 02110-1301, USA.
+- *
+- */
+-
+-bob_kernel_module {
+-    name: "dma-buf-test-exporter",
+-    srcs: [
+-        "Kbuild",
+-        "dma-buf-test-exporter.c",
+-    ],
+-    kbuild_options: [
+-        "CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER=m",
+-    ],
+-    defaults: ["kernel_defaults"],
+-}
+diff --git a/dvalin/kernel/drivers/base/memory_group_manager/Makefile b/dvalin/kernel/drivers/base/memory_group_manager/Makefile
+deleted file mode 100644
+index a5bceae..0000000
+--- a/dvalin/kernel/drivers/base/memory_group_manager/Makefile
++++ /dev/null
+@@ -1,35 +0,0 @@
+-#
+-# (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+-#
+-# This program is free software and is provided to you under the terms of the
+-# GNU General Public License version 2 as published by the Free Software
+-# Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
+-#
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+-# GNU General Public License for more details.
+-#
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, you can access it online at
+-# http://www.gnu.org/licenses/gpl-2.0.html.
+-#
+-# SPDX-License-Identifier: GPL-2.0
+-#
+-#
+-
+-# linux build system bootstrap for out-of-tree module
+-
+-# default to building for the host
+-ARCH ?= $(shell uname -m)
+-
+-ifeq ($(KDIR),)
+-$(error Must specify KDIR to point to the kernel to target))
+-endif
+-
+-all:
+-	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../include" modules CONFIG_MALI_MEMORY_GROUP_MANAGER=m
+-
+-clean:
+-	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+diff --git a/dvalin/kernel/drivers/base/memory_group_manager/build.bp b/dvalin/kernel/drivers/base/memory_group_manager/build.bp
+deleted file mode 100644
+index 04dbfd3..0000000
+--- a/dvalin/kernel/drivers/base/memory_group_manager/build.bp
++++ /dev/null
+@@ -1,22 +0,0 @@
+-/*
+- * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+- *
+- * This program is free software and is provided to you under the terms of the
+- * GNU General Public License version 2 as published by the Free Software
+- * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
+- *
+- * A copy of the licence is included with the program, and can also be obtained
+- * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+- * Boston, MA  02110-1301, USA.
+- */
+-
+-bob_kernel_module {
+-    name: "memory_group_manager",
+-    srcs: [
+-        "Kbuild",
+-        "memory_group_manager.c",
+-    ],
+-    kbuild_options: ["CONFIG_MALI_MEMORY_GROUP_MANAGER=m"],
+-    defaults: ["kernel_defaults"],
+-}
+diff --git a/dvalin/kernel/drivers/base/protected_memory_allocator/Makefile b/dvalin/kernel/drivers/base/protected_memory_allocator/Makefile
+deleted file mode 100644
+index 17b2600..0000000
+--- a/dvalin/kernel/drivers/base/protected_memory_allocator/Makefile
++++ /dev/null
+@@ -1,35 +0,0 @@
+-#
+-# (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+-#
+-# This program is free software and is provided to you under the terms of the
+-# GNU General Public License version 2 as published by the Free Software
+-# Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
+-#
+-# This program is distributed in the hope that it will be useful,
+-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+-# GNU General Public License for more details.
+-#
+-# You should have received a copy of the GNU General Public License
+-# along with this program; if not, you can access it online at
+-# http://www.gnu.org/licenses/gpl-2.0.html.
+-#
+-# SPDX-License-Identifier: GPL-2.0
+-#
+-#
+-
+-# linux build system bootstrap for out-of-tree module
+-
+-# default to building for the host
+-ARCH ?= $(shell uname -m)
+-
+-ifeq ($(KDIR),)
+-$(error Must specify KDIR to point to the kernel to target))
+-endif
+-
+-all:
+-	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../include" modules CONFIG_MALI_PROTECTED_MEMORY_ALLOCATOR=m
+-
+-clean:
+-	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+diff --git a/dvalin/kernel/drivers/base/protected_memory_allocator/build.bp b/dvalin/kernel/drivers/base/protected_memory_allocator/build.bp
+deleted file mode 100644
+index 165b17b..0000000
+--- a/dvalin/kernel/drivers/base/protected_memory_allocator/build.bp
++++ /dev/null
+@@ -1,26 +0,0 @@
+-/*
+- * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+- *
+- * This program is free software and is provided to you under the terms of the
+- * GNU General Public License version 2 as published by the Free Software
+- * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
+- *
+- * A copy of the licence is included with the program, and can also be obtained
+- * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+- * Boston, MA  02110-1301, USA.
+- */
+-
+-bob_kernel_module {
+-    name: "protected_memory_allocator",
+-    srcs: [
+-        "Kbuild",
+-        "protected_memory_allocator.c",
+-    ],
+-    kbuild_options: ["CONFIG_MALI_PROTECTED_MEMORY_ALLOCATOR=m"],
+-    defaults: ["kernel_defaults"],
+-    enabled: false,
+-    build_csf_only_module: {
+-        enabled: true,
+-    },
+-}
+diff --git a/dvalin/kernel/drivers/base/protected_memory_allocator/protected_memory_allocator.c b/dvalin/kernel/drivers/base/protected_memory_allocator/protected_memory_allocator.c
+deleted file mode 100644
+index bb0b1dd..0000000
+--- a/dvalin/kernel/drivers/base/protected_memory_allocator/protected_memory_allocator.c
++++ /dev/null
+@@ -1,308 +0,0 @@
+-/*
+- *
+- * (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
+- *
+- * This program is free software and is provided to you under the terms of the
+- * GNU General Public License version 2 as published by the Free Software
+- * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, you can access it online at
+- * http://www.gnu.org/licenses/gpl-2.0.html.
+- *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+- */
+-
+-#include <linux/version.h>
+-#include <linux/of.h>
+-#include <linux/of_reserved_mem.h>
+-#include <linux/platform_device.h>
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <linux/mm.h>
+-#include <linux/io.h>
+-#include <linux/protected_memory_allocator.h>
+-
+-/**
+- * struct simple_pma_device - Simple implementation of a protected memory
+- *                            allocator device
+- *
+- * @pma_dev:        Protected memory allocator device pointer
+- * @dev:            Device pointer
+- * @alloc_pages:    Status of all the physical memory pages within the
+- *                  protected memory region; true for allocated pages
+- * @rmem_base:      Base address of the reserved memory region
+- * @rmem_size:      Size of the reserved memory region, in pages
+- * @free_pa_offset: Offset of the lowest physical address within the protected
+- *                  memory region that is currently associated with a free page
+- * @num_free_pages: Number of free pages in the memory region
+- */
+-struct simple_pma_device {
+-	struct protected_memory_allocator_device pma_dev;
+-	struct device *dev;
+-	bool *alloc_pages;
+-	phys_addr_t rmem_base;
+-	size_t rmem_size;
+-	size_t free_pa_offset;
+-	size_t num_free_pages;
+-};
+-
+-static struct protected_memory_allocation *simple_pma_alloc_page(
+-	struct protected_memory_allocator_device *pma_dev, unsigned int order)
+-{
+-	struct simple_pma_device *const epma_dev =
+-		container_of(pma_dev, struct simple_pma_device, pma_dev);
+-	struct protected_memory_allocation *pma;
+-	size_t num_pages;
+-	size_t i;
+-
+-	dev_dbg(epma_dev->dev, "%s(pma_dev=%px, order=%u\n",
+-		__func__, (void *)pma_dev, order);
+-
+-	/* This is an example function that follows an extremely simple logic
+-	 * and is very likely to fail to allocate memory if put under stress.
+-	 *
+-	 * The simple_pma_device maintains an array of booleans to track
+-	 * the status of every page and an offset to the free page to use
+-	 * for the next allocation. The offset starts from 0 and can only grow,
+-	 * and be reset when the end of the memory region is reached.
+-	 *
+-	 * In order to create a memory allocation, the allocator simply looks
+-	 * at the offset and verifies whether there are enough free pages
+-	 * after it to accommodate the allocation request. If successful,
+-	 * the allocator shall mark all the pages as allocated and increment
+-	 * the offset accordingly.
+-	 *
+-	 * The allocator does not look for any other free pages inside the
+-	 * memory region, even if plenty of free memory is available.
+-	 * Free memory pages are counted and the offset is ignored if the
+-	 * memory region is fully allocated.
+-	 */
+-
+-	/* The only candidate for allocation is the sub-region starting
+-	 * from the free_pa_offset. Verify that enough contiguous pages
+-	 * are available and that they are all free.
+-	 */
+-	num_pages = (size_t)1 << order;
+-
+-	if (epma_dev->num_free_pages < num_pages)
+-		dev_err(epma_dev->dev, "not enough free pages\n");
+-
+-	if (epma_dev->free_pa_offset + num_pages > epma_dev->rmem_size) {
+-		dev_err(epma_dev->dev, "not enough contiguous pages\n");
+-		return NULL;
+-	}
+-
+-	for (i = 0; i < num_pages; i++)
+-		if (epma_dev->alloc_pages[epma_dev->free_pa_offset + i])
+-			break;
+-
+-	if (i < num_pages) {
+-		dev_err(epma_dev->dev, "free pages are not contiguous\n");
+-		return NULL;
+-	}
+-
+-	/* Memory allocation is successful. Mark pages as allocated.
+-	 * Update the free_pa_offset if free pages are still available:
+-	 * increment the free_pa_offset accordingly, and then making sure
+-	 * that it points at the next free page, potentially wrapping over
+-	 * the end of the memory region.
+-	 */
+-	pma = devm_kzalloc(epma_dev->dev, sizeof(*pma), GFP_KERNEL);
+-	if (!pma)
+-		return NULL;
+-
+-	pma->pa = epma_dev->rmem_base + (epma_dev->free_pa_offset << PAGE_SHIFT);
+-	pma->order = order;
+-
+-	for (i = 0; i < num_pages; i++)
+-		epma_dev->alloc_pages[epma_dev->free_pa_offset + i] = true;
+-
+-	epma_dev->num_free_pages -= num_pages;
+-
+-	if (epma_dev->num_free_pages) {
+-		epma_dev->free_pa_offset += num_pages;
+-		i = 0;
+-		while (epma_dev->alloc_pages[epma_dev->free_pa_offset + i]) {
+-			epma_dev->free_pa_offset++;
+-			if (epma_dev->free_pa_offset > epma_dev->rmem_size)
+-				epma_dev->free_pa_offset = 0;
+-		}
+-	}
+-
+-	return pma;
+-}
+-
+-static phys_addr_t simple_pma_get_phys_addr(
+-	struct protected_memory_allocator_device *pma_dev,
+-	struct protected_memory_allocation *pma)
+-{
+-	struct simple_pma_device *const epma_dev =
+-		container_of(pma_dev, struct simple_pma_device, pma_dev);
+-
+-	dev_dbg(epma_dev->dev, "%s(pma_dev=%px, pma=%px, pa=%llx\n",
+-		__func__, (void *)pma_dev, (void *)pma, pma->pa);
+-
+-	return pma->pa;
+-}
+-
+-static void simple_pma_free_page(
+-	struct protected_memory_allocator_device *pma_dev,
+-	struct protected_memory_allocation *pma)
+-{
+-	struct simple_pma_device *const epma_dev =
+-		container_of(pma_dev, struct simple_pma_device, pma_dev);
+-	size_t num_pages;
+-	size_t offset;
+-	size_t i;
+-
+-	dev_dbg(epma_dev->dev, "%s(pma_dev=%px, pma=%px, pa=%llx\n",
+-		__func__, (void *)pma_dev, (void *)pma, pma->pa);
+-
+-	/* This is an example function that follows an extremely simple logic
+-	 * and is vulnerable to abuse. For instance, double frees won't be
+-	 * detected.
+-	 *
+-	 * If memory is full, must update the free_pa_offset that is currently
+-	 * pointing at an allocated page.
+-	 *
+-	 * Increase the number of free pages and mark them as free.
+-	 */
+-	offset = (pma->pa - epma_dev->rmem_base) >> PAGE_SHIFT;
+-	num_pages = (size_t)1 << pma->order;
+-
+-	if (epma_dev->num_free_pages == 0)
+-		epma_dev->free_pa_offset = offset;
+-
+-	epma_dev->num_free_pages += num_pages;
+-	for (i = 0; i < num_pages; i++)
+-		epma_dev->alloc_pages[offset + i] = false;
+-
+-	devm_kfree(epma_dev->dev, pma);
+-}
+-
+-static int protected_memory_allocator_probe(struct platform_device *pdev)
+-{
+-	struct simple_pma_device *epma_dev;
+-	struct device_node *np;
+-	phys_addr_t rmem_base;
+-	size_t rmem_size;
+-#if (KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE)
+-	struct reserved_mem *rmem;
+-#endif
+-
+-	np = pdev->dev.of_node;
+-
+-	if (!np) {
+-		dev_err(&pdev->dev, "device node pointer not set\n");
+-		return -ENODEV;
+-	}
+-
+-	np = of_parse_phandle(np, "memory-region", 0);
+-	if (!np) {
+-		dev_err(&pdev->dev, "memory-region node not set\n");
+-		return -ENODEV;
+-	}
+-
+-#if (KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE)
+-	rmem = of_reserved_mem_lookup(np);
+-	if (rmem) {
+-		rmem_base = rmem->base;
+-		rmem_size = rmem->size >> PAGE_SHIFT;
+-	} else
+-#endif
+-	{
+-		of_node_put(np);
+-		dev_err(&pdev->dev, "could not read reserved memory-region\n");
+-		return -ENODEV;
+-	}
+-
+-	of_node_put(np);
+-	epma_dev = devm_kzalloc(&pdev->dev, sizeof(*epma_dev), GFP_KERNEL);
+-	if (!epma_dev)
+-		return -ENOMEM;
+-
+-	epma_dev->pma_dev.ops.pma_alloc_page = simple_pma_alloc_page;
+-	epma_dev->pma_dev.ops.pma_get_phys_addr = simple_pma_get_phys_addr;
+-	epma_dev->pma_dev.ops.pma_free_page = simple_pma_free_page;
+-	epma_dev->pma_dev.owner = THIS_MODULE;
+-	epma_dev->dev = &pdev->dev;
+-	epma_dev->rmem_base = rmem_base;
+-	epma_dev->rmem_size = rmem_size;
+-	epma_dev->free_pa_offset = 0;
+-	epma_dev->num_free_pages = rmem_size;
+-
+-	epma_dev->alloc_pages =	devm_kzalloc(&pdev->dev,
+-		sizeof(bool) * epma_dev->rmem_size, GFP_KERNEL);
+-
+-	if (!epma_dev->alloc_pages) {
+-		dev_err(&pdev->dev, "failed to allocate resources\n");
+-		devm_kfree(&pdev->dev, epma_dev);
+-		return -ENOMEM;
+-	}
+-
+-	platform_set_drvdata(pdev, &epma_dev->pma_dev);
+-	dev_info(&pdev->dev,
+-		"Protected memory allocator probed successfully\n");
+-	dev_info(&pdev->dev, "Protected memory region: base=%llx num pages=%zu\n",
+-		rmem_base, rmem_size);
+-
+-	return 0;
+-}
+-
+-static int protected_memory_allocator_remove(struct platform_device *pdev)
+-{
+-	struct protected_memory_allocator_device *pma_dev =
+-		platform_get_drvdata(pdev);
+-	struct simple_pma_device *epma_dev;
+-	struct device *dev;
+-
+-	if (!pma_dev)
+-		return -EINVAL;
+-
+-	epma_dev = container_of(pma_dev, struct simple_pma_device, pma_dev);
+-	dev = epma_dev->dev;
+-
+-	if (epma_dev->num_free_pages < epma_dev->rmem_size) {
+-		dev_warn(&pdev->dev, "Leaking %zu pages of protected memory\n",
+-			epma_dev->rmem_size - epma_dev->num_free_pages);
+-	}
+-
+-	platform_set_drvdata(pdev, NULL);
+-	devm_kfree(dev, epma_dev->alloc_pages);
+-	devm_kfree(dev, epma_dev);
+-
+-	dev_info(&pdev->dev,
+-		"Protected memory allocator removed successfully\n");
+-
+-	return 0;
+-}
+-
+-static const struct of_device_id protected_memory_allocator_dt_ids[] = {
+-	{ .compatible = "arm,protected-memory-allocator" },
+-	{ /* sentinel */ }
+-};
+-MODULE_DEVICE_TABLE(of, protected_memory_allocator_dt_ids);
+-
+-static struct platform_driver protected_memory_allocator_driver = {
+-	.probe = protected_memory_allocator_probe,
+-	.remove = protected_memory_allocator_remove,
+-	.driver = {
+-		.name = "simple_protected_memory_allocator",
+-		.owner = THIS_MODULE,
+-		.of_match_table = of_match_ptr(protected_memory_allocator_dt_ids),
+-	}
+-};
+-
+-module_platform_driver(protected_memory_allocator_driver);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_AUTHOR("ARM Ltd.");
+-MODULE_VERSION("1.0");
+diff --git a/dvalin/kernel/drivers/gpu/arm/Kbuild b/dvalin/kernel/drivers/gpu/arm/Kbuild
+index 1a6fa3c..52ea5fb 100644
+--- a/dvalin/kernel/drivers/gpu/arm/Kbuild
++++ b/dvalin/kernel/drivers/gpu/arm/Kbuild
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2012, 2020-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,9 +16,6 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+ #
+-#
+-
+ 
+ obj-$(CONFIG_MALI_MIDGARD) += midgard/
+diff --git a/dvalin/kernel/drivers/gpu/arm/Kconfig b/dvalin/kernel/drivers/gpu/arm/Kconfig
+index 693b86f..2da8c98 100644
+--- a/dvalin/kernel/drivers/gpu/arm/Kconfig
++++ b/dvalin/kernel/drivers/gpu/arm/Kconfig
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2012, 2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,10 +16,7 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+ #
+-#
+-
+ 
+ menu "ARM GPU Configuration"
+ source "drivers/gpu/arm/midgard/Kconfig"
+diff --git a/dvalin/kernel/drivers/base/protected_memory_allocator/Kbuild b/dvalin/kernel/drivers/gpu/arm/Makefile
+similarity index 77%
+rename from dvalin/kernel/drivers/base/protected_memory_allocator/Kbuild
+rename to dvalin/kernel/drivers/gpu/arm/Makefile
+index 241aeb9..ea9ecc7 100644
+--- a/dvalin/kernel/drivers/base/protected_memory_allocator/Kbuild
++++ b/dvalin/kernel/drivers/gpu/arm/Makefile
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,8 +16,6 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+-#
+ #
+ 
+-obj-$(CONFIG_MALI_PROTECTED_MEMORY_ALLOCATOR) := protected_memory_allocator.o
+\ No newline at end of file
++include midgard/Makefile
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/Kbuild b/dvalin/kernel/drivers/gpu/arm/midgard/Kbuild
+index fa52548..d3c4ee1 100755
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/Kbuild
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/Kbuild
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2012-2020 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2012-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,202 +16,240 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+ #
++
++# Make $(src) an absolute path if it is not already, by prefixing $(srctree).
++# This prevents build issues caused by a wrong path.
++src:=$(if $(patsubst /%,,$(src)),$(srctree)/$(src),$(src))
++
+ #
++# Prevent misuse when Kernel configurations are not present by default
++# in out-of-tree builds
++#
++ifneq ($(CONFIG_ANDROID),n)
++ifeq ($(CONFIG_GPU_TRACEPOINTS),n)
++    $(error CONFIG_GPU_TRACEPOINTS must be set in Kernel configuration)
++endif
++endif
+ 
++ifeq ($(CONFIG_DMA_SHARED_BUFFER),n)
++    $(error CONFIG_DMA_SHARED_BUFFER must be set in Kernel configuration)
++endif
+ 
+-# Driver version string which is returned to userspace via an ioctl
+-MALI_RELEASE_NAME ?= "r25p0-01rel0"
++ifeq ($(CONFIG_PM_DEVFREQ),n)
++    $(error CONFIG_PM_DEVFREQ must be set in Kernel configuration)
++endif
+ 
+-# Paths required for build
+-# make $(src) as absolute path if it isn't already, by prefixing $(srctree)
+-src:=$(if $(patsubst /%,,$(src)),$(srctree)/$(src),$(src))
+-KBASE_PATH = $(src)
+-KBASE_PLATFORM_PATH = $(KBASE_PATH)/platform_dummy
+-UMP_PATH = $(src)/../../../base
++ifeq ($(CONFIG_DEVFREQ_THERMAL),n)
++    $(error CONFIG_DEVFREQ_THERMAL must be set in Kernel configuration)
++endif
++
++ifeq ($(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND),n)
++    $(error CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND must be set in Kernel configuration)
++endif
++
++ifeq ($(CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS), y)
++    ifneq ($(CONFIG_DEBUG_FS), y)
++        $(error CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS depends on CONFIG_DEBUG_FS to be set in Kernel configuration)
++    endif
++endif
++
++ifeq ($(CONFIG_MALI_FENCE_DEBUG), y)
++    ifneq ($(CONFIG_SYNC), y)
++        ifneq ($(CONFIG_SYNC_FILE), y)
++            $(error CONFIG_MALI_FENCE_DEBUG depends on CONFIG_SYNC || CONFIG_SYNC_FILE to be set in Kernel configuration)
++        endif
++    endif
++endif
+ 
++#
++# Configurations
++#
++
++# Driver version string which is returned to userspace via an ioctl
++MALI_RELEASE_NAME ?= '"r32p1-01bet0"'
+ # Set up defaults if not defined by build system
+-MALI_CUSTOMER_RELEASE ?= 1
+-MALI_USE_CSF ?= 0
+-MALI_UNIT_TEST ?= 0
+-MALI_KERNEL_TEST_API ?= 0
++ifeq ($(CONFIG_MALI_DEBUG), y)
++    MALI_UNIT_TEST = 1
++    MALI_CUSTOMER_RELEASE ?= 0
++else
++    MALI_UNIT_TEST ?= 0
++    MALI_CUSTOMER_RELEASE ?= 1
++endif
+ MALI_COVERAGE ?= 0
++
+ CONFIG_MALI_PLATFORM_NAME ?= "devicetree"
++
++# Kconfig passes in the name with quotes for in-tree builds - remove them.
++MALI_PLATFORM_DIR := $(shell echo $(CONFIG_MALI_PLATFORM_NAME))
++
++ifeq ($(CONFIG_MALI_CSF_SUPPORT),y)
++    MALI_JIT_PRESSURE_LIMIT_BASE = 0
++    MALI_USE_CSF = 1
++else
++    MALI_JIT_PRESSURE_LIMIT_BASE ?= 1
++    MALI_USE_CSF ?= 0
++endif
++
++ifneq ($(CONFIG_MALI_KUTF), n)
++    MALI_KERNEL_TEST_API ?= 1
++else
++    MALI_KERNEL_TEST_API ?= 0
++endif
++
+ # Experimental features (corresponding -D definition should be appended to
+-# DEFINES below, e.g. for MALI_EXPERIMENTAL_FEATURE,
++# ccflags-y below, e.g. for MALI_EXPERIMENTAL_FEATURE,
+ # -DMALI_EXPERIMENTAL_FEATURE=$(MALI_EXPERIMENTAL_FEATURE) should be appended)
+ #
+ # Experimental features must default to disabled, e.g.:
+ # MALI_EXPERIMENTAL_FEATURE ?= 0
+-MALI_JIT_PRESSURE_LIMIT ?= 0
+ MALI_INCREMENTAL_RENDERING ?= 0
+ 
+-# Set up our defines, which will be passed to gcc
+-DEFINES = \
+-	-DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
+-	-DMALI_USE_CSF=$(MALI_USE_CSF) \
+-	-DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \
+-	-DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
+-	-DMALI_COVERAGE=$(MALI_COVERAGE) \
+-	-DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\" \
+-	-DMALI_JIT_PRESSURE_LIMIT=$(MALI_JIT_PRESSURE_LIMIT) \
+-	-DMALI_INCREMENTAL_RENDERING=$(MALI_INCREMENTAL_RENDERING)
++#
++# ccflags
++#
++ccflags-y = \
++    -DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
++    -DMALI_USE_CSF=$(MALI_USE_CSF) \
++    -DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \
++    -DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
++    -DMALI_COVERAGE=$(MALI_COVERAGE) \
++    -DMALI_RELEASE_NAME=$(MALI_RELEASE_NAME) \
++    -DMALI_JIT_PRESSURE_LIMIT_BASE=$(MALI_JIT_PRESSURE_LIMIT_BASE) \
++    -DMALI_INCREMENTAL_RENDERING=$(MALI_INCREMENTAL_RENDERING) \
++    -DMALI_KBASE_BUILD \
++    -DMALI_PLATFORM_DIR=$(MALI_PLATFORM_DIR)
++
+ 
+ ifeq ($(KBUILD_EXTMOD),)
+ # in-tree
+-DEFINES +=-DMALI_KBASE_PLATFORM_PATH=../../$(src)/platform/$(CONFIG_MALI_PLATFORM_NAME)
++    ccflags-y +=-DMALI_KBASE_PLATFORM_PATH=../../$(src)/platform/$(CONFIG_MALI_PLATFORM_NAME)
+ else
+ # out-of-tree
+-DEFINES +=-DMALI_KBASE_PLATFORM_PATH=$(src)/platform/$(CONFIG_MALI_PLATFORM_NAME)
+-endif
+-
+-DEFINES += -I$(srctree)/drivers/staging/android
+-#meson graphics start
+-ldflags-y += --strip-debug
+-#meson graphics end
+-
+-DEFINES += -DMALI_KBASE_BUILD
+-
+-# Use our defines when compiling
+-ccflags-y += $(DEFINES) -I$(KBASE_PATH)   -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
+-subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH)   -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
+-
+-SRC := \
+-	context/mali_kbase_context.c \
+-	debug/mali_kbase_debug_ktrace.c \
+-	device/mali_kbase_device.c \
+-	mali_kbase_cache_policy.c \
+-	mali_kbase_mem.c \
+-	mali_kbase_mem_pool_group.c \
+-	mali_kbase_native_mgm.c \
+-	mali_kbase_ctx_sched.c \
+-	mali_kbase_jm.c \
+-	mali_kbase_gpuprops.c \
+-	mali_kbase_pm.c \
+-	mali_kbase_config.c \
+-	mali_kbase_vinstr.c \
+-	mali_kbase_hwcnt.c \
+-	mali_kbase_hwcnt_backend_gpu.c \
+-	mali_kbase_hwcnt_gpu.c \
+-	mali_kbase_hwcnt_legacy.c \
+-	mali_kbase_hwcnt_types.c \
+-	mali_kbase_hwcnt_virtualizer.c \
+-	mali_kbase_softjobs.c \
+-	mali_kbase_hw.c \
+-	mali_kbase_debug.c \
+-	mali_kbase_gpu_memory_debugfs.c \
+-	mali_kbase_mem_linux.c \
+-	mali_kbase_core_linux.c \
+-	mali_kbase_mem_profile_debugfs.c \
+-	mmu/mali_kbase_mmu.c \
+-	mmu/mali_kbase_mmu_hw_direct.c \
+-	mmu/mali_kbase_mmu_mode_lpae.c \
+-	mmu/mali_kbase_mmu_mode_aarch64.c \
+-	mali_kbase_disjoint_events.c \
+-	mali_kbase_debug_mem_view.c \
+-	mali_kbase_smc.c \
+-	mali_kbase_mem_pool.c \
+-	mali_kbase_mem_pool_debugfs.c \
+-	mali_kbase_debugfs_helper.c \
+-	mali_kbase_strings.c \
+-	mali_kbase_as_fault_debugfs.c \
+-	mali_kbase_regs_history_debugfs.c \
+-	thirdparty/mali_kbase_mmap.c \
+-	tl/mali_kbase_timeline.c \
+-	tl/mali_kbase_timeline_io.c \
+-	tl/mali_kbase_tlstream.c \
+-	tl/mali_kbase_tracepoints.c \
+-	gpu/mali_kbase_gpu.c
+-
+-ifeq ($(MALI_USE_CSF),1)
+-	SRC += \
+-		debug/backend/mali_kbase_debug_ktrace_csf.c \
+-		device/backend/mali_kbase_device_csf.c \
+-		gpu/backend/mali_kbase_gpu_fault_csf.c \
+-		tl/backend/mali_kbase_timeline_csf.c \
+-		mmu/backend/mali_kbase_mmu_csf.c \
+-		context/backend/mali_kbase_context_csf.c
+-else
+-	SRC += \
+-		mali_kbase_dummy_job_wa.c \
+-		mali_kbase_debug_job_fault.c \
+-		mali_kbase_event.c \
+-		mali_kbase_jd.c \
+-		mali_kbase_jd_debugfs.c \
+-		mali_kbase_js.c \
+-		mali_kbase_js_ctx_attr.c \
+-		debug/backend/mali_kbase_debug_ktrace_jm.c \
+-		device/backend/mali_kbase_device_jm.c \
+-		gpu/backend/mali_kbase_gpu_fault_jm.c \
+-		tl/backend/mali_kbase_timeline_jm.c \
+-		mmu/backend/mali_kbase_mmu_jm.c \
+-		context/backend/mali_kbase_context_jm.c
++    ccflags-y +=-DMALI_KBASE_PLATFORM_PATH=$(src)/platform/$(CONFIG_MALI_PLATFORM_NAME)
+ endif
+ 
+-ifeq ($(CONFIG_MALI_CINSTR_GWT),y)
+-	SRC += mali_kbase_gwt.c
+-endif
++ccflags-y += \
++    -I$(srctree)/include/linux \
++    -I$(srctree)/drivers/staging/android \
++    -I$(src) \
++    -I$(src)/platform/$(MALI_PLATFORM_DIR) \
++    -I$(src)/../../../base \
++    -I$(src)/../../../../include
+ 
+-ifeq ($(MALI_UNIT_TEST),1)
+-	SRC += tl/mali_kbase_timeline_test.c
+-endif
++subdir-ccflags-y += $(ccflags-y)
+ 
+-ifeq ($(MALI_CUSTOMER_RELEASE),0)
+-	SRC += mali_kbase_regs_dump_debugfs.c
+-endif
++#
++# Kernel Modules
++#
++obj-$(CONFIG_MALI_MIDGARD) += mali_kbase.o
++obj-$(CONFIG_MALI_ARBITRATION) += arbitration/
++obj-$(CONFIG_MALI_KUTF)    += tests/
++
++mali_kbase-y := \
++    mali_kbase_cache_policy.o \
++    mali_kbase_ccswe.o \
++    mali_kbase_mem.o \
++    mali_kbase_mem_pool_group.o \
++    mali_kbase_native_mgm.o \
++    mali_kbase_ctx_sched.o \
++    mali_kbase_gpuprops.o \
++    mali_kbase_pm.o \
++    mali_kbase_config.o \
++    mali_kbase_vinstr.o \
++    mali_kbase_hwcnt.o \
++    mali_kbase_hwcnt_gpu.o \
++    mali_kbase_hwcnt_legacy.o \
++    mali_kbase_hwcnt_types.o \
++    mali_kbase_hwcnt_virtualizer.o \
++    mali_kbase_softjobs.o \
++    mali_kbase_hw.o \
++    mali_kbase_debug.o \
++    mali_kbase_gpu_memory_debugfs.o \
++    mali_kbase_mem_linux.o \
++    mali_kbase_core_linux.o \
++    mali_kbase_mem_profile_debugfs.o \
++    mali_kbase_disjoint_events.o \
++    mali_kbase_debug_mem_view.o \
++    mali_kbase_smc.o \
++    mali_kbase_mem_pool.o \
++    mali_kbase_mem_pool_debugfs.o \
++    mali_kbase_debugfs_helper.o \
++    mali_kbase_strings.o \
++    mali_kbase_as_fault_debugfs.o \
++    mali_kbase_regs_history_debugfs.o \
++    mali_kbase_dvfs_debugfs.o \
++    mali_power_gpu_frequency_trace.o \
++    mali_kbase_trace_gpu_mem.o
++
++mali_kbase-$(CONFIG_MALI_CINSTR_GWT) += mali_kbase_gwt.o
+ 
++mali_kbase-$(CONFIG_SYNC) += \
++    mali_kbase_sync_android.o \
++    mali_kbase_sync_common.o
+ 
+-ccflags-y += -I$(KBASE_PATH) -I$(KBASE_PATH)/debug \
+-	-I$(KBASE_PATH)/debug/backend
++mali_kbase-$(CONFIG_SYNC_FILE) += \
++    mali_kbase_fence_ops.o \
++    mali_kbase_sync_file.o \
++    mali_kbase_sync_common.o
++
++ifeq ($(CONFIG_MALI_CSF_SUPPORT),y)
++    mali_kbase-y += \
++        mali_kbase_hwcnt_backend_csf.o \
++        mali_kbase_hwcnt_backend_csf_if_fw.o
++else
++    mali_kbase-y += \
++        mali_kbase_jm.o \
++        mali_kbase_hwcnt_backend_jm.o \
++        mali_kbase_dummy_job_wa.o \
++        mali_kbase_debug_job_fault.o \
++        mali_kbase_event.o \
++        mali_kbase_jd.o \
++        mali_kbase_jd_debugfs.o \
++        mali_kbase_js.o \
++        mali_kbase_js_ctx_attr.o \
++        mali_kbase_kinstr_jm.o
++
++    mali_kbase-$(CONFIG_MALI_DMA_FENCE) += \
++        mali_kbase_fence_ops.o \
++        mali_kbase_dma_fence.o \
++        mali_kbase_fence.o
++
++    mali_kbase-$(CONFIG_SYNC_FILE) += \
++        mali_kbase_fence_ops.o \
++        mali_kbase_fence.o
++endif
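++
++# A minimal sketch of the conditional-object idiom used above, with a
++# hypothetical CONFIG_MALI_EXAMPLE option and example.o object (both
++# illustrative, not part of this driver): when the option is y or m the
++# object is linked into mali_kbase.ko; when it is n or unset the line
++# expands to "mali_kbase-n", which kbuild ignores.
++#
++#     mali_kbase-$(CONFIG_MALI_EXAMPLE) += example.o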
+ 
+-# Tell the Linux build system from which .o file to create the kernel module
+-obj-$(CONFIG_MALI_MIDGARD) += mali_kbase.o
+ 
+-# Tell the Linux build system to enable building of our .c files
+-mali_kbase-y := $(SRC:.c=.o)
++INCLUDE_SUBDIR = \
++    $(src)/context/Kbuild \
++    $(src)/debug/Kbuild \
++    $(src)/device/Kbuild \
++    $(src)/backend/gpu/Kbuild \
++    $(src)/mmu/Kbuild \
++    $(src)/tl/Kbuild \
++    $(src)/gpu/Kbuild \
++    $(src)/thirdparty/Kbuild \
++    $(src)/platform/$(MALI_PLATFORM_DIR)/Kbuild
+ 
+-# Kconfig passes in the name with quotes for in-tree builds - remove them.
+-platform_name := $(shell echo $(CONFIG_MALI_PLATFORM_NAME))
+-MALI_PLATFORM_DIR := platform/$(platform_name)
+-ccflags-y += -I$(src)/$(MALI_PLATFORM_DIR)
+-include $(src)/$(MALI_PLATFORM_DIR)/Kbuild
++ifeq ($(CONFIG_MALI_CSF_SUPPORT),y)
++    INCLUDE_SUBDIR += $(src)/csf/Kbuild
++endif
+ 
+-ifeq ($(CONFIG_MALI_DEVFREQ),y)
+-  ifeq ($(CONFIG_DEVFREQ_THERMAL),y)
+-    include $(src)/ipa/Kbuild
+-  endif
++ifeq ($(CONFIG_MALI_ARBITER_SUPPORT),y)
++    INCLUDE_SUBDIR += $(src)/arbiter/Kbuild
+ endif
+ 
+-ifeq ($(MALI_USE_CSF),1)
+-	include $(src)/csf/Kbuild
+-else
+-# empty
++ifeq ($(CONFIG_MALI_DEVFREQ),y)
++    ifeq ($(CONFIG_DEVFREQ_THERMAL),y)
++        INCLUDE_SUBDIR += $(src)/ipa/Kbuild
++    endif
+ endif
+ 
+-ifeq ($(CONFIG_MALI_ARBITER_SUPPORT),y)
+-	include $(src)/arbiter/Kbuild
++ifeq ($(KBUILD_EXTMOD),)
++# in-tree
++    -include $(INCLUDE_SUBDIR)
+ else
+-# empty
++# out-of-tree
++    include $(INCLUDE_SUBDIR)
+ endif
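++
++# Illustration (assuming the default "devicetree" platform): for in-tree
++# builds the leading dash makes a missing sub-Kbuild non-fatal, e.g.
++#
++#     -include $(src)/platform/devicetree/Kbuild
++#
++# whereas the plain "include" used for out-of-tree builds reports a
++# missing file as a hard error.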
+-
+-mali_kbase-$(CONFIG_MALI_DMA_FENCE) += \
+-	mali_kbase_dma_fence.o \
+-	mali_kbase_fence.o
+-mali_kbase-$(CONFIG_SYNC) += \
+-	mali_kbase_sync_android.o \
+-	mali_kbase_sync_common.o
+-mali_kbase-$(CONFIG_SYNC_FILE) += \
+-	mali_kbase_sync_file.o \
+-	mali_kbase_sync_common.o \
+-	mali_kbase_fence.o
+-
+-include  $(src)/backend/gpu/Kbuild
+-mali_kbase-y += $(BACKEND:.c=.o)
+-
+-
+-ccflags-y += -I$(src)/backend/gpu
+-subdir-ccflags-y += -I$(src)/backend/gpu
+-
+-# For kutf and mali_kutf_irq_latency_test
+-obj-$(CONFIG_MALI_KUTF) += tests/
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/Kconfig b/dvalin/kernel/drivers/gpu/arm/midgard/Kconfig
+index ca59dbb..5541383 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/Kconfig
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/Kconfig
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2012-2020 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2012-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,15 +16,14 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+ #
+-#
+-
+ 
+ menuconfig MALI_MIDGARD
+ 	tristate "Mali Midgard series support"
+ 	select GPU_TRACEPOINTS if ANDROID
+ 	select DMA_SHARED_BUFFER
++	select PM_DEVFREQ
++	select DEVFREQ_THERMAL
+ 	default n
+ 	help
+ 	  Enable this option to build support for a ARM Mali Midgard GPU.
+@@ -31,13 +31,43 @@ menuconfig MALI_MIDGARD
+ 	  To compile this driver as a module, choose M here:
+ 	  this will generate a single module, called mali_kbase.
+ 
+-config MALI_GATOR_SUPPORT
+-	bool "Enable Streamline tracing support"
++if MALI_MIDGARD
++
++config MALI_PLATFORM_NAME
+ 	depends on MALI_MIDGARD
++	string "Platform name"
++	default "devicetree"
++	help
++	  Enter the name of the desired platform configuration directory to
++	  include in the build. 'platform/$(MALI_PLATFORM_NAME)/Makefile' must
++	  exist.
++
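++# Example defconfig fragment for this option (values illustrative only):
++#
++#   CONFIG_MALI_MIDGARD=m
++#   CONFIG_MALI_PLATFORM_NAME="devicetree"
++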
++config MALI_REAL_HW
++	depends on MALI_MIDGARD
++	def_bool !MALI_NO_MALI
++
++menu "Platform specific options"
++source "drivers/gpu/arm/midgard/platform/Kconfig"
++endmenu
++
++config MALI_CSF_SUPPORT
++	bool "Enable Mali CSF based GPU support"
++	depends on MALI_MIDGARD=m
++	default n
++	help
++	  Enables support for CSF based GPUs.
++
++config MALI_DEVFREQ
++	bool "Enable devfreq support for Mali"
++	depends on MALI_MIDGARD && PM_DEVFREQ
++	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ 	default y
+ 	help
+-	  Enables kbase tracing used by the Arm Streamline Performance Analyzer.
+-	  The tracepoints are used to derive GPU activity charts in Streamline.
++	  Support devfreq for Mali.
++
++	  Using the devfreq framework and, by default, the simple on-demand
++	  governor, the frequency of Mali will be dynamically selected from the
++	  available OPPs.
+ 
+ config MALI_MIDGARD_DVFS
+ 	bool "Enable legacy DVFS"
+@@ -46,28 +76,25 @@ config MALI_MIDGARD_DVFS
+ 	help
+ 	  Choose this option to enable legacy DVFS in the Mali Midgard DDK.
+ 
++config MALI_GATOR_SUPPORT
++	bool "Enable Streamline tracing support"
++	depends on MALI_MIDGARD
++	default y
++	help
++	  Enables kbase tracing used by the Arm Streamline Performance Analyzer.
++	  The tracepoints are used to derive GPU activity charts in Streamline.
++
+ config MALI_MIDGARD_ENABLE_TRACE
+ 	bool "Enable kbase tracing"
+ 	depends on MALI_MIDGARD
+ 	default y if MALI_DEBUG
+ 	default n
+ 	help
+-	  Enables tracing in kbase.  Trace log available through
++	  Enables tracing in kbase. Trace log available through
+ 	  the "mali_trace" debugfs file, when the CONFIG_DEBUG_FS is enabled
+ 
+-config MALI_DEVFREQ
+-	bool "devfreq support for Mali"
+-	depends on MALI_MIDGARD && PM_DEVFREQ
+-	default y
+-	help
+-	  Support devfreq for Mali.
+-
+-	  Using the devfreq framework and, by default, the simpleondemand
+-	  governor, the frequency of Mali will be dynamically selected from the
+-	  available OPPs.
+-
+ config MALI_DMA_FENCE
+-	bool "DMA_BUF fence support for Mali"
++	bool "Enable DMA_BUF fence support for Mali"
+ 	depends on MALI_MIDGARD
+ 	default n
+ 	help
+@@ -76,18 +103,9 @@ config MALI_DMA_FENCE
+ 	  This option should only be enabled if the Linux Kernel has built in
+ 	  support for DMA_BUF fences.
+ 
+-config MALI_PLATFORM_NAME
+-	depends on MALI_MIDGARD
+-	string "Platform name"
+-	default "devicetree"
+-	help
+-	  Enter the name of the desired platform configuration directory to
+-	  include in the build. 'platform/$(MALI_PLATFORM_NAME)/Kbuild' must
+-	  exist.
+-
+ config MALI_ARBITER_SUPPORT
+ 	bool "Enable arbiter support for Mali"
+-	depends on MALI_MIDGARD
++	depends on MALI_MIDGARD && !MALI_CSF_SUPPORT
+ 	default n
+ 	help
+ 	  Enable support for the arbiter interface in the driver.
+@@ -96,18 +114,64 @@ config MALI_ARBITER_SUPPORT
+ 
+ 	  If unsure, say N.
+ 
+-# MALI_EXPERT configuration options
++config MALI_DMA_BUF_MAP_ON_DEMAND
++	bool "Enable map imported dma-bufs on demand"
++	depends on MALI_MIDGARD
++	default n
++	help
++	  This option causes kbase to set up the GPU mapping of imported
++	  dma-buf when needed to run atoms. This is the legacy behavior.
++
++	  This is intended for testing and the option will get removed in the
++	  future.
++
++config MALI_DMA_BUF_LEGACY_COMPAT
++	bool "Enable legacy compatibility cache flush on dma-buf map"
++	depends on MALI_MIDGARD && !MALI_DMA_BUF_MAP_ON_DEMAND
++	default n
++	help
++	  This option enables compatibility with legacy dma-buf mapping
++	  behavior, in which the dma-buf is mapped on import, by adding cache
++	  maintenance where MALI_DMA_BUF_MAP_ON_DEMAND would do the mapping,
++	  including a cache flush.
++
++	  This option might work around issues related to missing cache
++	  flushes in other drivers. This only has an effect for clients using
++	  UK 11.18 or older. For later UK versions it is not possible.
+ 
+ menuconfig MALI_EXPERT
+ 	depends on MALI_MIDGARD
+ 	bool "Enable Expert Settings"
+ 	default n
+ 	help
+-	  Enabling this option and modifying the default settings may produce a driver with performance or
+-	  other limitations.
++	  Enabling this option and modifying the default settings may produce
++	  a driver with performance or other limitations.
++
++if MALI_EXPERT
++
++config MALI_2MB_ALLOC
++	bool "Attempt to allocate 2MB pages"
++	depends on MALI_MIDGARD && MALI_EXPERT
++	default n
++	help
++	  Rather than allocating all GPU memory page-by-page, attempt to
++	  allocate 2MB pages from the kernel. This reduces TLB pressure and
++	  helps to prevent memory fragmentation.
++
++	  If in doubt, say N
++
++config MALI_MEMORY_FULLY_BACKED
++	bool "Enable memory fully physically-backed"
++	depends on MALI_MIDGARD && MALI_EXPERT
++	default n
++	help
++	  This option enables full physical backing of all virtual
++	  memory allocations in the kernel. Notice that this build
++	  option only affects allocations of grow-on-GPU-page-fault
++	  memory.
+ 
+ config MALI_CORESTACK
+-	bool "Support controlling power to the GPU core stack"
++	bool "Enable support of GPU core stack power control"
+ 	depends on MALI_MIDGARD && MALI_EXPERT
+ 	default n
+ 	help
+@@ -119,15 +183,48 @@ config MALI_CORESTACK
+ 
+ 	  If unsure, say N.
+ 
++comment "Platform options"
++	depends on MALI_MIDGARD && MALI_EXPERT
++
++config MALI_NO_MALI
++	bool "Enable No Mali"
++	depends on MALI_MIDGARD && MALI_EXPERT
++	default n
++	help
++	  This can be used to test the driver in a simulated environment
++	  whereby the hardware is not physically present. If the hardware is physically
++	  present it will not be used. This can be used to test the majority of the
++	  driver without needing actual hardware or for software benchmarking.
++	  All calls to the simulated hardware will complete immediately as if the hardware
++	  completed the task.
++
++config MALI_ERROR_INJECT
++	bool "Enable No Mali error injection"
++	depends on MALI_MIDGARD && MALI_EXPERT && MALI_NO_MALI
++	default n
++	help
++	  Enables insertion of errors to test module failure and recovery mechanisms.
++
++config MALI_GEM5_BUILD
++	bool "Enable build of Mali kernel driver for GEM5"
++	depends on MALI_MIDGARD && MALI_EXPERT
++	default n
++	help
++	  This option is to do a Mali GEM5 build.
++	  If unsure, say N.
++
++comment "Debug options"
++	depends on MALI_MIDGARD && MALI_EXPERT
++
+ config MALI_DEBUG
+-	bool "Debug build"
++	bool "Enable debug build"
+ 	depends on MALI_MIDGARD && MALI_EXPERT
+ 	default n
+ 	help
+ 	  Select this option for increased checking and reporting of errors.
+ 
+ config MALI_FENCE_DEBUG
+-	bool "Debug sync fence usage"
++	bool "Enable debug sync fence usage"
+ 	depends on MALI_MIDGARD && MALI_EXPERT && (SYNC || SYNC_FILE)
+ 	default y if MALI_DEBUG
+ 	help
+@@ -143,28 +240,6 @@ config MALI_FENCE_DEBUG
+ 	  The timeout can be changed at runtime through the js_soft_timeout
+ 	  device attribute, where the timeout is specified in milliseconds.
+ 
+-config MALI_NO_MALI
+-	bool "No Mali"
+-	depends on MALI_MIDGARD && MALI_EXPERT
+-	default n
+-	help
+-	  This can be used to test the driver in a simulated environment
+-	  whereby the hardware is not physically present. If the hardware is physically
+-	  present it will not be used. This can be used to test the majority of the
+-	  driver without needing actual hardware or for software benchmarking.
+-	  All calls to the simulated hardware will complete immediately as if the hardware
+-	  completed the task.
+-
+-config MALI_REAL_HW
+-	def_bool !MALI_NO_MALI
+-
+-config MALI_ERROR_INJECT
+-	bool "Error injection"
+-	depends on MALI_MIDGARD && MALI_EXPERT && MALI_NO_MALI
+-	default n
+-	help
+-	  Enables insertion of errors to test module failure and recovery mechanisms.
+-
+ config MALI_SYSTEM_TRACE
+ 	bool "Enable system event tracing support"
+ 	depends on MALI_MIDGARD && MALI_EXPERT
+@@ -176,63 +251,93 @@ config MALI_SYSTEM_TRACE
+ 	  minimal overhead when not in use. Enable only if you know what
+ 	  you are doing.
+ 
+-config MALI_2MB_ALLOC
+-	bool "Attempt to allocate 2MB pages"
++comment "Instrumentation options"
+ 	depends on MALI_MIDGARD && MALI_EXPERT
+-	default n
+-	help
+-	  Rather than allocating all GPU memory page-by-page, attempt to
+-	  allocate 2MB pages from the kernel. This reduces TLB pressure and
+-	  helps to prevent memory fragmentation.
+ 
+-	  If in doubt, say N
++choice
++	prompt "Select Performance counters set"
++	default MALI_PRFCNT_SET_PRIMARY
++	depends on MALI_MIDGARD && MALI_EXPERT
+ 
+-config MALI_PWRSOFT_765
+-	bool "PWRSOFT-765 ticket"
++config MALI_PRFCNT_SET_PRIMARY
++	bool "Primary"
+ 	depends on MALI_MIDGARD && MALI_EXPERT
+-	default n
+ 	help
+-	  PWRSOFT-765 fixes devfreq cooling devices issues. The fix was merged
+-	  in kernel v4.10, however if backported into the kernel then this
+-	  option must be manually selected.
++	  Select this option to use the primary set of performance counters.
+ 
+-	  If using kernel >= v4.10 then say N, otherwise if devfreq cooling
+-	  changes have been backported say Y to avoid compilation errors.
++config MALI_PRFCNT_SET_SECONDARY
++	bool "Secondary"
++	depends on MALI_MIDGARD && MALI_EXPERT
++	help
++	  Select this option to use the secondary set of performance counters. Kernel
++	  features that depend on an access to the primary set of counters may
++	  become unavailable. Enabling this option will prevent power management
++	  from working optimally and may cause instrumentation tools to return
++	  bogus results.
+ 
+-config MALI_MEMORY_FULLY_BACKED
+-	bool "Memory fully physically-backed"
++	  If unsure, use MALI_PRFCNT_SET_PRIMARY.
++
++config MALI_PRFCNT_SET_TERTIARY
++	bool "Tertiary"
+ 	depends on MALI_MIDGARD && MALI_EXPERT
++	help
++	  Select this option to use the tertiary set of performance counters. Kernel
++	  features that depend on an access to the primary set of counters may
++	  become unavailable. Enabling this option will prevent power management
++	  from working optimally and may cause instrumentation tools to return
++	  bogus results.
++
++	  If unsure, use MALI_PRFCNT_SET_PRIMARY.
++
++endchoice
++
++config MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS
++	bool "Enable runtime selection of performance counters set via debugfs"
++	depends on MALI_MIDGARD && MALI_EXPERT && DEBUG_FS
+ 	default n
+ 	help
+-	  This option enables full physical backing of all virtual
+-	  memory allocations in the kernel. Notice that this build
+-	  option only affects allocations of grow-on-GPU-page-fault
+-	  memory.
++	  Select this option to make the secondary set of performance counters
++	  available at runtime via debugfs. Kernel features that depend on an
++	  access to the primary set of counters may become unavailable.
+ 
+-config MALI_DMA_BUF_MAP_ON_DEMAND
+-	bool "Map imported dma-bufs on demand"
+-	depends on MALI_MIDGARD
++	  If no runtime debugfs option is set, the build time counter set
++	  choice will be used.
++
++	  This feature is unsupported and unstable, and may break at any time.
++	  Enabling this option will prevent power management from working
++	  optimally and may cause instrumentation tools to return bogus results.
++
++	  No validation is done on the debugfs input. Invalid input could cause
++	  performance counter errors. Valid inputs are the values accepted by
++	  the SET_SELECT bits of the PRFCNT_CONFIG register as defined in the
++	  architecture specification.
++
++	  If unsure, say N.
++
++config MALI_JOB_DUMP
++	bool "Enable system level support needed for job dumping"
++	depends on MALI_MIDGARD && MALI_EXPERT
+ 	default n
+ 	help
+-	  This option caused kbase to set up the GPU mapping of imported
+-	  dma-buf when needed to run atoms.  This is the legacy behaviour.
++	  Choose this option to enable system level support needed for
++	  job dumping. This is typically used for instrumentation but has
++	  minimal overhead when not in use. Enable only if you know what
++	  you are doing.
+ 
+-	  This is intended for testing and the option will get removed in the
+-	  future.
++comment "Workarounds"
++	depends on MALI_MIDGARD && MALI_EXPERT
+ 
+-config MALI_DMA_BUF_LEGACY_COMPAT
+-	bool "Enable legacy compatibility cache flush on dma-buf map"
+-	depends on MALI_MIDGARD && !MALI_DMA_BUF_MAP_ON_DEMAND
++config MALI_PWRSOFT_765
++	bool "Enable workaround for PWRSOFT-765"
++	depends on MALI_MIDGARD && MALI_EXPERT
+ 	default n
+ 	help
+-	  This option enables compatibility with legacy dma-buf mapping
+-	  behavior, then the dma-buf is mapped on import, by adding cache
+-	  maintenance where MALI_DMA_BUF_MAP_ON_DEMAND would do the mapping,
+-	  including a cache flush.
++	  PWRSOFT-765 fixes devfreq cooling devices issues. The fix was merged
++	  in kernel v4.10, however if backported into the kernel then this
++	  option must be manually selected.
+ 
+-	  This option might work-around issues related to missing cache
+-	  flushes in other drivers. This only has an effect for clients using
+-	  UK 11.18 or older. For later UK versions it is not possible.
++	  If using kernel >= v4.10 then say N, otherwise if devfreq cooling
++	  changes have been backported say Y to avoid compilation errors.
+ 
+ config MALI_HW_ERRATA_1485982_NOT_AFFECTED
+ 	bool "Disable workaround for BASE_HW_ISSUE_GPU2017_1336"
+@@ -252,58 +357,22 @@ config MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE
+ 	default n
+ 	help
+ 	  This option uses an alternative workaround for GPU2017-1336. Lowering
+-	  the GPU clock to a, platform specific, known good frequeuncy before
++	  the GPU clock to a platform-specific, known-good frequency before
+ 	  powering down the L2 cache. The clock can be specified in the device
+ 	  tree using the property, opp-mali-errata-1485982. Otherwise the
+ 	  slowest clock will be selected.
+ 
+-config MALI_GEM5_BUILD
+-	bool "Enable build of Mali kernel driver for GEM5"
+-	depends on MALI_MIDGARD
+-	default n
+-	help
+-	  This option is to do a Mali GEM5 build.
+-	  If unsure, say N.
+-
+-# Instrumentation options.
++endif
+ 
+-config MALI_JOB_DUMP
+-	bool "Enable system level support needed for job dumping"
+-	depends on MALI_MIDGARD && MALI_EXPERT
+-	default n
+-	help
+-	  Choose this option to enable system level support needed for
+-	  job dumping. This is typically used for instrumentation but has
+-	  minimal overhead when not in use. Enable only if you know what
+-	  you are doing.
+-
+-config MALI_PRFCNT_SET_SECONDARY
+-	bool "Use secondary set of performance counters"
+-	depends on MALI_MIDGARD && MALI_EXPERT
+-	default n
+-	help
+-	  Select this option to use secondary set of performance counters. Kernel
+-	  features that depend on an access to the primary set of counters may
+-	  become unavailable. Enabling this option will prevent power management
+-	  from working optimally and may cause instrumentation tools to return
+-	  bogus results.
+-
+-	  If unsure, say N.
+-
+-config MALI_PRFCNT_SET_SECONDARY_VIA_DEBUG_FS
+-	bool "Use secondary set of performance counters"
+-	depends on MALI_MIDGARD && MALI_EXPERT && !MALI_PRFCNT_SET_SECONDARY && DEBUG_FS
++config MALI_ARBITRATION
++	bool "Enable Virtualization reference code"
++	depends on MALI_MIDGARD
+ 	default n
+ 	help
+-	  Select this option to make the secondary set of performance counters
+-	  available at runtime via debugfs. Kernel features that depend on an
+-	  access to the primary set of counters may become unavailable.
+-
+-	  This feature is unsupported and unstable, and may break at any time.
+-	  Enabling this option will prevent power management from working
+-	  optimally and may cause instrumentation tools to return bogus results.
+-
++	  Enables the build of several reference modules used in the reference
++	  virtualization setup for Mali.
+ 	  If unsure, say N.
+ 
+-source "drivers/gpu/arm/midgard/platform/Kconfig"
+ source "drivers/gpu/arm/midgard/tests/Kconfig"
++
++endif
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/Makefile b/dvalin/kernel/drivers/gpu/arm/midgard/Makefile
+index 53a1209..4384e80 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/Makefile
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/Makefile
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,24 +16,200 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+ #
++
++KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
++KDIR ?= $(KERNEL_SRC)
++
++ifeq ($(KDIR),)
++    $(error Must specify KDIR to point to the kernel to target)
++endif
++
++#
++# Default configuration values
++#
++# Dependency resolution is done through statements as Kconfig
++# is not supported for out-of-tree builds.
+ #
+ 
++CONFIG_MALI_MIDGARD ?= m
++ifeq ($(CONFIG_MALI_MIDGARD),m)
++    CONFIG_MALI_GATOR_SUPPORT ?= y
++    CONFIG_MALI_ARBITRATION ?= n
++    CONFIG_MALI_PARTITION_MANAGER ?= n
++
++    ifneq ($(CONFIG_MALI_NO_MALI),y)
++        # Prevent misuse when CONFIG_MALI_NO_MALI=y
++        CONFIG_MALI_REAL_HW ?= y
++    endif
++
++    ifeq ($(CONFIG_MALI_MIDGARD_DVFS),y)
++        # Prevent misuse when CONFIG_MALI_MIDGARD_DVFS=y
++        CONFIG_MALI_DEVFREQ ?= n
++    else
++        CONFIG_MALI_DEVFREQ ?= y
++    endif
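++    # For example (invocation assumed for illustration): running
++    # "make CONFIG_MALI_MIDGARD_DVFS=y" leaves CONFIG_MALI_DEVFREQ
++    # defaulting to n above, while a plain "make" defaults it to y, so the
++    # two frequency-scaling mechanisms are not enabled together by default.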
++
++    ifeq ($(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND), y)
++        # Prevent misuse when CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND=y
++        CONFIG_MALI_DMA_BUF_LEGACY_COMPAT = n
++    endif
++
++    ifeq ($(CONFIG_BSP_HAS_HYPERVISOR),y)
++        ifneq ($(CONFIG_MALI_ARBITRATION), n)
++            CONFIG_MALI_XEN ?= m
++        endif
++    endif
++
++    #
++    # Expert/Debug/Test released configurations
++    #
++    ifeq ($(CONFIG_MALI_EXPERT), y)
++        ifeq ($(CONFIG_MALI_NO_MALI), y)
++            CONFIG_MALI_REAL_HW = n
++        else
++            # Prevent misuse when CONFIG_MALI_NO_MALI=n
++            CONFIG_MALI_REAL_HW = y
++            CONFIG_MALI_ERROR_INJECT = n
++        endif
++
++        ifeq ($(CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED), y)
++            # Prevent misuse when CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED=y
++            CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE = n
++        endif
+ 
+-KDIR ?= /lib/modules/$(shell uname -r)/build
++        ifeq ($(CONFIG_MALI_DEBUG), y)
++            CONFIG_MALI_MIDGARD_ENABLE_TRACE ?= y
++            CONFIG_MALI_SYSTEM_TRACE ?= y
+ 
+-BUSLOG_PATH_RELATIVE = $(CURDIR)/../../../..
+-KBASE_PATH_RELATIVE = $(CURDIR)
++            ifeq ($(CONFIG_SYNC), y)
++                CONFIG_MALI_FENCE_DEBUG ?= y
++            else
++                ifeq ($(CONFIG_SYNC_FILE), y)
++                    CONFIG_MALI_FENCE_DEBUG ?= y
++                else
++                    CONFIG_MALI_FENCE_DEBUG = n
++                endif
++            endif
++        else
++            # Prevent misuse when CONFIG_MALI_DEBUG=n
++            CONFIG_MALI_MIDGARD_ENABLE_TRACE = n
++            CONFIG_MALI_SYSTEM_TRACE = n
++            CONFIG_MALI_FENCE_DEBUG = n
++        endif
++    else
++        # Prevent misuse when CONFIG_MALI_EXPERT=n
++        CONFIG_MALI_CORESTACK = n
++        CONFIG_MALI_2MB_ALLOC = n
++        CONFIG_MALI_PWRSOFT_765 = n
++        CONFIG_MALI_MEMORY_FULLY_BACKED = n
++        CONFIG_MALI_JOB_DUMP = n
++        CONFIG_MALI_NO_MALI = n
++        CONFIG_MALI_REAL_HW = y
++        CONFIG_MALI_ERROR_INJECT = n
++        CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED = n
++        CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE = n
++        CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS = n
++        CONFIG_MALI_DEBUG = n
++        CONFIG_MALI_MIDGARD_ENABLE_TRACE = n
++        CONFIG_MALI_SYSTEM_TRACE = n
++        CONFIG_MALI_FENCE_DEBUG = n
++    endif
+ 
+-ifeq ($(CONFIG_MALI_BUSLOG),y)
+-#Add bus logger symbols
+-EXTRA_SYMBOLS += $(BUSLOG_PATH_RELATIVE)/drivers/base/bus_logger/Module.symvers
++    ifeq ($(CONFIG_MALI_DEBUG), y)
++        CONFIG_MALI_KUTF ?= y
++        ifeq ($(CONFIG_MALI_KUTF), y)
++            CONFIG_MALI_KUTF_IRQ_TEST ?= y
++            CONFIG_MALI_KUTF_CLK_RATE_TRACE ?= y
++        else
++            # Prevent misuse when CONFIG_MALI_KUTF=n
++            CONFIG_MALI_KUTF_IRQ_TEST = n
++            CONFIG_MALI_KUTF_CLK_RATE_TRACE = n
++        endif
++    else
++        # Prevent misuse when CONFIG_MALI_DEBUG=n
++        CONFIG_MALI_KUTF = n
++        CONFIG_MALI_KUTF_IRQ_TEST = n
++        CONFIG_MALI_KUTF_CLK_RATE_TRACE = n
++    endif
++else
++    # Prevent misuse when CONFIG_MALI_MIDGARD=n
++    CONFIG_MALI_ARBITRATION = n
++    CONFIG_MALI_KUTF = n
++    CONFIG_MALI_KUTF_IRQ_TEST = n
++    CONFIG_MALI_KUTF_CLK_RATE_TRACE = n
+ endif
+ 
+-# we get the symbols from modules using KBUILD_EXTRA_SYMBOLS to prevent warnings about unknown functions
++# All Mali CONFIG should be listed here
++CONFIGS := \
++    CONFIG_MALI_MIDGARD \
++    CONFIG_MALI_CSF_SUPPORT \
++    CONFIG_MALI_GATOR_SUPPORT \
++    CONFIG_MALI_DMA_FENCE \
++    CONFIG_MALI_ARBITER_SUPPORT \
++    CONFIG_MALI_ARBITRATION \
++    CONFIG_MALI_PARTITION_MANAGER \
++    CONFIG_MALI_REAL_HW \
++    CONFIG_MALI_GEM5_BUILD \
++    CONFIG_MALI_DEVFREQ \
++    CONFIG_MALI_MIDGARD_DVFS \
++    CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND \
++    CONFIG_MALI_DMA_BUF_LEGACY_COMPAT \
++    CONFIG_MALI_EXPERT \
++    CONFIG_MALI_CORESTACK \
++    CONFIG_MALI_2MB_ALLOC \
++    CONFIG_MALI_PWRSOFT_765 \
++    CONFIG_MALI_MEMORY_FULLY_BACKED \
++    CONFIG_MALI_JOB_DUMP \
++    CONFIG_MALI_NO_MALI \
++    CONFIG_MALI_ERROR_INJECT \
++    CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED \
++    CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE \
++    CONFIG_MALI_PRFCNT_SET_PRIMARY \
++    CONFIG_MALI_PRFCNT_SET_SECONDARY \
++    CONFIG_MALI_PRFCNT_SET_TERTIARY \
++    CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS \
++    CONFIG_MALI_DEBUG \
++    CONFIG_MALI_MIDGARD_ENABLE_TRACE \
++    CONFIG_MALI_SYSTEM_TRACE \
++    CONFIG_MALI_FENCE_DEBUG \
++    CONFIG_MALI_KUTF \
++    CONFIG_MALI_KUTF_IRQ_TEST \
++    CONFIG_MALI_KUTF_CLK_RATE_TRACE \
++    CONFIG_MALI_XEN
++
++
++#
++# MAKE_ARGS to pass the custom CONFIGs on out-of-tree build
++#
++# Generate the list of CONFIGs and values.
++# $(value config) is the name of the CONFIG option.
++# $(value $(value config)) is its value (y, m).
++# When the CONFIG is not set to y or m, it defaults to n.
++MAKE_ARGS := $(foreach config,$(CONFIGS), \
++                    $(if $(filter y m,$(value $(value config))), \
++                        $(value config)=$(value $(value config)), \
++                        $(value config)=n))
++
++#
++# EXTRA_CFLAGS to define the custom CONFIGs on out-of-tree build
++#
++# Generate the list of CONFIGs defines with values from CONFIGS.
++# $(value config) is the name of the CONFIG option.
++# When set to y or m, the CONFIG gets defined to 1.
++EXTRA_CFLAGS := $(foreach config,$(CONFIGS), \
++                    $(if $(filter y m,$(value $(value config))), \
++                        -D$(value config)=1))
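++#
++# Worked example (values assumed for illustration): with
++# CONFIG_MALI_MIDGARD=m and CONFIG_MALI_DEBUG left unset, MAKE_ARGS
++# contains "CONFIG_MALI_MIDGARD=m ... CONFIG_MALI_DEBUG=n" while
++# EXTRA_CFLAGS contains "-DCONFIG_MALI_MIDGARD=1" and no define for
++# CONFIG_MALI_DEBUG.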
++
++#
++# KBUILD_EXTRA_SYMBOLS to prevent warnings about unknown functions
++#
++
+ all:
+-	$(MAKE) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include -I$(CURDIR)/../../../../tests/include $(SCONS_CFLAGS)" $(SCONS_CONFIGS) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules
++	$(MAKE) -C $(KDIR) M=$(CURDIR) $(MAKE_ARGS) EXTRA_CFLAGS="$(EXTRA_CFLAGS)" KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules
++
++modules_install:
++	$(MAKE) -C $(KDIR) M=$(CURDIR) $(MAKE_ARGS) modules_install
+ 
+ clean:
+-	$(MAKE) -C $(KDIR) M=$(CURDIR) clean
++	$(MAKE) -C $(KDIR) M=$(CURDIR) $(MAKE_ARGS) clean
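++
++# Example out-of-tree invocation (path and option values illustrative):
++#
++#     make KDIR=/path/to/kernel/build CONFIG_MALI_MIDGARD=m \
++#          CONFIG_MALI_DEVFREQ=y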
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/Mconfig b/dvalin/kernel/drivers/gpu/arm/midgard/Mconfig
+index b137793..d71a113 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/Mconfig
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/Mconfig
+@@ -1,17 +1,22 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2012-2020 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2012-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+-# A copy of the licence is included with the program, and can also be obtained
+-# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+-# Boston, MA  02110-1301, USA.
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, you can access it online at
++# http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+ #
+-
+ 
+ menuconfig MALI_MIDGARD
+ 	bool "Mali Midgard series support"
+@@ -22,13 +27,44 @@ menuconfig MALI_MIDGARD
+ 	  To compile this driver as a module, choose M here:
+ 	  this will generate a single module, called mali_kbase.
+ 
+-config MALI_GATOR_SUPPORT
+-	bool "Enable Streamline tracing support"
+-	depends on MALI_MIDGARD && !BACKEND_USER
++config MALI_PLATFORM_NAME
++	depends on MALI_MIDGARD
++	string "Platform name"
++	default "hisilicon" if PLATFORM_HIKEY960
++	default "hisilicon" if PLATFORM_HIKEY970
++	default "devicetree"
++	help
++	  Enter the name of the desired platform configuration directory to
++	  include in the build. 'platform/$(MALI_PLATFORM_NAME)/Makefile' must
++	  exist.
++
++	  When PLATFORM_CUSTOM is set, this needs to be set manually to
++	  pick up the desired platform files.
++
++config MALI_REAL_HW
++	bool
++	depends on MALI_MIDGARD
+ 	default y
++	default n if NO_MALI
++
++config MALI_CSF_SUPPORT
++	bool "Enable Mali CSF based GPU support"
++	depends on MALI_MIDGARD
++	default y if GPU_HAS_CSF
+ 	help
+-	  Enables kbase tracing used by the Arm Streamline Performance Analyzer.
+-	  The tracepoints are used to derive GPU activity charts in Streamline.
++	  Enables support for CSF based GPUs.
++
++config MALI_DEVFREQ
++	bool "Enable devfreq support for Mali"
++	depends on MALI_MIDGARD
++	default y if PLATFORM_JUNO
++	default y if PLATFORM_CUSTOM
++	help
++	  Support devfreq for Mali.
++
++	  Using the devfreq framework and, by default, the simple on-demand
++	  governor, the frequency of Mali will be dynamically selected from the
++	  available OPPs.
+ 
+ config MALI_MIDGARD_DVFS
+ 	bool "Enable legacy DVFS"
+@@ -37,29 +73,25 @@ config MALI_MIDGARD_DVFS
+ 	help
+ 	  Choose this option to enable legacy DVFS in the Mali Midgard DDK.
+ 
++config MALI_GATOR_SUPPORT
++	bool "Enable Streamline tracing support"
++	depends on MALI_MIDGARD && !BACKEND_USER
++	default y
++	help
++	  Enables kbase tracing used by the Arm Streamline Performance Analyzer.
++	  The tracepoints are used to derive GPU activity charts in Streamline.
++
+ config MALI_MIDGARD_ENABLE_TRACE
+ 	bool "Enable kbase tracing"
+ 	depends on MALI_MIDGARD
+ 	default y if MALI_DEBUG
+ 	default n
+ 	help
+-	  Enables tracing in kbase.  Trace log available through
++	  Enables tracing in kbase. Trace log available through
+ 	  the "mali_trace" debugfs file, when the CONFIG_DEBUG_FS is enabled
+ 
+-config MALI_DEVFREQ
+-	bool "devfreq support for Mali"
+-	depends on MALI_MIDGARD
+-	default y if PLATFORM_JUNO
+-	default y if PLATFORM_CUSTOM
+-	help
+-	  Support devfreq for Mali.
+-
+-	  Using the devfreq framework and, by default, the simpleondemand
+-	  governor, the frequency of Mali will be dynamically selected from the
+-	  available OPPs.
+-
+ config MALI_DMA_FENCE
+-	bool "DMA_BUF fence support for Mali"
++	bool "Enable DMA_BUF fence support for Mali"
+ 	depends on MALI_MIDGARD
+ 	default n
+ 	help
+@@ -68,23 +100,9 @@ config MALI_DMA_FENCE
+ 	  This option should only be enabled if the Linux Kernel has built in
+ 	  support for DMA_BUF fences.
+ 
+-config MALI_PLATFORM_NAME
+-	depends on MALI_MIDGARD
+-	string "Platform name"
+-	default "hisilicon" if PLATFORM_HIKEY960
+-	default "hisilicon" if PLATFORM_HIKEY970
+-	default "devicetree"
+-	help
+-	  Enter the name of the desired platform configuration directory to
+-	  include in the build. 'platform/$(MALI_PLATFORM_NAME)/Kbuild' must
+-	  exist.
+-
+-	  When PLATFORM_CUSTOM is set, this needs to be set manually to
+-	  pick up the desired platform files.
+-
+ config MALI_ARBITER_SUPPORT
+ 	bool "Enable arbiter support for Mali"
+-	depends on MALI_MIDGARD
++	depends on MALI_MIDGARD && !MALI_CSF_SUPPORT
+ 	default n
+ 	help
+ 	  Enable support for the arbiter interface in the driver.
+@@ -93,62 +111,89 @@ config MALI_ARBITER_SUPPORT
+ 
+ 	  If unsure, say N.
+ 
+-# MALI_EXPERT configuration options
++config DMA_BUF_SYNC_IOCTL_SUPPORTED
++	bool "Enable Kernel DMA buffers support DMA_BUF_IOCTL_SYNC"
++	depends on MALI_MIDGARD && BACKEND_KERNEL
++	default y
++
++config MALI_DMA_BUF_MAP_ON_DEMAND
++	bool "Enable map imported dma-bufs on demand"
++	depends on MALI_MIDGARD
++	default n
++	default y if !DMA_BUF_SYNC_IOCTL_SUPPORTED
++	help
++	  This option causes kbase to set up the GPU mapping of imported
++	  dma-buf when needed to run atoms. This is the legacy behavior.
++
++	  This is intended for testing and the option will get removed in the
++	  future.
++
++config MALI_DMA_BUF_LEGACY_COMPAT
++	bool "Enable legacy compatibility cache flush on dma-buf map"
++	depends on MALI_MIDGARD && !MALI_DMA_BUF_MAP_ON_DEMAND
++	default n
++	help
++	  This option enables compatibility with legacy dma-buf mapping
++	  behavior, in which the dma-buf is mapped on import, by adding cache
++	  maintenance where MALI_DMA_BUF_MAP_ON_DEMAND would do the mapping,
++	  including a cache flush.
++
++	  This option might work around issues related to missing cache
++	  flushes in other drivers. This only has an effect for clients using
++	  UK 11.18 or older. For later UK versions it is not possible.
+ 
+ menuconfig MALI_EXPERT
+ 	depends on MALI_MIDGARD
+ 	bool "Enable Expert Settings"
+ 	default y
+ 	help
+-	  Enabling this option and modifying the default settings may produce a driver with performance or
+-	  other limitations.
++	  Enabling this option and modifying the default settings may produce
++	  a driver with performance or other limitations.
+ 
+-config MALI_CORESTACK
+-	bool "Support controlling power to the GPU core stack"
++config MALI_2MB_ALLOC
++	bool "Attempt to allocate 2MB pages"
+ 	depends on MALI_MIDGARD && MALI_EXPERT
+ 	default n
+ 	help
+-	  Enabling this feature on supported GPUs will let the driver powering
+-	  on/off the GPU core stack independently without involving the Power
+-	  Domain Controller. This should only be enabled on platforms which
+-	  integration of the PDC to the Mali GPU is known to be problematic.
+-	  This feature is currently only supported on t-Six and t-HEx GPUs.
++	  Rather than allocating all GPU memory page-by-page, attempt to
++	  allocate 2MB pages from the kernel. This reduces TLB pressure and
++	  helps to prevent memory fragmentation.
+ 
+-	  If unsure, say N.
++	  If in doubt, say N
+ 
+-config MALI_DEBUG
+-	bool "Debug build"
++config MALI_MEMORY_FULLY_BACKED
++	bool "Enable memory fully physically-backed"
+ 	depends on MALI_MIDGARD && MALI_EXPERT
+-	default y if DEBUG
+ 	default n
+ 	help
+-	  Select this option for increased checking and reporting of errors.
++	  This option enables full physical backing of all virtual
++	  memory allocations in the kernel. Notice that this build
++	  option only affects allocations of grow-on-GPU-page-fault
++	  memory.
+ 
+-config MALI_FENCE_DEBUG
+-	bool "Debug sync fence usage"
++config MALI_CORESTACK
++	bool "Enable support of GPU core stack power control"
+ 	depends on MALI_MIDGARD && MALI_EXPERT
+-	default y if MALI_DEBUG
++	default n
+ 	help
+-	  Select this option to enable additional checking and reporting on the
+-	  use of sync fences in the Mali driver.
+-
+-	  This will add a 3s timeout to all sync fence waits in the Mali
+-	  driver, so that when work for Mali has been waiting on a sync fence
+-	  for a long time a debug message will be printed, detailing what fence
+-	  is causing the block, and which dependent Mali atoms are blocked as a
+-	  result of this.
++	  Enabling this feature on supported GPUs will let the driver power
++	  the GPU core stack on/off independently, without involving the Power
++	  Domain Controller. This should only be enabled on platforms where
++	  integration of the PDC with the Mali GPU is known to be problematic.
++	  This feature is currently only supported on t-Six and t-HEx GPUs.
+ 
+-	  The timeout can be changed at runtime through the js_soft_timeout
+-	  device attribute, where the timeout is specified in milliseconds.
++	  If unsure, say N.
+ 
+ choice
+ 	prompt "Error injection level"
++	depends on MALI_MIDGARD && MALI_EXPERT
+ 	default MALI_ERROR_INJECT_NONE
+ 	help
+ 	  Enables insertion of errors to test module failure and recovery mechanisms.
+ 
+ config MALI_ERROR_INJECT_NONE
+ 	bool "disabled"
++	depends on MALI_MIDGARD && MALI_EXPERT
+ 	help
+ 	  Error injection is disabled.
+ 
+@@ -168,14 +213,49 @@ endchoice
+ 
+ config MALI_ERROR_INJECT_ON
+ 	string
++	depends on MALI_MIDGARD && MALI_EXPERT
+ 	default "0" if MALI_ERROR_INJECT_NONE
+ 	default "1" if MALI_ERROR_INJECT_TRACK_LIST
+ 	default "2" if MALI_ERROR_INJECT_RANDOM
+ 
+ config MALI_ERROR_INJECT
+ 	bool
++	depends on MALI_MIDGARD && MALI_EXPERT
+ 	default y if !MALI_ERROR_INJECT_NONE
+ 
++config MALI_GEM5_BUILD
++	bool "Enable build of Mali kernel driver for GEM5"
++	depends on MALI_MIDGARD && MALI_EXPERT
++	default n
++	help
++	  This option is to do a Mali GEM5 build.
++	  If unsure, say N.
++
++config MALI_DEBUG
++	bool "Enable debug build"
++	depends on MALI_MIDGARD && MALI_EXPERT
++	default y if DEBUG
++	default n
++	help
++	  Select this option for increased checking and reporting of errors.
++
++config MALI_FENCE_DEBUG
++	bool "Enable debug sync fence usage"
++	depends on MALI_MIDGARD && MALI_EXPERT
++	default y if MALI_DEBUG
++	help
++	  Select this option to enable additional checking and reporting on the
++	  use of sync fences in the Mali driver.
++
++	  This will add a 3s timeout to all sync fence waits in the Mali
++	  driver, so that when work for Mali has been waiting on a sync fence
++	  for a long time a debug message will be printed, detailing what fence
++	  is causing the block, and which dependent Mali atoms are blocked as a
++	  result of this.
++
++	  The timeout can be changed at runtime through the js_soft_timeout
++	  device attribute, where the timeout is specified in milliseconds.
++
+ config MALI_SYSTEM_TRACE
+ 	bool "Enable system event tracing support"
+ 	depends on MALI_MIDGARD && MALI_EXPERT
+@@ -187,56 +267,35 @@ config MALI_SYSTEM_TRACE
+ 	  minimal overhead when not in use. Enable only if you know what
+ 	  you are doing.
+ 
+-config MALI_2MB_ALLOC
+-	bool "Attempt to allocate 2MB pages"
+-	depends on MALI_MIDGARD && MALI_EXPERT
+-	default n
+-	help
+-	  Rather than allocating all GPU memory page-by-page, attempt to
+-	  allocate 2MB pages from the kernel. This reduces TLB pressure and
+-	  helps to prevent memory fragmentation.
++# Instrumentation options.
+ 
+-	  If in doubt, say N
++# config MALI_PRFCNT_SET_PRIMARY exists in the Kernel Kconfig but is configured using CINSTR_PRIMARY_HWC in Mconfig.
++# config MALI_PRFCNT_SET_SECONDARY exists in the Kernel Kconfig but is configured using CINSTR_SECONDARY_HWC in Mconfig.
++# config MALI_PRFCNT_SET_TERTIARY exists in the Kernel Kconfig but is configured using CINSTR_TERTIARY_HWC in Mconfig.
++# config MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS exists in the Kernel Kconfig but is configured using CINSTR_HWC_SET_SELECT_VIA_DEBUG_FS in Mconfig.
+ 
+-config MALI_PWRSOFT_765
+-	bool "PWRSOFT-765 ticket"
++config MALI_JOB_DUMP
++	bool "Enable system level support needed for job dumping"
+ 	depends on MALI_MIDGARD && MALI_EXPERT
+ 	default n
+ 	help
+-	  PWRSOFT-765 fixes devfreq cooling devices issues. However, they are
+-	  not merged in mainline kernel yet. So this define helps to guard those
+-	  parts of the code.
+-
+-config MALI_MEMORY_FULLY_BACKED
+-	bool "Memory fully physically-backed"
+-	default n
+-	help
+-	  This option enables full backing of all virtual memory allocations
+-	  for the kernel. This only affects grow-on-GPU-page-fault memory.
++	  Choose this option to enable system level support needed for
++	  job dumping. This is typically used for instrumentation but has
++	  minimal overhead when not in use. Enable only if you know what
++	  you are doing.
+ 
+-config MALI_DMA_BUF_MAP_ON_DEMAND
+-	bool "Map imported dma-bufs on demand"
+-	depends on MALI_MIDGARD
++config MALI_PWRSOFT_765
++	bool "Enable workaround for PWRSOFT-765"
++	depends on MALI_MIDGARD && MALI_EXPERT
+ 	default n
+-	default y if !DMA_BUF_SYNC_IOCTL_SUPPORTED
+ 	help
+-	  This option caused kbase to set up the GPU mapping of imported
+-	  dma-buf when needed to run atoms.  This is the legacy behaviour.
++	  PWRSOFT-765 fixes devfreq cooling devices issues. The fix was merged
++	  in kernel v4.10, however if backported into the kernel then this
++	  option must be manually selected.
+ 
+-config MALI_DMA_BUF_LEGACY_COMPAT
+-	bool "Enable legacy compatibility cache flush on dma-buf map"
+-	depends on MALI_MIDGARD && !MALI_DMA_BUF_MAP_ON_DEMAND
+-	default n
+-	help
+-	  This option enables compatibility with legacy dma-buf mapping
+-	  behavior, then the dma-buf is mapped on import, by adding cache
+-	  maintenance where MALI_DMA_BUF_MAP_ON_DEMAND would do the mapping,
+-	  including a cache flush.
++	  If using kernel >= v4.10 then say N, otherwise if devfreq cooling
++	  changes have been backported say Y to avoid compilation errors.
+ 
+-config MALI_REAL_HW
+-	bool
+-	default y
+-	default n if NO_MALI
+ 
+ config MALI_HW_ERRATA_1485982_NOT_AFFECTED
+ 	bool "Disable workaround for BASE_HW_ISSUE_GPU2017_1336"
+@@ -262,17 +321,6 @@ config MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE
+ 	  tree using the property, opp-mali-errata-1485982. Otherwise the
+ 	  slowest clock will be selected.
+ 
+-config MALI_GEM5_BUILD
+-	bool "Enable build of Mali kernel driver for GEM5"
+-	depends on MALI_MIDGARD
+-	default n
+-	help
+-	  This option is to do a Mali GEM5 build.
+-	  If unsure, say N.
+-
+-# Instrumentation options.
+-
+-# config MALI_JOB_DUMP exists in the Kernel Kconfig but is configured using CINSTR_JOB_DUMP in Mconfig.
+-# config MALI_PRFCNT_SET_SECONDARY exists in the Kernel Kconfig but is configured using CINSTR_SECONDARY_HWC in Mconfig.
+ 
++source "kernel/drivers/gpu/arm/midgard/arbitration/Mconfig"
+ source "kernel/drivers/gpu/arm/midgard/tests/Mconfig"
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/Kbuild b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/Kbuild
+index 98e47be..5203281 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/Kbuild
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/Kbuild
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,10 +16,8 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+-#
+ #
+ 
+ mali_kbase-y += \
+-	arbiter/mali_kbase_arbif.o \
+-	arbiter/mali_kbase_arbiter_pm.o
++    arbiter/mali_kbase_arbif.o \
++    arbiter/mali_kbase_arbiter_pm.o
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbif.c b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbif.c
+index d193cb9..64e11ce 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbif.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbif.c
+@@ -1,13 +1,12 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -18,13 +17,10 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /**
+- * @file mali_kbase_arbif.c
+- * Mali arbiter interface APIs to share GPU between Virtual Machines
++ * DOC: Mali arbiter interface APIs to share GPU between Virtual Machines
+  */
+ 
+ #include <mali_kbase.h>
+@@ -34,32 +30,155 @@
+ #include <linux/of_platform.h>
+ #include "mali_kbase_arbiter_interface.h"
+ 
++/* Arbiter interface version against which was implemented this module */
++#define MALI_REQUIRED_KBASE_ARBITER_INTERFACE_VERSION 5
++#if MALI_REQUIRED_KBASE_ARBITER_INTERFACE_VERSION != \
++			MALI_KBASE_ARBITER_INTERFACE_VERSION
++#error "Unsupported Mali Arbiter interface version."
++#endif
++
++static void on_max_config(struct device *dev, uint32_t max_l2_slices,
++			  uint32_t max_core_mask)
++{
++	struct kbase_device *kbdev;
++
++	if (!dev) {
++		pr_err("%s(): dev is NULL", __func__);
++		return;
++	}
++
++	kbdev = dev_get_drvdata(dev);
++	if (!kbdev) {
++		dev_err(dev, "%s(): kbdev is NULL", __func__);
++		return;
++	}
++
++	if (!max_l2_slices || !max_core_mask) {
++		dev_dbg(dev,
++			"%s(): max_config ignored as one of the fields is zero",
++			__func__);
++		return;
++	}
++
++	/* set the max config info in the kbase device */
++	kbase_arbiter_set_max_config(kbdev, max_l2_slices, max_core_mask);
++}
++
++/**
++ * on_update_freq() - Updates GPU clock frequency
++ * @dev: arbiter interface device handle
++ * @freq: GPU clock frequency value reported from arbiter
++ *
++ * callback function to update GPU clock frequency with
++ * new value from arbiter
++ */
++static void on_update_freq(struct device *dev, uint32_t freq)
++{
++	struct kbase_device *kbdev;
++
++	if (!dev) {
++		pr_err("%s(): dev is NULL", __func__);
++		return;
++	}
++
++	kbdev = dev_get_drvdata(dev);
++	if (!kbdev) {
++		dev_err(dev, "%s(): kbdev is NULL", __func__);
++		return;
++	}
++
++	kbase_arbiter_pm_update_gpu_freq(&kbdev->arb.arb_freq, freq);
++}
++
++/**
++ * on_gpu_stop() - sends KBASE_VM_GPU_STOP_EVT event on VM stop
++ * @dev: arbiter interface device handle
++ *
++ * callback function to signal a GPU STOP event from arbiter interface
++ */
+ static void on_gpu_stop(struct device *dev)
+ {
+-	struct kbase_device *kbdev = dev_get_drvdata(dev);
++	struct kbase_device *kbdev;
++
++	if (!dev) {
++		pr_err("%s(): dev is NULL", __func__);
++		return;
++	}
+ 
+-	KBASE_TLSTREAM_TL_EVENT_ARB_STOP_REQUESTED(kbdev, kbdev);
++	kbdev = dev_get_drvdata(dev);
++	if (!kbdev) {
++		dev_err(dev, "%s(): kbdev is NULL", __func__);
++		return;
++	}
++
++	KBASE_TLSTREAM_TL_ARBITER_STOP_REQUESTED(kbdev, kbdev);
+ 	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_STOP_EVT);
+ }
+ 
++/**
++ * on_gpu_granted() - sends KBASE_VM_GPU_GRANTED_EVT event on GPU granted
++ * @dev: arbiter interface device handle
++ *
++ * callback function to signal a GPU GRANT event from arbiter interface
++ */
+ static void on_gpu_granted(struct device *dev)
+ {
+-	struct kbase_device *kbdev = dev_get_drvdata(dev);
++	struct kbase_device *kbdev;
++
++	if (!dev) {
++		pr_err("%s(): dev is NULL", __func__);
++		return;
++	}
++
++	kbdev = dev_get_drvdata(dev);
++	if (!kbdev) {
++		dev_err(dev, "%s(): kbdev is NULL", __func__);
++		return;
++	}
+ 
+-	KBASE_TLSTREAM_TL_EVENT_ARB_GRANTED(kbdev, kbdev);
++	KBASE_TLSTREAM_TL_ARBITER_GRANTED(kbdev, kbdev);
+ 	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_GRANTED_EVT);
+ }
+ 
++/**
++ * on_gpu_lost() - sends KBASE_VM_GPU_LOST_EVT event on GPU lost
++ * @dev: arbiter interface device handle
++ *
++ * callback function to signal a GPU LOST event from arbiter interface
++ */
+ static void on_gpu_lost(struct device *dev)
+ {
+-	struct kbase_device *kbdev = dev_get_drvdata(dev);
++	struct kbase_device *kbdev;
++
++	if (!dev) {
++		pr_err("%s(): dev is NULL", __func__);
++		return;
++	}
++
++	kbdev = dev_get_drvdata(dev);
++	if (!kbdev) {
++		dev_err(dev, "%s(): kbdev is NULL", __func__);
++		return;
++	}
+ 
+ 	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_LOST_EVT);
+ }
+ 
++/**
++ * kbase_arbif_init() - Kbase Arbiter interface initialisation.
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Initialise Kbase Arbiter interface and assign callback functions.
++ *
++ * Return:
++ * * 0			- the interface was initialized or was not specified
++ * *			in the device tree.
++ * * -EFAULT		- the interface was specified but failed to initialize.
++ * * -EPROBE_DEFER	- module dependencies are not yet available.
++ */
+ int kbase_arbif_init(struct kbase_device *kbdev)
+ {
+-#ifdef CONFIG_OF
++#if IS_ENABLED(CONFIG_OF)
+ 	struct arbiter_if_arb_vm_ops ops;
+ 	struct arbiter_if_dev *arb_if;
+ 	struct device_node *arbiter_if_node;
+@@ -100,17 +219,26 @@ int kbase_arbif_init(struct kbase_device *kbdev)
+ 	ops.arb_vm_gpu_stop = on_gpu_stop;
+ 	ops.arb_vm_gpu_granted = on_gpu_granted;
+ 	ops.arb_vm_gpu_lost = on_gpu_lost;
++	ops.arb_vm_max_config = on_max_config;
++	ops.arb_vm_update_freq = on_update_freq;
++
++	kbdev->arb.arb_freq.arb_freq = 0;
++	kbdev->arb.arb_freq.freq_updated = false;
++	mutex_init(&kbdev->arb.arb_freq.arb_freq_lock);
+ 
+ 	/* register kbase arbiter_if callbacks */
+ 	if (arb_if->vm_ops.vm_arb_register_dev) {
+ 		err = arb_if->vm_ops.vm_arb_register_dev(arb_if,
+ 			kbdev->dev, &ops);
+ 		if (err) {
+-			dev_err(kbdev->dev, "Arbiter registration failed.\n");
++			dev_err(&pdev->dev, "Failed to register with arbiter\n");
+ 			module_put(pdev->dev.driver->owner);
++			if (err != -EPROBE_DEFER)
++				err = -EFAULT;
+ 			return err;
+ 		}
+ 	}
++
+ #else /* CONFIG_OF */
+ 	dev_dbg(kbdev->dev, "No arbiter without Device Tree support\n");
+ 	kbdev->arb.arb_dev = NULL;
+@@ -119,6 +247,12 @@ int kbase_arbif_init(struct kbase_device *kbdev)
+ 	return 0;
+ }
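++
++/* Caller-side sketch (illustrative, not part of this patch): probe code
++ * should propagate -EPROBE_DEFER so the driver core retries once the
++ * arbiter module becomes available, and treat other failures as fatal:
++ *
++ *	err = kbase_arbif_init(kbdev);
++ *	if (err == -EPROBE_DEFER)
++ *		return err;
++ *	else if (err)
++ *		goto arbiter_init_fail;
++ */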
+ 
++/**
++ * kbase_arbif_destroy() - De-init Kbase arbiter interface
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * De-initialise Kbase arbiter interface
++ */
+ void kbase_arbif_destroy(struct kbase_device *kbdev)
+ {
+ 	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;
+@@ -133,27 +267,64 @@ void kbase_arbif_destroy(struct kbase_device *kbdev)
+ 	kbdev->arb.arb_dev = NULL;
+ }
+ 
++/**
++ * kbase_arbif_get_max_config() - Request max config info
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * callback function from arb interface to arbiter requesting max config info
++ */
++void kbase_arbif_get_max_config(struct kbase_device *kbdev)
++{
++	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;
++
++	if (arb_if && arb_if->vm_ops.vm_arb_get_max_config) {
++		dev_dbg(kbdev->dev, "%s\n", __func__);
++		arb_if->vm_ops.vm_arb_get_max_config(arb_if);
++	}
++}
++
++/**
++ * kbase_arbif_gpu_request() - Request GPU from the arbiter
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * callback function from arb interface to arbiter requesting GPU for VM
++ */
+ void kbase_arbif_gpu_request(struct kbase_device *kbdev)
+ {
+ 	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;
+ 
+ 	if (arb_if && arb_if->vm_ops.vm_arb_gpu_request) {
+ 		dev_dbg(kbdev->dev, "%s\n", __func__);
++		KBASE_TLSTREAM_TL_ARBITER_REQUESTED(kbdev, kbdev);
+ 		arb_if->vm_ops.vm_arb_gpu_request(arb_if);
+ 	}
+ }
+ 
++/**
++ * kbase_arbif_gpu_stopped() - send GPU stopped message to the arbiter
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ * @gpu_required: GPU request flag
++ * Informs the arbiter that the VM has stopped using the GPU, and whether
++ * the VM still requires it (@gpu_required).
++ */
+ void kbase_arbif_gpu_stopped(struct kbase_device *kbdev, u8 gpu_required)
+ {
+ 	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;
+ 
+ 	if (arb_if && arb_if->vm_ops.vm_arb_gpu_stopped) {
+ 		dev_dbg(kbdev->dev, "%s\n", __func__);
+-		KBASE_TLSTREAM_TL_EVENT_ARB_STOPPED(kbdev, kbdev);
++		KBASE_TLSTREAM_TL_ARBITER_STOPPED(kbdev, kbdev);
++		if (gpu_required)
++			KBASE_TLSTREAM_TL_ARBITER_REQUESTED(kbdev, kbdev);
+ 		arb_if->vm_ops.vm_arb_gpu_stopped(arb_if, gpu_required);
+ 	}
+ }
+ 
++/**
++ * kbase_arbif_gpu_active() - Sends a GPU_ACTIVE message to the Arbiter
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Informs the arbiter that the VM is active
++ */
+ void kbase_arbif_gpu_active(struct kbase_device *kbdev)
+ {
+ 	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;
+@@ -164,6 +335,12 @@ void kbase_arbif_gpu_active(struct kbase_device *kbdev)
+ 	}
+ }
+ 
++/**
++ * kbase_arbif_gpu_idle() - Inform the arbiter that the VM has gone idle
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Informs the arbiter that the VM is idle
++ */
+ void kbase_arbif_gpu_idle(struct kbase_device *kbdev)
+ {
+ 	struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;
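All of the kbase_arbif_* senders above share one guarded-dispatch shape:
fetch the interface handle, check that the backend implements the op, then
call it. A minimal sketch of that pattern, using a hypothetical
vm_arb_do_something op that is NOT part of the real arbiter_if_vm_arb_ops:

	void kbase_arbif_do_something(struct kbase_device *kbdev)
	{
		struct arbiter_if_dev *arb_if = kbdev->arb.arb_if;

		/* arb_if is NULL when no arbiter was found in the device
		 * tree, and an individual op may be NULL when the backend
		 * does not implement it, so both checks are needed.
		 */
		if (arb_if && arb_if->vm_ops.vm_arb_do_something)
			arb_if->vm_ops.vm_arb_do_something(arb_if);
	}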
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbif.h b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbif.h
+index e7e9de7..701ffd4 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbif.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbif.h
+@@ -1,28 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT ARM Limited. All rights reserved.
+- *
+- * This program is free software and is provided to you under the terms of the
+- * GNU General Public License version 2 as published by the Free Software
+- * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, you can access it online at
+- * http://www.gnu.org/licenses/gpl-2.0.html.
+- *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+- *//* SPDX-License-Identifier: GPL-2.0 */
+-
+-/*
+- *
+- * (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+@@ -38,12 +17,10 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- *
+  */
+ 
+ /**
+- * @file
+- * Mali arbiter interface APIs to share GPU between Virtual Machines
++ * DOC: Mali arbiter interface APIs to share GPU between Virtual Machines
+  */
+ 
+ #ifndef _MALI_KBASE_ARBIF_H_
+@@ -80,8 +57,11 @@ enum kbase_arbif_evt {
+  * Initialize the arbiter interface and also determines
+  * if Arbiter functionality is required.
+  *
+- * Return: 0 if the Arbiter interface was successfully initialized or the
+- *           Arbiter was not required.
++ * Return:
++ * * 0			- the interface was initialized or was not specified
++ *			  in the device tree.
++ * * -EFAULT		- the interface was specified but failed to initialize.
++ * * -EPROBE_DEFER	- module dependencies are not yet available.
+  */
+ int kbase_arbif_init(struct kbase_device *kbdev);
+ 
+@@ -94,6 +74,14 @@ int kbase_arbif_init(struct kbase_device *kbdev);
+  */
+ void kbase_arbif_destroy(struct kbase_device *kbdev);
+ 
++/**
++ * kbase_arbif_get_max_config() - Request max config info
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Sends a request to the arbiter, via the arbiter interface, for the max
++ * config info
++ */
++void kbase_arbif_get_max_config(struct kbase_device *kbdev);
++
+ /**
+  * kbase_arbif_gpu_request() - Send GPU request message to the arbiter
+  * @kbdev: The kbase device structure for the device (must be a valid pointer)
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_defs.h b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_defs.h
+index 1f53cbf..570a82a 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_defs.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_defs.h
+@@ -1,28 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT ARM Limited. All rights reserved.
+- *
+- * This program is free software and is provided to you under the terms of the
+- * GNU General Public License version 2 as published by the Free Software
+- * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, you can access it online at
+- * http://www.gnu.org/licenses/gpl-2.0.html.
+- *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+- *//* SPDX-License-Identifier: GPL-2.0 */
+-
+-/*
+- *
+- * (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+@@ -38,7 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- *
+  */
+ 
+ /**
+@@ -66,7 +44,8 @@
+  * @vm_resume_work:  Work item for vm_arb_wq to resume current work on GPU
+  * @vm_arb_starting: Work queue resume in progress
+  * @vm_arb_stopping: Work queue suspend in progress
+- * @vm_arb_users_waiting: Count of users waiting for GPU
++ * @interrupts_installed: Flag set when interrupts are installed
++ * @vm_request_timer: Timer to monitor GPU request
+  */
+ struct kbase_arbiter_vm_state {
+ 	struct kbase_device *kbdev;
+@@ -78,7 +57,8 @@ struct kbase_arbiter_vm_state {
+ 	struct work_struct vm_resume_work;
+ 	bool vm_arb_starting;
+ 	bool vm_arb_stopping;
+-	int vm_arb_users_waiting;
++	bool interrupts_installed;
++	struct hrtimer vm_request_timer;
+ };
+ 
+ /**
+@@ -86,10 +66,12 @@ struct kbase_arbiter_vm_state {
+  *                               allocated from the probe method of Mali driver
+  * @arb_if:                 Pointer to the arbiter interface device
+  * @arb_dev:                Pointer to the arbiter device
++ * @arb_freq:               GPU clock frequency retrieved from arbiter.
+  */
+ struct kbase_arbiter_device {
+ 	struct arbiter_if_dev *arb_if;
+ 	struct device *arb_dev;
++	struct kbase_arbiter_freq arb_freq;
+ };
+ 
+ #endif /* _MALI_KBASE_ARBITER_DEFS_H_ */
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_interface.h b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_interface.h
+index 5d5d8a7..c0137f7 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_interface.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_interface.h
+@@ -1,28 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT ARM Limited. All rights reserved.
+- *
+- * This program is free software and is provided to you under the terms of the
+- * GNU General Public License version 2 as published by the Free Software
+- * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, you can access it online at
+- * http://www.gnu.org/licenses/gpl-2.0.html.
+- *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+- *//* SPDX-License-Identifier: GPL-2.0 */
+-
+-/*
+- *
+- * (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+@@ -38,7 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- *
+  */
+ 
+ /**
+@@ -50,7 +28,7 @@
+ #define _MALI_KBASE_ARBITER_INTERFACE_H_
+ 
+ /**
+- * @brief Mali arbiter interface version
++ *  Mali arbiter interface version
+  *
+  * This specifies the current version of the configuration interface. Whenever
+  * the arbiter interface changes, so that integration effort is required, the
+@@ -61,8 +39,15 @@
+  * 1 - Added the Mali arbiter configuration interface.
+  * 2 - Strip out reference code from header
+  * 3 - Removed DVFS utilization interface (DVFS moved to arbiter side)
++ * 4 - Added max_config support
++ * 5 - Added GPU clock frequency reporting support from arbiter
+  */
+-#define MALI_KBASE_ARBITER_INTERFACE_VERSION 3
++#define MALI_KBASE_ARBITER_INTERFACE_VERSION 5
++
++/**
++ * NO_FREQ is used in case the platform doesn't support reporting frequency
++ */
++#define NO_FREQ 0
+ 
+ struct arbiter_if_dev;
+ 
+@@ -108,6 +93,27 @@ struct arbiter_if_arb_vm_ops {
+ 	 * If successful, will respond with a vm_arb_gpu_stopped message.
+ 	 */
+ 	void (*arb_vm_gpu_lost)(struct device *dev);
++
++	/**
++	 * arb_vm_max_config() - Send max config info to the VM
++	 * @dev: The arbif kernel module device.
++	 * @max_l2_slices: The maximum number of L2 slices.
++	 * @max_core_mask: The largest core mask.
++	 *
++	 * Informs KBase the maximum resources that can be allocated to the
++	 * partition in use.
++	 */
++	void (*arb_vm_max_config)(struct device *dev, uint32_t max_l2_slices,
++				  uint32_t max_core_mask);
++
++	/**
++	 * arb_vm_update_freq() - GPU clock frequency has been updated
++	 * @dev: The arbif kernel module device.
++	 * @freq: GPU clock frequency value reported from arbiter
++	 *
++	 * Informs KBase that the GPU clock frequency has been updated.
++	 */
++	void (*arb_vm_update_freq)(struct device *dev, uint32_t freq);
+ };
+ 
+ /**
+@@ -126,6 +132,11 @@ struct arbiter_if_vm_arb_ops {
+ 	 * @dev: The device structure to supply in the callbacks.
+ 	 * @ops: The callbacks that the device driver supports
+ 	 *       (none are optional).
++	 *
++	 * Return:
++	 * * 0			- successful.
++	 * * -EINVAL		- invalid argument.
++	 * * -EPROBE_DEFER	- module dependencies are not yet available.
+ 	 */
+ 	int (*vm_arb_register_dev)(struct arbiter_if_dev *arbif_dev,
+ 		struct device *dev, struct arbiter_if_arb_vm_ops *ops);
+@@ -136,6 +147,13 @@ struct arbiter_if_vm_arb_ops {
+ 	 */
+ 	void (*vm_arb_unregister_dev)(struct arbiter_if_dev *arbif_dev);
+ 
++	/**
++	 * vm_arb_get_max_config() - Request the max config from the
++	 * Arbiter.
++	 * @arbif_dev: The arbiter interface we want to issue the request.
++	 */
++	void (*vm_arb_get_max_config)(struct arbiter_if_dev *arbif_dev);
++
+ 	/**
+ 	 * vm_arb_gpu_request() - Ask the arbiter interface for GPU access.
+ 	 * @arbif_dev: The arbiter interface we want to issue the request.
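On the kbase side, each arb_vm_* callback above is a small trampoline that
recovers the kbase_device from the registered struct device and forwards
into the arbiter PM code; the filled-in ops table is then handed to
vm_arb_register_dev() from kbase_arbif_init(). A hedged sketch of that
wiring (the on_* handler names are illustrative, not necessarily the exact
ones kbase uses):

	static void on_max_config(struct device *dev, uint32_t max_l2_slices,
				  uint32_t max_core_mask)
	{
		struct kbase_device *kbdev = dev_get_drvdata(dev);

		if (kbdev)
			kbase_arbiter_set_max_config(kbdev, max_l2_slices,
						     max_core_mask);
	}

	static void on_update_freq(struct device *dev, uint32_t freq)
	{
		struct kbase_device *kbdev = dev_get_drvdata(dev);

		if (kbdev)
			kbase_arbiter_pm_update_gpu_freq(&kbdev->arb.arb_freq,
							 freq);
	}

	/* In kbase_arbif_init(), alongside the granted/stop/lost handlers: */
	ops.arb_vm_max_config = on_max_config;		/* interface version 4 */
	ops.arb_vm_update_freq = on_update_freq;	/* interface version 5 */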
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_pm.c b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_pm.c
+index 6c35e16..5c75686 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_pm.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_pm.c
+@@ -1,13 +1,12 @@
+-// SPDX-License-Identifier: GPL-2.0
+-
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -18,27 +17,49 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /**
+- * @file mali_kbase_arbiter_pm.c
++ * @file
+  * Mali arbiter power manager state machine and APIs
+  */
+ 
+ #include <mali_kbase.h>
+ #include <mali_kbase_pm.h>
+ #include <mali_kbase_hwaccess_jm.h>
+-#include <mali_kbase_irq_internal.h>
++#include <backend/gpu/mali_kbase_irq_internal.h>
+ #include <mali_kbase_hwcnt_context.h>
+-#include <mali_kbase_pm_internal.h>
++#include <backend/gpu/mali_kbase_pm_internal.h>
+ #include <tl/mali_kbase_tracepoints.h>
++#include <mali_kbase_gpuprops.h>
++
++/* A dmesg warning will occur if the GPU is not granted
++ * after the following time (in milliseconds) has elapsed.
++ */
++#define GPU_REQUEST_TIMEOUT 1000
++#define KHZ_TO_HZ 1000
++
++#define MAX_L2_SLICES_MASK		0xFF
++
++/* Maximum time in ms before deferring the probe in case the
++ * GPU_GRANTED message is not received
++ */
++static int gpu_req_timeout = 1;
++module_param(gpu_req_timeout, int, 0644);
++MODULE_PARM_DESC(gpu_req_timeout,
++	"On a virtualized platform, if the GPU is not granted within this time(ms) kbase will defer the probe");
+ 
+ static void kbase_arbiter_pm_vm_wait_gpu_assignment(struct kbase_device *kbdev);
+ static inline bool kbase_arbiter_pm_vm_gpu_assigned_lockheld(
+ 	struct kbase_device *kbdev);
+ 
++/**
++ * kbase_arbiter_pm_vm_state_str() - Helper function to get the string
++ *                                   for a kbase VM state (debug)
++ * @state: kbase VM state
++ *
++ * Return: string representation of enum kbase_vm_state
++ */
+ static inline const char *kbase_arbiter_pm_vm_state_str(
+ 	enum kbase_vm_state state)
+ {
+@@ -73,6 +94,13 @@ static inline const char *kbase_arbiter_pm_vm_state_str(
+ 	}
+ }
+ 
++/**
++ * kbase_arbiter_pm_vm_event_str() - Helper function to get the string
++ *                                   for a kbase VM event (debug)
++ * @evt: kbase VM event
++ *
++ * Return: string representation of enum kbase_arbif_evt
++ */
+ static inline const char *kbase_arbiter_pm_vm_event_str(
+ 	enum kbase_arbif_evt evt)
+ {
+@@ -99,6 +127,13 @@ static inline const char *kbase_arbiter_pm_vm_event_str(
+ 	}
+ }
+ 
++/**
++ * kbase_arbiter_pm_vm_set_state() - Sets new kbase_arbiter_vm_state
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ * @new_state: kbase VM new state
++ *
++ * This function sets the new state for the VM
++ */
+ static void kbase_arbiter_pm_vm_set_state(struct kbase_device *kbdev,
+ 	enum kbase_vm_state new_state)
+ {
+@@ -107,11 +142,22 @@ static void kbase_arbiter_pm_vm_set_state(struct kbase_device *kbdev,
+ 	dev_dbg(kbdev->dev, "VM set_state %s -> %s",
+ 	kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state),
+ 	kbase_arbiter_pm_vm_state_str(new_state));
++
+ 	lockdep_assert_held(&arb_vm_state->vm_state_lock);
+ 	arb_vm_state->vm_state = new_state;
++	if (new_state != KBASE_VM_STATE_INITIALIZING_WITH_GPU &&
++		new_state != KBASE_VM_STATE_INITIALIZING)
++		KBASE_KTRACE_ADD(kbdev, ARB_VM_STATE, NULL, new_state);
+ 	wake_up(&arb_vm_state->vm_state_wait);
+ }
+ 
++/**
++ * kbase_arbiter_pm_suspend_wq() - Driver suspend work handler.
++ * @data: work item
++ *
++ * Suspends the driver when the VM is in the SUSPEND_PENDING,
++ * STOPPING_IDLE or STOPPING_ACTIVE state
++ */
+ static void kbase_arbiter_pm_suspend_wq(struct work_struct *data)
+ {
+ 	struct kbase_arbiter_vm_state *arb_vm_state = container_of(data,
+@@ -136,6 +182,13 @@ static void kbase_arbiter_pm_suspend_wq(struct work_struct *data)
+ 	dev_dbg(kbdev->dev, "<%s\n", __func__);
+ }
+ 
++/**
++ * kbase_arbiter_pm_resume_wq() - Kbase resume work handler.
++ * @data: work item
++ *
++ * Resumes the driver when the VM is in the STARTING state; if it is in
++ * STOPPING_ACTIVE instead, a stop event is requested.
++ */
+ static void kbase_arbiter_pm_resume_wq(struct work_struct *data)
+ {
+ 	struct kbase_arbiter_vm_state *arb_vm_state = container_of(data,
+@@ -157,9 +210,74 @@ static void kbase_arbiter_pm_resume_wq(struct work_struct *data)
+ 	}
+ 	arb_vm_state->vm_arb_starting = false;
+ 	mutex_unlock(&arb_vm_state->vm_state_lock);
++	KBASE_TLSTREAM_TL_ARBITER_STARTED(kbdev, kbdev);
+ 	dev_dbg(kbdev->dev, "<%s\n", __func__);
+ }
+ 
++/**
++ * request_timer_callback() - Issue warning on request timer expiration
++ * @timer: Request hr timer data
++ *
++ * Called when the Arbiter takes too long to grant the GPU after a
++ * request has been made.  Issues a warning in dmesg.
++ *
++ * Return: Always returns HRTIMER_NORESTART
++ */
++static enum hrtimer_restart request_timer_callback(struct hrtimer *timer)
++{
++	struct kbase_arbiter_vm_state *arb_vm_state = container_of(timer,
++			struct kbase_arbiter_vm_state, vm_request_timer);
++
++	KBASE_DEBUG_ASSERT(arb_vm_state);
++	KBASE_DEBUG_ASSERT(arb_vm_state->kbdev);
++
++	dev_warn(arb_vm_state->kbdev->dev,
++		"Still waiting for GPU to be granted from Arbiter after %d ms\n",
++		GPU_REQUEST_TIMEOUT);
++	return HRTIMER_NORESTART;
++}
++
++/**
++ * start_request_timer() - Start a timer after requesting GPU
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Start a timer to track when kbase is waiting for the GPU from the
++ * Arbiter.  If the timer expires before GPU is granted, a warning in
++ * dmesg will be issued.
++ */
++static void start_request_timer(struct kbase_device *kbdev)
++{
++	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
++
++	hrtimer_start(&arb_vm_state->vm_request_timer,
++			HR_TIMER_DELAY_MSEC(GPU_REQUEST_TIMEOUT),
++			HRTIMER_MODE_REL);
++}
++
++/**
++ * cancel_request_timer() - Stop the request timer
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Stops the request timer once GPU has been granted.  Safe to call
++ * even if timer is no longer running.
++ */
++static void cancel_request_timer(struct kbase_device *kbdev)
++{
++	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
++
++	hrtimer_cancel(&arb_vm_state->vm_request_timer);
++}
++
++/**
++ * kbase_arbiter_pm_early_init() - Initialize the arbiter for
++ *                                 paravirtualized VM use.
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Initializes the arbiter and other required resources at runtime and
++ * requests the GPU for the VM for the first time.
++ *
++ * Return: 0 if success, or a Linux error code
++ */
+ int kbase_arbiter_pm_early_init(struct kbase_device *kbdev)
+ {
+ 	int err;
+@@ -179,29 +297,49 @@ int kbase_arbiter_pm_early_init(struct kbase_device *kbdev)
+ 		WQ_HIGHPRI);
+ 	if (!arb_vm_state->vm_arb_wq) {
+ 		dev_err(kbdev->dev, "Failed to allocate vm_arb workqueue\n");
++		kfree(arb_vm_state);
+ 		return -ENOMEM;
+ 	}
+ 	INIT_WORK(&arb_vm_state->vm_suspend_work, kbase_arbiter_pm_suspend_wq);
+ 	INIT_WORK(&arb_vm_state->vm_resume_work, kbase_arbiter_pm_resume_wq);
+ 	arb_vm_state->vm_arb_starting = false;
+-	arb_vm_state->vm_arb_users_waiting = 0;
++	atomic_set(&kbdev->pm.gpu_users_waiting, 0);
++	hrtimer_init(&arb_vm_state->vm_request_timer, CLOCK_MONOTONIC,
++							HRTIMER_MODE_REL);
++	arb_vm_state->vm_request_timer.function =
++						request_timer_callback;
+ 	kbdev->pm.arb_vm_state = arb_vm_state;
+ 
+ 	err = kbase_arbif_init(kbdev);
+ 	if (err) {
++		dev_err(kbdev->dev, "Failed to initialise arbif module\n");
+ 		goto arbif_init_fail;
+ 	}
++
+ 	if (kbdev->arb.arb_if) {
+ 		kbase_arbif_gpu_request(kbdev);
+ 		dev_dbg(kbdev->dev, "Waiting for initial GPU assignment...\n");
+-		wait_event(arb_vm_state->vm_state_wait,
++		err = wait_event_timeout(arb_vm_state->vm_state_wait,
+ 			arb_vm_state->vm_state ==
+-					KBASE_VM_STATE_INITIALIZING_WITH_GPU);
++					KBASE_VM_STATE_INITIALIZING_WITH_GPU,
++			msecs_to_jiffies(gpu_req_timeout));
++
++		if (!err) {
++			dev_dbg(kbdev->dev,
++			"Kbase probe Deferred after waiting %d ms to receive GPU_GRANT\n",
++			gpu_req_timeout);
++			err = -EPROBE_DEFER;
++			goto arbif_eprobe_defer;
++		}
++
+ 		dev_dbg(kbdev->dev,
+ 			"Waiting for initial GPU assignment - done\n");
+ 	}
+ 	return 0;
+ 
++arbif_eprobe_defer:
++	kbase_arbiter_pm_early_term(kbdev);
++	return err;
+ arbif_init_fail:
+ 	destroy_workqueue(arb_vm_state->vm_arb_wq);
+ 	kfree(arb_vm_state);
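The deferral above leans on the wait_event_timeout() return convention: it
returns 0 if the condition is still false when the timeout expires, and a
positive count of remaining jiffies otherwise. A condensed sketch of the
same probe-defer pattern:

	long remaining = wait_event_timeout(arb_vm_state->vm_state_wait,
			arb_vm_state->vm_state ==
					KBASE_VM_STATE_INITIALIZING_WITH_GPU,
			msecs_to_jiffies(gpu_req_timeout));

	if (!remaining) {
		/* No GPU_GRANTED arrived in time: ask the kernel to retry
		 * the whole probe later rather than failing outright.
		 */
		return -EPROBE_DEFER;
	}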
+@@ -209,35 +347,72 @@ arbif_init_fail:
+ 	return err;
+ }
+ 
++/**
++ * kbase_arbiter_pm_early_term() - Shutdown arbiter and free resources
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Clean up all the resources
++ */
+ void kbase_arbiter_pm_early_term(struct kbase_device *kbdev)
+ {
+ 	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
+ 
++	cancel_request_timer(kbdev);
+ 	mutex_lock(&arb_vm_state->vm_state_lock);
+-	if (arb_vm_state->vm_state > KBASE_VM_STATE_STOPPED_GPU_REQUESTED)
++	if (arb_vm_state->vm_state > KBASE_VM_STATE_STOPPED_GPU_REQUESTED) {
++		kbase_pm_set_gpu_lost(kbdev, false);
+ 		kbase_arbif_gpu_stopped(kbdev, false);
+-
++	}
+ 	mutex_unlock(&arb_vm_state->vm_state_lock);
+-	kbase_arbif_destroy(kbdev);
+ 	destroy_workqueue(arb_vm_state->vm_arb_wq);
++	kbase_arbif_destroy(kbdev);
+ 	arb_vm_state->vm_arb_wq = NULL;
+ 	kfree(kbdev->pm.arb_vm_state);
+ 	kbdev->pm.arb_vm_state = NULL;
+ }
+ 
++/**
++ * kbase_arbiter_pm_release_interrupts() - Release the GPU interrupts
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Releases interrupts and sets the interrupts_installed flag to false
++ */
+ void kbase_arbiter_pm_release_interrupts(struct kbase_device *kbdev)
+ {
+ 	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
+ 
+ 	mutex_lock(&arb_vm_state->vm_state_lock);
+-	if (!kbdev->arb.arb_if ||
+-			arb_vm_state->vm_state >
+-					KBASE_VM_STATE_STOPPED_GPU_REQUESTED)
++	if (arb_vm_state->interrupts_installed == true) {
++		arb_vm_state->interrupts_installed = false;
+ 		kbase_release_interrupts(kbdev);
++	}
++	mutex_unlock(&arb_vm_state->vm_state_lock);
++}
+ 
++/**
++ * kbase_arbiter_pm_install_interrupts() - Install the GPU interrupts
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Installs interrupts and sets the interrupts_installed flag to true.
++ *
++ * Return: 0 on success, otherwise an error code.
++ */
++int kbase_arbiter_pm_install_interrupts(struct kbase_device *kbdev)
++{
++	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
++	int err;
++
++	mutex_lock(&arb_vm_state->vm_state_lock);
++	arb_vm_state->interrupts_installed = true;
++	err = kbase_install_interrupts(kbdev);
+ 	mutex_unlock(&arb_vm_state->vm_state_lock);
++	return err;
+ }
+ 
++/**
++ * kbase_arbiter_pm_vm_stopped() - Handle stop state for the VM
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Handles a stop state for the VM
++ */
+ void kbase_arbiter_pm_vm_stopped(struct kbase_device *kbdev)
+ {
+ 	bool request_gpu = false;
+@@ -245,14 +420,19 @@ void kbase_arbiter_pm_vm_stopped(struct kbase_device *kbdev)
+ 
+ 	lockdep_assert_held(&arb_vm_state->vm_state_lock);
+ 
+-	if (arb_vm_state->vm_arb_users_waiting > 0 &&
++	if (atomic_read(&kbdev->pm.gpu_users_waiting) > 0 &&
+ 			arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_IDLE)
+ 		kbase_arbiter_pm_vm_set_state(kbdev,
+ 			 KBASE_VM_STATE_STOPPING_ACTIVE);
+ 
+ 	dev_dbg(kbdev->dev, "%s %s\n", __func__,
+ 		kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));
+-	kbase_release_interrupts(kbdev);
++
++	if (arb_vm_state->interrupts_installed) {
++		arb_vm_state->interrupts_installed = false;
++		kbase_release_interrupts(kbdev);
++	}
++
+ 	switch (arb_vm_state->vm_state) {
+ 	case KBASE_VM_STATE_STOPPING_ACTIVE:
+ 		request_gpu = true;
+@@ -271,14 +451,95 @@ void kbase_arbiter_pm_vm_stopped(struct kbase_device *kbdev)
+ 		break;
+ 	}
+ 
++	kbase_pm_set_gpu_lost(kbdev, false);
+ 	kbase_arbif_gpu_stopped(kbdev, request_gpu);
++	if (request_gpu)
++		start_request_timer(kbdev);
++}
++
++void kbase_arbiter_set_max_config(struct kbase_device *kbdev,
++				  uint32_t max_l2_slices,
++				  uint32_t max_core_mask)
++{
++	struct kbase_arbiter_vm_state *arb_vm_state;
++	struct max_config_props max_config;
++
++	if (!kbdev)
++		return;
++
++	/* Mask max_l2_slices as it is stored in 8 bits inside kbase */
++	max_config.l2_slices = max_l2_slices & MAX_L2_SLICES_MASK;
++	max_config.core_mask = max_core_mask;
++	arb_vm_state = kbdev->pm.arb_vm_state;
++
++	mutex_lock(&arb_vm_state->vm_state_lock);
++	/* Just set the max_props in kbase during initialization. */
++	if (arb_vm_state->vm_state == KBASE_VM_STATE_INITIALIZING)
++		kbase_gpuprops_set_max_config(kbdev, &max_config);
++	else
++		dev_dbg(kbdev->dev, "Unexpected max_config on VM state %s",
++			kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));
++
++	mutex_unlock(&arb_vm_state->vm_state_lock);
++}
++
++int kbase_arbiter_pm_gpu_assigned(struct kbase_device *kbdev)
++{
++	struct kbase_arbiter_vm_state *arb_vm_state;
++	int result = -EINVAL;
++
++	if (!kbdev)
++		return result;
++
++	/* First check the GPU_LOST state */
++	kbase_pm_lock(kbdev);
++	if (kbase_pm_is_gpu_lost(kbdev)) {
++		kbase_pm_unlock(kbdev);
++		return 0;
++	}
++	kbase_pm_unlock(kbdev);
++
++	/* Then the arbitration state machine */
++	arb_vm_state = kbdev->pm.arb_vm_state;
++
++	mutex_lock(&arb_vm_state->vm_state_lock);
++	switch (arb_vm_state->vm_state) {
++	case KBASE_VM_STATE_INITIALIZING:
++	case KBASE_VM_STATE_SUSPENDED:
++	case KBASE_VM_STATE_STOPPED:
++	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
++	case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
++		result = 0;
++		break;
++	default:
++		result = 1;
++		break;
++	}
++	mutex_unlock(&arb_vm_state->vm_state_lock);
++
++	return result;
+ }
+ 
++/**
++ * kbase_arbiter_pm_vm_gpu_start() - Handles the start state of the VM
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Handles the start state of the VM
++ */
+ static void kbase_arbiter_pm_vm_gpu_start(struct kbase_device *kbdev)
+ {
+ 	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
++	bool freq_updated = false;
+ 
+ 	lockdep_assert_held(&arb_vm_state->vm_state_lock);
++	mutex_lock(&kbdev->arb.arb_freq.arb_freq_lock);
++	if (kbdev->arb.arb_freq.freq_updated) {
++		kbdev->arb.arb_freq.freq_updated = false;
++		freq_updated = true;
++	}
++	mutex_unlock(&kbdev->arb.arb_freq.arb_freq_lock);
++
++	cancel_request_timer(kbdev);
+ 	switch (arb_vm_state->vm_state) {
+ 	case KBASE_VM_STATE_INITIALIZING:
+ 		kbase_arbiter_pm_vm_set_state(kbdev,
+@@ -286,22 +547,43 @@ static void kbase_arbiter_pm_vm_gpu_start(struct kbase_device *kbdev)
+ 		break;
+ 	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
+ 		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_STARTING);
++		arb_vm_state->interrupts_installed = true;
+ 		kbase_install_interrupts(kbdev);
++		/*
++		 * GPU GRANTED received while in stop can be a result of a
++		 * repartitioning.
++		 */
++		kbase_gpuprops_req_curr_config_update(kbdev);
++		/* curr_config will be updated while resuming the PM. */
+ 		queue_work(arb_vm_state->vm_arb_wq,
+ 			&arb_vm_state->vm_resume_work);
+ 		break;
+ 	case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
++		kbase_pm_set_gpu_lost(kbdev, false);
+ 		kbase_arbif_gpu_stopped(kbdev, false);
+ 		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_SUSPENDED);
+ 		break;
+ 	default:
+-		dev_warn(kbdev->dev,
+-			"GPU_GRANTED when not expected - state %s\n",
+-			kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));
++		/*
++		 * GPU_GRANTED can be received when there is a frequency update
++		 * Only show a warning if received in an unexpected state
++		 * without a frequency update
++		 */
++		if (!freq_updated)
++			dev_warn(kbdev->dev,
++				"GPU_GRANTED when not expected - state %s\n",
++				kbase_arbiter_pm_vm_state_str(
++					arb_vm_state->vm_state));
+ 		break;
+ 	}
+ }
+ 
++/**
++ * kbase_arbiter_pm_vm_gpu_stop() - Handles the stop state of the VM
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Handles the stop state of the VM
++ */
+ static void kbase_arbiter_pm_vm_gpu_stop(struct kbase_device *kbdev)
+ {
+ 	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
+@@ -344,9 +626,16 @@ static void kbase_arbiter_pm_vm_gpu_stop(struct kbase_device *kbdev)
+ 	}
+ }
+ 
++/**
++ * kbase_gpu_lost() - Handle a GPU lost event signal
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * On a GPU lost event, signals GPU_LOST to the arbiter
++ */
+ static void kbase_gpu_lost(struct kbase_device *kbdev)
+ {
+ 	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
++	bool handle_gpu_lost = false;
+ 
+ 	lockdep_assert_held(&arb_vm_state->vm_state_lock);
+ 
+@@ -357,33 +646,47 @@ static void kbase_gpu_lost(struct kbase_device *kbdev)
+ 		dev_warn(kbdev->dev, "GPU lost in state %s",
+ 		kbase_arbiter_pm_vm_state_str(arb_vm_state->vm_state));
+ 		kbase_arbiter_pm_vm_gpu_stop(kbdev);
+-		mutex_unlock(&arb_vm_state->vm_state_lock);
+-		kbase_pm_handle_gpu_lost(kbdev);
+-		mutex_lock(&arb_vm_state->vm_state_lock);
++		handle_gpu_lost = true;
+ 		break;
+ 	case KBASE_VM_STATE_STOPPING_IDLE:
+ 	case KBASE_VM_STATE_STOPPING_ACTIVE:
+ 	case KBASE_VM_STATE_SUSPEND_PENDING:
+-		dev_info(kbdev->dev, "GPU lost while stopping");
+-		mutex_unlock(&arb_vm_state->vm_state_lock);
+-		kbase_pm_handle_gpu_lost(kbdev);
+-		mutex_lock(&arb_vm_state->vm_state_lock);
++		dev_dbg(kbdev->dev, "GPU lost while stopping");
++		handle_gpu_lost = true;
+ 		break;
+ 	case KBASE_VM_STATE_SUSPENDED:
+ 	case KBASE_VM_STATE_STOPPED:
+ 	case KBASE_VM_STATE_STOPPED_GPU_REQUESTED:
+-		dev_info(kbdev->dev, "GPU lost while already stopped");
++		dev_dbg(kbdev->dev, "GPU lost while already stopped");
+ 		break;
+ 	case KBASE_VM_STATE_SUSPEND_WAIT_FOR_GRANT:
+-		dev_info(kbdev->dev, "GPU lost while waiting to suspend");
++		dev_dbg(kbdev->dev, "GPU lost while waiting to suspend");
+ 		kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_SUSPENDED);
+ 		break;
+ 	default:
+ 		break;
+ 	}
+-
++	if (handle_gpu_lost) {
++		/* Releasing the VM state lock here is safe because
++		 * we are guaranteed to be in either STOPPING_IDLE,
++		 * STOPPING_ACTIVE or SUSPEND_PENDING at this point.
++		 * The only transitions that are valid from here are to
++		 * STOPPED, STOPPED_GPU_REQUESTED or SUSPENDED which can
++		 * only happen at the completion of the GPU lost handling.
++		 */
++		mutex_unlock(&arb_vm_state->vm_state_lock);
++		kbase_pm_handle_gpu_lost(kbdev);
++		mutex_lock(&arb_vm_state->vm_state_lock);
++	}
+ }
+ 
++/**
++ * kbase_arbiter_pm_vm_os_suspend_ready_state() - checks if VM is ready
++ *			to be moved to suspended state.
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Return: true if the VM is ready to be suspended, false otherwise.
++ */
+ static inline bool kbase_arbiter_pm_vm_os_suspend_ready_state(
+ 	struct kbase_device *kbdev)
+ {
+@@ -398,6 +701,14 @@ static inline bool kbase_arbiter_pm_vm_os_suspend_ready_state(
+ 	}
+ }
+ 
++/**
++ * kbase_arbiter_pm_vm_os_prepare_suspend() - Prepare OS to be in suspend state
++ *                             until it receives the grant message from arbiter
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Prepares the OS to remain in the suspend state until the GRANT message
++ * is received asynchronously from the arbiter.
++ */
+ static void kbase_arbiter_pm_vm_os_prepare_suspend(struct kbase_device *kbdev)
+ {
+ 	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
+@@ -463,6 +774,14 @@ static void kbase_arbiter_pm_vm_os_prepare_suspend(struct kbase_device *kbdev)
+ 	}
+ }
+ 
++/**
++ * kbase_arbiter_pm_vm_os_resume() - Resume OS function once it receives
++ *                                   a grant message from arbiter
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Blocks the OS resume path until the GRANT message is received
++ * asynchronously from the arbiter.
++ */
+ static void kbase_arbiter_pm_vm_os_resume(struct kbase_device *kbdev)
+ {
+ 	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
+@@ -475,6 +794,7 @@ static void kbase_arbiter_pm_vm_os_resume(struct kbase_device *kbdev)
+ 	kbase_arbiter_pm_vm_set_state(kbdev,
+ 		KBASE_VM_STATE_STOPPED_GPU_REQUESTED);
+ 	kbase_arbif_gpu_request(kbdev);
++	start_request_timer(kbdev);
+ 
+ 	/* Release lock and block resume OS function until we have
+ 	 * asynchronously received the GRANT message from the Arbiter and
+@@ -486,6 +806,14 @@ static void kbase_arbiter_pm_vm_os_resume(struct kbase_device *kbdev)
+ 	mutex_lock(&arb_vm_state->vm_state_lock);
+ }
+ 
++/**
++ * kbase_arbiter_pm_vm_event() - Dispatch VM event to the state machine.
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ * @evt: VM event
++ *
++ * The state machine function. Receives events and transitions states
++ * according to the event received and the current state
++ */
+ void kbase_arbiter_pm_vm_event(struct kbase_device *kbdev,
+ 	enum kbase_arbif_evt evt)
+ {
+@@ -497,7 +825,9 @@ void kbase_arbiter_pm_vm_event(struct kbase_device *kbdev,
+ 	mutex_lock(&arb_vm_state->vm_state_lock);
+ 	dev_dbg(kbdev->dev, "%s %s\n", __func__,
+ 		kbase_arbiter_pm_vm_event_str(evt));
+-
++	if (arb_vm_state->vm_state != KBASE_VM_STATE_INITIALIZING_WITH_GPU &&
++		arb_vm_state->vm_state != KBASE_VM_STATE_INITIALIZING)
++		KBASE_KTRACE_ADD(kbdev, ARB_VM_EVT, NULL, evt);
+ 	switch (evt) {
+ 	case KBASE_VM_GPU_GRANTED_EVT:
+ 		kbase_arbiter_pm_vm_gpu_start(kbdev);
+@@ -506,7 +836,7 @@ void kbase_arbiter_pm_vm_event(struct kbase_device *kbdev,
+ 		kbase_arbiter_pm_vm_gpu_stop(kbdev);
+ 		break;
+ 	case KBASE_VM_GPU_LOST_EVT:
+-		dev_info(kbdev->dev, "KBASE_ARBIF_GPU_LOST_EVT!");
++		dev_dbg(kbdev->dev, "KBASE_ARBIF_GPU_LOST_EVT!");
+ 		kbase_gpu_lost(kbdev);
+ 		break;
+ 	case KBASE_VM_OS_SUSPEND_EVENT:
+@@ -530,8 +860,6 @@ void kbase_arbiter_pm_vm_event(struct kbase_device *kbdev,
+ 	case KBASE_VM_REF_EVENT:
+ 		switch (arb_vm_state->vm_state) {
+ 		case KBASE_VM_STATE_STARTING:
+-			KBASE_TLSTREAM_TL_EVENT_ARB_STARTED(kbdev, kbdev);
+-			/* FALL THROUGH */
+ 		case KBASE_VM_STATE_IDLE:
+ 			kbase_arbiter_pm_vm_set_state(kbdev,
+ 			KBASE_VM_STATE_ACTIVE);
+@@ -547,15 +875,21 @@ void kbase_arbiter_pm_vm_event(struct kbase_device *kbdev,
+ 		break;
+ 
+ 	case KBASE_VM_GPU_INITIALIZED_EVT:
+-		lockdep_assert_held(&kbdev->pm.lock);
+-		if (kbdev->pm.active_count > 0) {
+-			kbase_arbiter_pm_vm_set_state(kbdev,
+-				KBASE_VM_STATE_ACTIVE);
+-			kbase_arbif_gpu_active(kbdev);
+-		} else {
+-			kbase_arbiter_pm_vm_set_state(kbdev,
+-				KBASE_VM_STATE_IDLE);
+-			kbase_arbif_gpu_idle(kbdev);
++		switch (arb_vm_state->vm_state) {
++		case KBASE_VM_STATE_INITIALIZING_WITH_GPU:
++			lockdep_assert_held(&kbdev->pm.lock);
++			if (kbdev->pm.active_count > 0) {
++				kbase_arbiter_pm_vm_set_state(kbdev,
++					KBASE_VM_STATE_ACTIVE);
++				kbase_arbif_gpu_active(kbdev);
++			} else {
++				kbase_arbiter_pm_vm_set_state(kbdev,
++					KBASE_VM_STATE_IDLE);
++				kbase_arbif_gpu_idle(kbdev);
++			}
++			break;
++		default:
++			break;
+ 		}
+ 		break;
+ 
+@@ -566,6 +900,14 @@ void kbase_arbiter_pm_vm_event(struct kbase_device *kbdev,
+ 	mutex_unlock(&arb_vm_state->vm_state_lock);
+ }
+ 
++KBASE_EXPORT_TEST_API(kbase_arbiter_pm_vm_event);
++
++/**
++ * kbase_arbiter_pm_vm_wait_gpu_assignment() - VM wait for a GPU assignment.
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * VM waits for a GPU assignment.
++ */
+ static void kbase_arbiter_pm_vm_wait_gpu_assignment(struct kbase_device *kbdev)
+ {
+ 	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
+@@ -577,6 +919,12 @@ static void kbase_arbiter_pm_vm_wait_gpu_assignment(struct kbase_device *kbdev)
+ 	dev_dbg(kbdev->dev, "Waiting for GPU assignment - done\n");
+ }
+ 
++/**
++ * kbase_arbiter_pm_vm_gpu_assigned_lockheld() - Check if the GPU is assigned
++ *                                               to the VM (lock must be held)
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Return: true if the VM is in the IDLE or ACTIVE state, i.e. has the GPU.
++ */
+ static inline bool kbase_arbiter_pm_vm_gpu_assigned_lockheld(
+ 	struct kbase_device *kbdev)
+ {
+@@ -587,11 +935,25 @@ static inline bool kbase_arbiter_pm_vm_gpu_assigned_lockheld(
+ 		arb_vm_state->vm_state == KBASE_VM_STATE_ACTIVE);
+ }
+ 
++/**
++ * kbase_arbiter_pm_ctx_active_handle_suspend() - Handle suspend operation for
++ *                                                arbitration mode
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ * @suspend_handler: The handler code for how to handle a suspend
++ *                   that might occur
++ *
++ * This function handles a suspend event from the driver,
++ * communicating with the arbiter and waiting synchronously for the GPU
++ * to be granted again depending on the VM state.
++ *
++ * Return: 0 on success, or 1 if the suspend handler is not possible.
++ */
+ int kbase_arbiter_pm_ctx_active_handle_suspend(struct kbase_device *kbdev,
+ 	enum kbase_pm_suspend_handler suspend_handler)
+ {
+ 	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ 	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
++	int res = 0;
+ 
+ 	if (kbdev->arb.arb_if) {
+ 		mutex_lock(&arb_vm_state->vm_state_lock);
+@@ -606,30 +968,41 @@ int kbase_arbiter_pm_ctx_active_handle_suspend(struct kbase_device *kbdev,
+ 				kbase_arbiter_pm_vm_set_state(kbdev,
+ 					KBASE_VM_STATE_STOPPED_GPU_REQUESTED);
+ 				kbase_arbif_gpu_request(kbdev);
++				start_request_timer(kbdev);
+ 			} else if (arb_vm_state->vm_state ==
+ 					KBASE_VM_STATE_INITIALIZING_WITH_GPU)
+ 				break;
+ 
+ 			if (suspend_handler !=
+ 				KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE) {
+-				if (suspend_handler ==
+-					KBASE_PM_SUSPEND_HANDLER_VM_GPU_GRANTED
+-						||
+-						kbdev->pm.active_count > 0)
+-					break;
+ 
+-				mutex_unlock(&arb_vm_state->vm_state_lock);
+-				mutex_unlock(&kbdev->pm.lock);
+-				mutex_unlock(&js_devdata->runpool_mutex);
+-				return 1;
+-			}
++				/* In case of GPU lost, even if
++				 * active_count > 0, we no longer have GPU
++				 * access
++				 */
++				if (kbase_pm_is_gpu_lost(kbdev))
++					res = 1;
+ 
+-			if (arb_vm_state->vm_state ==
+-					KBASE_VM_STATE_INITIALIZING_WITH_GPU)
++				switch (suspend_handler) {
++				case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
++					res = 1;
++					break;
++				case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
++					if (kbdev->pm.active_count == 0)
++						res = 1;
++					break;
++				case KBASE_PM_SUSPEND_HANDLER_VM_GPU_GRANTED:
++					break;
++				default:
++					WARN(1, "Unknown suspend_handler\n");
++					res = 1;
++					break;
++				}
+ 				break;
++			}
+ 
+ 			/* Need to synchronously wait for GPU assignment */
+-			arb_vm_state->vm_arb_users_waiting++;
++			atomic_inc(&kbdev->pm.gpu_users_waiting);
+ 			mutex_unlock(&arb_vm_state->vm_state_lock);
+ 			mutex_unlock(&kbdev->pm.lock);
+ 			mutex_unlock(&js_devdata->runpool_mutex);
+@@ -637,9 +1010,128 @@ int kbase_arbiter_pm_ctx_active_handle_suspend(struct kbase_device *kbdev,
+ 			mutex_lock(&js_devdata->runpool_mutex);
+ 			mutex_lock(&kbdev->pm.lock);
+ 			mutex_lock(&arb_vm_state->vm_state_lock);
+-			arb_vm_state->vm_arb_users_waiting--;
++			atomic_dec(&kbdev->pm.gpu_users_waiting);
+ 		}
+ 		mutex_unlock(&arb_vm_state->vm_state_lock);
+ 	}
+-	return 0;
++	return res;
++}
++
++/**
++ * kbase_arbiter_pm_update_gpu_freq() - Updates GPU clock frequency received
++ * from arbiter.
++ * @arb_freq: Pointer to the structure holding GPU clock frequency data
++ * @freq:     New frequency value in KHz
++ */
++void kbase_arbiter_pm_update_gpu_freq(struct kbase_arbiter_freq *arb_freq,
++	uint32_t freq)
++{
++	struct kbase_gpu_clk_notifier_data ndata;
++
++	mutex_lock(&arb_freq->arb_freq_lock);
++	if (arb_freq->arb_freq != freq) {
++		ndata.new_rate = freq * KHZ_TO_HZ;
++		ndata.old_rate = arb_freq->arb_freq * KHZ_TO_HZ;
++		ndata.gpu_clk_handle = arb_freq;
++		arb_freq->arb_freq = freq;
++		arb_freq->freq_updated = true;
++		if (arb_freq->nb)
++			arb_freq->nb->notifier_call(arb_freq->nb,
++						    POST_RATE_CHANGE, &ndata);
++	}
++
++	mutex_unlock(&arb_freq->arb_freq_lock);
++}
++
++/**
++ * enumerate_arb_gpu_clk() - Enumerate a GPU clock on the given index
++ * @kbdev: kbase_device pointer
++ * @index: GPU clock index
++ *
++ * Return: pointer to the structure holding the GPU clock frequency data
++ * reported from the arbiter; only index 0 is valid.
++ */
++static void *enumerate_arb_gpu_clk(struct kbase_device *kbdev,
++		unsigned int index)
++{
++	if (index == 0)
++		return &kbdev->arb.arb_freq;
++	return NULL;
++}
++
++/**
++ * get_arb_gpu_clk_rate() - Get the current rate of GPU clock frequency value
++ * @kbdev - kbase_device pointer
++ * @index - GPU clock index
++ *
++ * Returns the GPU clock frequency value saved when gpu is granted from arbiter
++ */
++static unsigned long get_arb_gpu_clk_rate(struct kbase_device *kbdev,
++		void *gpu_clk_handle)
++{
++	uint32_t freq;
++	struct kbase_arbiter_freq *arb_dev_freq =
++			(struct kbase_arbiter_freq *) gpu_clk_handle;
++
++	mutex_lock(&arb_dev_freq->arb_freq_lock);
++	/* Convert from KHz to Hz */
++	freq = arb_dev_freq->arb_freq * KHZ_TO_HZ;
++	mutex_unlock(&arb_dev_freq->arb_freq_lock);
++	return freq;
++}
++
++/**
++ * arb_gpu_clk_notifier_register() - Register a clock rate change notifier.
++ * @kbdev:          kbase_device pointer
++ * @gpu_clk_handle: Handle unique to the enumerated GPU clock
++ * @nb:             notifier block containing the callback function pointer
++ *
++ * Return: 0 on success, negative error code otherwise.
++ *
++ * This function registers a callback function that is invoked whenever the
++ * frequency of the clock corresponding to @gpu_clk_handle changes.
++ */
++static int arb_gpu_clk_notifier_register(struct kbase_device *kbdev,
++	void *gpu_clk_handle, struct notifier_block *nb)
++{
++	int ret = 0;
++	struct kbase_arbiter_freq *arb_dev_freq =
++		(struct kbase_arbiter_freq *)gpu_clk_handle;
++
++	if (!arb_dev_freq->nb)
++		arb_dev_freq->nb = nb;
++	else
++		ret = -EBUSY;
++
++	return ret;
++}
++
++/**
++ * arb_gpu_clk_notifier_unregister() - Unregister clock rate change notifier
++ * @kbdev:          kbase_device pointer
++ * @gpu_clk_handle: Handle unique to the enumerated GPU clock
++ * @nb:             notifier block containing the callback function pointer
++ *
++ * This function is used to unregister a callback function that was
++ * previously registered to get notified of a frequency change of the
++ * clock corresponding to @gpu_clk_handle.
++ */
++static void arb_gpu_clk_notifier_unregister(struct kbase_device *kbdev,
++	void *gpu_clk_handle, struct notifier_block *nb)
++{
++	struct kbase_arbiter_freq *arb_dev_freq =
++		(struct kbase_arbiter_freq *)gpu_clk_handle;
++	if (arb_dev_freq->nb == nb) {
++		arb_dev_freq->nb = NULL;
++	} else {
++		dev_err(kbdev->dev, "%s - notifier did not match\n",
++			 __func__);
++	}
+ }
++
++struct kbase_clk_rate_trace_op_conf arb_clk_rate_trace_ops = {
++	.get_gpu_clk_rate = get_arb_gpu_clk_rate,
++	.enumerate_gpu_clk = enumerate_arb_gpu_clk,
++	.gpu_clk_notifier_register = arb_gpu_clk_notifier_register,
++	.gpu_clk_notifier_unregister = arb_gpu_clk_notifier_unregister
++};
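A consumer of this ops table, such as the clock rate trace manager,
enumerates the single arbiter clock, reads its current rate and may attach
one notifier. A minimal sketch of such a consumer (my_attach_arb_clk is
illustrative, not kbase code):

	static int my_attach_arb_clk(struct kbase_device *kbdev,
				     struct notifier_block *nb)
	{
		void *handle;

		/* Only index 0 is valid for the arbiter-reported clock. */
		handle = arb_clk_rate_trace_ops.enumerate_gpu_clk(kbdev, 0);
		if (!handle)
			return -ENODEV;

		dev_dbg(kbdev->dev, "arbiter GPU clock at %lu Hz\n",
			arb_clk_rate_trace_ops.get_gpu_clk_rate(kbdev, handle));

		/* Fails with -EBUSY if a notifier is already registered. */
		return arb_clk_rate_trace_ops.gpu_clk_notifier_register(kbdev,
				handle, nb);
	}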
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_pm.h b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_pm.h
+index 3c49eb1..1f570bb 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_pm.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/arbiter/mali_kbase_arbiter_pm.h
+@@ -1,28 +1,7 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT ARM Limited. All rights reserved.
+- *
+- * This program is free software and is provided to you under the terms of the
+- * GNU General Public License version 2 as published by the Free Software
+- * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, you can access it online at
+- * http://www.gnu.org/licenses/gpl-2.0.html.
+- *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+- *//* SPDX-License-Identifier: GPL-2.0 */
+-
+-/*
+- *
+- * (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+@@ -38,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /**
+@@ -116,10 +93,18 @@ void kbase_arbiter_pm_early_term(struct kbase_device *kbdev);
+  * kbase_arbiter_pm_release_interrupts() - Release the GPU interrupts
+  * @kbdev: The kbase device structure for the device (must be a valid pointer)
+  *
+- * Releases interrupts if needed (GPU is available) otherwise does nothing
++ * Releases interrupts and sets the interrupts_installed flag to false
+  */
+ void kbase_arbiter_pm_release_interrupts(struct kbase_device *kbdev);
+ 
++/**
++ * kbase_arbiter_pm_install_interrupts() - Install the GPU interrupts
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Installs interrupts and sets the interrupts_installed flag to true.
++ *
++ * Return: 0 on success, otherwise an error code.
++ */
++int kbase_arbiter_pm_install_interrupts(struct kbase_device *kbdev);
++
+ /**
+  * kbase_arbiter_pm_vm_event() - Dispatch VM event to the state machine
+  * @kbdev: The kbase device structure for the device (must be a valid pointer)
+@@ -156,4 +141,54 @@ int kbase_arbiter_pm_ctx_active_handle_suspend(struct kbase_device *kbdev,
+  */
+ void kbase_arbiter_pm_vm_stopped(struct kbase_device *kbdev);
+ 
++/**
++ * kbase_arbiter_set_max_config() - Set the max config data in kbase device.
++ * @kbdev: The kbase device structure for the device (must be a valid pointer).
++ * @max_l2_slices: The maximum number of L2 slices.
++ * @max_core_mask: The largest core mask.
++ *
++ * This function stores the maximum configuration (L2 slice count and core
++ * mask) that can be allocated to the partition, for use during initialization.
++ */
++void kbase_arbiter_set_max_config(struct kbase_device *kbdev,
++				  uint32_t max_l2_slices,
++				  uint32_t max_core_mask);
++
++/**
++ * kbase_arbiter_pm_gpu_assigned() - Determine if this VM has access to the GPU
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Return: 0 if the VM does not have access, 1 if it does, and a negative number
++ * if an error occurred
++ */
++int kbase_arbiter_pm_gpu_assigned(struct kbase_device *kbdev);
++
++extern struct kbase_clk_rate_trace_op_conf arb_clk_rate_trace_ops;
++
++/**
++ * struct kbase_arbiter_freq - Holding the GPU clock frequency data retrieved
++ * from arbiter
++ * @arb_freq:      GPU clock frequency value
++ * @arb_freq_lock: Mutex protecting access to arbfreq value
++ * @nb:            Notifier block to receive rate change callbacks
++ * @freq_updated:  Flag to indicate whether a frequency change has just been
++ *                 communicated, to avoid a "GPU_GRANTED when not expected" warning
++ */
++struct kbase_arbiter_freq {
++	uint32_t arb_freq;
++	struct mutex arb_freq_lock;
++	struct notifier_block *nb;
++	bool freq_updated;
++};
++
++/**
++ * kbase_arbiter_pm_update_gpu_freq() - Update GPU frequency
++ * @arb_freq: Pointer to GPU clock frequency data
++ * @freq:     The new frequency
++ *
++ * Updates the GPU frequency and triggers any notifications
++ */
++void kbase_arbiter_pm_update_gpu_freq(struct kbase_arbiter_freq *arb_freq,
++	uint32_t freq);
++
+ #endif /*_MALI_KBASE_ARBITER_PM_H_ */
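To see how the declarations in this header fit together: the arbiter
reports a frequency in kHz via arb_vm_update_freq(), kbase stores it in
kbase_arbiter_freq, and any registered notifier is called with Hz values.
A hedged sketch of a listener (my_freq_listener is illustrative):

	static int my_freq_listener(struct notifier_block *nb,
				    unsigned long event, void *data)
	{
		struct kbase_gpu_clk_notifier_data *ndata = data;

		if (event == POST_RATE_CHANGE)
			pr_debug("GPU clock: %lu Hz -> %lu Hz\n",
				 ndata->old_rate, ndata->new_rate);
		return NOTIFY_DONE;
	}

	/* The arbiter reported 600000 kHz; the listener sees 600000000 Hz. */
	kbase_arbiter_pm_update_gpu_freq(&kbdev->arb.arb_freq, 600000);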
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/Kbuild b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/Kbuild
+index 2449e80..5dbcff3 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/Kbuild
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/Kbuild
+@@ -1,10 +1,11 @@
++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ #
+-# (C) COPYRIGHT 2014-2019 ARM Limited. All rights reserved.
++# (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+ #
+ # This program is free software and is provided to you under the terms of the
+ # GNU General Public License version 2 as published by the Free Software
+ # Foundation, and any use by you of this program is subject to the terms
+-# of such GNU licence.
++# of such GNU license.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -15,51 +16,34 @@
+ # along with this program; if not, you can access it online at
+ # http://www.gnu.org/licenses/gpl-2.0.html.
+ #
+-# SPDX-License-Identifier: GPL-2.0
+-#
+ #
+ 
+-BACKEND += \
+-	backend/gpu/mali_kbase_cache_policy_backend.c \
+-	backend/gpu/mali_kbase_device_hw.c \
+-	backend/gpu/mali_kbase_gpuprops_backend.c \
+-	backend/gpu/mali_kbase_irq_linux.c \
+-	backend/gpu/mali_kbase_instr_backend.c \
+-	backend/gpu/mali_kbase_js_backend.c \
+-	backend/gpu/mali_kbase_pm_backend.c \
+-	backend/gpu/mali_kbase_pm_driver.c \
+-	backend/gpu/mali_kbase_pm_metrics.c \
+-	backend/gpu/mali_kbase_pm_ca.c \
+-	backend/gpu/mali_kbase_pm_always_on.c \
+-	backend/gpu/mali_kbase_pm_coarse_demand.c \
+-	backend/gpu/mali_kbase_pm_policy.c \
+-	backend/gpu/mali_kbase_time.c \
+-	backend/gpu/mali_kbase_l2_mmu_config.c
++mali_kbase-y += \
++    backend/gpu/mali_kbase_cache_policy_backend.o \
++    backend/gpu/mali_kbase_gpuprops_backend.o \
++    backend/gpu/mali_kbase_irq_linux.o \
++    backend/gpu/mali_kbase_js_backend.o \
++    backend/gpu/mali_kbase_pm_backend.o \
++    backend/gpu/mali_kbase_pm_driver.o \
++    backend/gpu/mali_kbase_pm_metrics.o \
++    backend/gpu/mali_kbase_pm_ca.o \
++    backend/gpu/mali_kbase_pm_always_on.o \
++    backend/gpu/mali_kbase_pm_coarse_demand.o \
++    backend/gpu/mali_kbase_pm_policy.o \
++    backend/gpu/mali_kbase_time.o \
++    backend/gpu/mali_kbase_l2_mmu_config.o \
++    backend/gpu/mali_kbase_clk_rate_trace_mgr.o
+ 
+-ifeq ($(MALI_USE_CSF),1)
+-# empty
+-else
+-	BACKEND += \
+-		backend/gpu/mali_kbase_jm_as.c \
+-		backend/gpu/mali_kbase_debug_job_fault_backend.c \
+-		backend/gpu/mali_kbase_jm_hw.c \
+-		backend/gpu/mali_kbase_jm_rb.c
++ifeq ($(MALI_USE_CSF),0)
++    mali_kbase-y += \
++        backend/gpu/mali_kbase_instr_backend.o \
++        backend/gpu/mali_kbase_jm_as.o \
++        backend/gpu/mali_kbase_debug_job_fault_backend.o \
++        backend/gpu/mali_kbase_jm_hw.o \
++        backend/gpu/mali_kbase_jm_rb.o
+ endif
+ 
+-ifeq ($(MALI_CUSTOMER_RELEASE),0)
+-BACKEND += \
+-	backend/gpu/mali_kbase_pm_always_on_demand.c
+-endif
+ 
+-ifeq ($(CONFIG_MALI_DEVFREQ),y)
+-BACKEND += \
+-	backend/gpu/mali_kbase_devfreq.c
+-endif
++mali_kbase-$(CONFIG_MALI_DEVFREQ) += \
++    backend/gpu/mali_kbase_devfreq.o
+ 
+-ifeq ($(CONFIG_MALI_NO_MALI),y)
+-	# Dummy model
+-	BACKEND += backend/gpu/mali_kbase_model_dummy.c
+-	BACKEND += backend/gpu/mali_kbase_model_linux.c
+-	# HW error simulation
+-	BACKEND += backend/gpu/mali_kbase_model_error_generator.c
+-endif
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h
+index 4a61f96..6924fdb 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_backend_config.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2018, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /*
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
+index 7378bfd..e542ccf 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2015-2016,2018 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2016, 2018, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,12 +17,10 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ #include "backend/gpu/mali_kbase_cache_policy_backend.h"
+-#include <backend/gpu/mali_kbase_device_internal.h>
++#include <device/mali_kbase_device.h>
+ 
+ void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
+ 		u32 mode)
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h
+index f78ada7..278125a 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2016, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,16 +17,13 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+ #ifndef _KBASE_CACHE_POLICY_BACKEND_H_
+ #define _KBASE_CACHE_POLICY_BACKEND_H_
+ 
+ #include "mali_kbase.h"
+-#include "mali_base_kernel.h"
++#include <uapi/gpu/arm/midgard/mali_base_kernel.h>
+ 
+ /**
+   * kbase_cache_set_coherency_mode() - Sets the system coherency mode
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_clk_rate_trace_mgr.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_clk_rate_trace_mgr.c
+new file mode 100644
+index 0000000..6ad0f58
+--- /dev/null
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_clk_rate_trace_mgr.c
+@@ -0,0 +1,325 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
++/*
++ *
++ * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
++ *
++ * This program is free software and is provided to you under the terms of the
++ * GNU General Public License version 2 as published by the Free Software
++ * Foundation, and any use by you of this program is subject to the terms
++ * of such GNU license.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, you can access it online at
++ * http://www.gnu.org/licenses/gpl-2.0.html.
++ *
++ */
++
++/*
++ * Implementation of the GPU clock rate trace manager.
++ */
++
++#include <mali_kbase.h>
++#include <mali_kbase_config_defaults.h>
++#include <linux/clk.h>
++#include <asm/div64.h>
++#include "backend/gpu/mali_kbase_clk_rate_trace_mgr.h"
++
++#ifdef CONFIG_TRACE_POWER_GPU_FREQUENCY
++#include <trace/events/power_gpu_frequency.h>
++#else
++#include "mali_power_gpu_frequency_trace.h"
++#endif
++
++#ifndef CLK_RATE_TRACE_OPS
++#define CLK_RATE_TRACE_OPS (NULL)
++#endif
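++
++/*
++ * A platform may define CLK_RATE_TRACE_OPS, e.g. in a platform config
++ * header included ahead of this fallback, to point at its own
++ * struct kbase_clk_rate_trace_op_conf. A minimal sketch; field names are
++ * taken from the uses below, callback names are illustrative:
++ *
++ *	static struct kbase_clk_rate_trace_op_conf my_clk_trace_ops = {
++ *		.get_gpu_clk_rate = my_get_gpu_clk_rate,
++ *		.enumerate_gpu_clk = my_enumerate_gpu_clk,
++ *		.gpu_clk_notifier_register = my_notifier_register,
++ *		.gpu_clk_notifier_unregister = my_notifier_unregister,
++ *	};
++ *
++ *	#define CLK_RATE_TRACE_OPS (&my_clk_trace_ops)
++ */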
++
++/**
++ * get_clk_rate_trace_callbacks() - Returns pointer to clk trace ops.
++ * @kbdev: Pointer to kbase device, used to check if arbitration is enabled
++ *         when compiled with arbiter support.
++ * Return: Pointer to clk trace ops if supported or NULL.
++ */
++static struct kbase_clk_rate_trace_op_conf *
++get_clk_rate_trace_callbacks(struct kbase_device *kbdev __maybe_unused)
++{
++	/* base case */
++	struct kbase_clk_rate_trace_op_conf *callbacks =
++		(struct kbase_clk_rate_trace_op_conf *)CLK_RATE_TRACE_OPS;
++#if defined(CONFIG_MALI_ARBITER_SUPPORT) && defined(CONFIG_OF)
++	const void *arbiter_if_node;
++
++	if (WARN_ON(!kbdev) || WARN_ON(!kbdev->dev))
++		return callbacks;
++
++	arbiter_if_node =
++		of_get_property(kbdev->dev->of_node, "arbiter_if", NULL);
++	/* Arbitration enabled, override the callback pointer. */
++	if (arbiter_if_node)
++		callbacks = &arb_clk_rate_trace_ops;
++	else
++		dev_dbg(kbdev->dev,
++			"Arbitration supported but disabled by platform. Leaving clk rate callbacks as default.\n");
++
++#endif
++
++	return callbacks;
++}
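++
++/*
++ * Devicetree sketch for the arbiter override above (node and label names
++ * illustrative; only the "arbiter_if" property is read here):
++ *
++ *	gpu: gpu@0 {
++ *		arbiter_if = <&mali_arbiter>;
++ *	};
++ */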
++
++static int gpu_clk_rate_change_notifier(struct notifier_block *nb,
++			unsigned long event, void *data)
++{
++	struct kbase_gpu_clk_notifier_data *ndata = data;
++	struct kbase_clk_data *clk_data =
++		container_of(nb, struct kbase_clk_data, clk_rate_change_nb);
++	struct kbase_clk_rate_trace_manager *clk_rtm = clk_data->clk_rtm;
++	unsigned long flags;
++
++	if (WARN_ON_ONCE(clk_data->gpu_clk_handle != ndata->gpu_clk_handle))
++		return NOTIFY_BAD;
++
++	spin_lock_irqsave(&clk_rtm->lock, flags);
++	if (event == POST_RATE_CHANGE) {
++		if (!clk_rtm->gpu_idle &&
++		    (clk_data->clock_val != ndata->new_rate)) {
++			kbase_clk_rate_trace_manager_notify_all(
++				clk_rtm, clk_data->index, ndata->new_rate);
++		}
++
++		clk_data->clock_val = ndata->new_rate;
++	}
++	spin_unlock_irqrestore(&clk_rtm->lock, flags);
++
++	return NOTIFY_DONE;
++}
++
++static int gpu_clk_data_init(struct kbase_device *kbdev,
++		void *gpu_clk_handle, unsigned int index)
++{
++	struct kbase_clk_rate_trace_op_conf *callbacks;
++	struct kbase_clk_data *clk_data;
++	struct kbase_clk_rate_trace_manager *clk_rtm = &kbdev->pm.clk_rtm;
++	int ret = 0;
++
++	callbacks = get_clk_rate_trace_callbacks(kbdev);
++
++	if (WARN_ON(!callbacks) ||
++	    WARN_ON(!gpu_clk_handle) ||
++	    WARN_ON(index >= BASE_MAX_NR_CLOCKS_REGULATORS))
++		return -EINVAL;
++
++	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
++	if (!clk_data) {
++		dev_err(kbdev->dev, "Failed to allocate data for clock enumerated at index %u", index);
++		return -ENOMEM;
++	}
++
++	clk_data->index = (u8)index;
++	clk_data->gpu_clk_handle = gpu_clk_handle;
++	/* Store the initial value of clock */
++	clk_data->clock_val =
++		callbacks->get_gpu_clk_rate(kbdev, gpu_clk_handle);
++
++	{
++		/* At initialization time, the GPU is powered off. */
++		unsigned long flags;
++
++		spin_lock_irqsave(&clk_rtm->lock, flags);
++		kbase_clk_rate_trace_manager_notify_all(
++			clk_rtm, clk_data->index, 0);
++		spin_unlock_irqrestore(&clk_rtm->lock, flags);
++	}
++
++	clk_data->clk_rtm = clk_rtm;
++	clk_rtm->clks[index] = clk_data;
++
++	clk_data->clk_rate_change_nb.notifier_call =
++			gpu_clk_rate_change_notifier;
++
++	if (callbacks->gpu_clk_notifier_register)
++		ret = callbacks->gpu_clk_notifier_register(kbdev,
++				gpu_clk_handle, &clk_data->clk_rate_change_nb);
++	if (ret) {
++		dev_err(kbdev->dev, "Failed to register notifier for clock enumerated at index %u", index);
++		kfree(clk_data);
++	}
++
++	return ret;
++}
++
++int kbase_clk_rate_trace_manager_init(struct kbase_device *kbdev)
++{
++	struct kbase_clk_rate_trace_op_conf *callbacks;
++	struct kbase_clk_rate_trace_manager *clk_rtm = &kbdev->pm.clk_rtm;
++	unsigned int i;
++	int ret = 0;
++
++	callbacks = get_clk_rate_trace_callbacks(kbdev);
++
++	spin_lock_init(&clk_rtm->lock);
++	INIT_LIST_HEAD(&clk_rtm->listeners);
++
++	/* Return early if no callbacks provided for clock rate tracing */
++	if (!callbacks) {
++		WRITE_ONCE(clk_rtm->clk_rate_trace_ops, NULL);
++		return 0;
++	}
++
++	clk_rtm->gpu_idle = true;
++
++	for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
++		void *gpu_clk_handle =
++			callbacks->enumerate_gpu_clk(kbdev, i);
++
++		if (!gpu_clk_handle)
++			break;
++
++		ret = gpu_clk_data_init(kbdev, gpu_clk_handle, i);
++		if (ret)
++			goto error;
++	}
++
++	/* Activate clock rate trace manager if at least one GPU clock was
++	 * enumerated.
++	 */
++	if (i) {
++		WRITE_ONCE(clk_rtm->clk_rate_trace_ops, callbacks);
++	} else {
++		dev_info(kbdev->dev, "No clock(s) available for rate tracing");
++		WRITE_ONCE(clk_rtm->clk_rate_trace_ops, NULL);
++	}
++
++	return 0;
++
++error:
++	while (i--) {
++		clk_rtm->clk_rate_trace_ops->gpu_clk_notifier_unregister(
++				kbdev, clk_rtm->clks[i]->gpu_clk_handle,
++				&clk_rtm->clks[i]->clk_rate_change_nb);
++		kfree(clk_rtm->clks[i]);
++	}
++
++	return ret;
++}
++
++void kbase_clk_rate_trace_manager_term(struct kbase_device *kbdev)
++{
++	struct kbase_clk_rate_trace_manager *clk_rtm = &kbdev->pm.clk_rtm;
++	unsigned int i;
++
++	WARN_ON(!list_empty(&clk_rtm->listeners));
++
++	if (!clk_rtm->clk_rate_trace_ops)
++		return;
++
++	for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
++		if (!clk_rtm->clks[i])
++			break;
++
++		if (clk_rtm->clk_rate_trace_ops->gpu_clk_notifier_unregister)
++			clk_rtm->clk_rate_trace_ops->gpu_clk_notifier_unregister
++			(kbdev, clk_rtm->clks[i]->gpu_clk_handle,
++			&clk_rtm->clks[i]->clk_rate_change_nb);
++		kfree(clk_rtm->clks[i]);
++	}
++
++	WRITE_ONCE(clk_rtm->clk_rate_trace_ops, NULL);
++}
++
++void kbase_clk_rate_trace_manager_gpu_active(struct kbase_device *kbdev)
++{
++	struct kbase_clk_rate_trace_manager *clk_rtm = &kbdev->pm.clk_rtm;
++	unsigned int i;
++	unsigned long flags;
++
++	if (!clk_rtm->clk_rate_trace_ops)
++		return;
++
++	spin_lock_irqsave(&clk_rtm->lock, flags);
++
++	for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
++		struct kbase_clk_data *clk_data = clk_rtm->clks[i];
++
++		if (!clk_data)
++			break;
++
++		if (unlikely(!clk_data->clock_val))
++			continue;
++
++		kbase_clk_rate_trace_manager_notify_all(
++			clk_rtm, clk_data->index, clk_data->clock_val);
++	}
++
++	clk_rtm->gpu_idle = false;
++	spin_unlock_irqrestore(&clk_rtm->lock, flags);
++}
++
++void kbase_clk_rate_trace_manager_gpu_idle(struct kbase_device *kbdev)
++{
++	struct kbase_clk_rate_trace_manager *clk_rtm = &kbdev->pm.clk_rtm;
++	unsigned int i;
++	unsigned long flags;
++
++	if (!clk_rtm->clk_rate_trace_ops)
++		return;
++
++	spin_lock_irqsave(&clk_rtm->lock, flags);
++
++	for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
++		struct kbase_clk_data *clk_data = clk_rtm->clks[i];
++
++		if (!clk_data)
++			break;
++
++		if (unlikely(!clk_data->clock_val))
++			continue;
++
++		kbase_clk_rate_trace_manager_notify_all(
++			clk_rtm, clk_data->index, 0);
++	}
++
++	clk_rtm->gpu_idle = true;
++	spin_unlock_irqrestore(&clk_rtm->lock, flags);
++}
++
++void kbase_clk_rate_trace_manager_notify_all(
++	struct kbase_clk_rate_trace_manager *clk_rtm,
++	u32 clk_index,
++	unsigned long new_rate)
++{
++	struct kbase_clk_rate_listener *pos;
++	struct kbase_device *kbdev;
++
++	lockdep_assert_held(&clk_rtm->lock);
++
++	kbdev = container_of(clk_rtm, struct kbase_device, pm.clk_rtm);
++
++	dev_dbg(kbdev->dev, "%s - GPU clock %u rate changed to %lu, pid: %d",
++		__func__, clk_index, new_rate, current->pid);
++
++	/* Raise standard `power/gpu_frequency` ftrace event */
++	{
++		unsigned long new_rate_khz = new_rate;
++
++#if BITS_PER_LONG == 64
++		do_div(new_rate_khz, 1000);
++#elif BITS_PER_LONG == 32
++		new_rate_khz /= 1000;
++#else
++#error "unsigned long division is not supported for this architecture"
++#endif
++
++		trace_gpu_frequency(new_rate_khz, clk_index);
++	}
++
++	/* Notify the listeners. */
++	list_for_each_entry(pos, &clk_rtm->listeners, node) {
++		pos->notify(pos, clk_index, new_rate);
++	}
++}
++KBASE_EXPORT_TEST_API(kbase_clk_rate_trace_manager_notify_all);
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_clk_rate_trace_mgr.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_clk_rate_trace_mgr.h
+new file mode 100644
+index 0000000..f7ec9d1
+--- /dev/null
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_clk_rate_trace_mgr.h
+@@ -0,0 +1,173 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++/*
++ *
++ * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
++ *
++ * This program is free software and is provided to you under the terms of the
++ * GNU General Public License version 2 as published by the Free Software
++ * Foundation, and any use by you of this program is subject to the terms
++ * of such GNU license.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, you can access it online at
++ * http://www.gnu.org/licenses/gpl-2.0.html.
++ *
++ */
++
++#ifndef _KBASE_CLK_RATE_TRACE_MGR_
++#define _KBASE_CLK_RATE_TRACE_MGR_
++
++/* The index of the top clock domain in kbase_clk_rate_trace_manager:clks. */
++#define KBASE_CLOCK_DOMAIN_TOP (0)
++
++/* The index of the shader-cores clock domain in
++ * kbase_clk_rate_trace_manager:clks.
++ */
++#define KBASE_CLOCK_DOMAIN_SHADER_CORES (1)
++
++/**
++ * struct kbase_clk_data - Data stored per enumerated GPU clock.
++ *
++ * @clk_rtm:            Pointer to clock rate trace manager object.
++ * @gpu_clk_handle:     Handle unique to the enumerated GPU clock.
++ * @plat_private:       Private data for the platform to store into.
++ * @clk_rate_change_nb: Notifier block containing the pointer to the
++ *                      callback function that is invoked whenever the
++ *                      rate of the enumerated GPU clock changes.
++ * @clock_val:          Current rate of the enumerated GPU clock.
++ * @index:              Index at which the GPU clock was enumerated.
++ */
++struct kbase_clk_data {
++	struct kbase_clk_rate_trace_manager *clk_rtm;
++	void *gpu_clk_handle;
++	void *plat_private;
++	struct notifier_block clk_rate_change_nb;
++	unsigned long clock_val;
++	u8 index;
++};
++
++/**
++ * kbase_clk_rate_trace_manager_init - Initialize GPU clock rate trace manager.
++ *
++ * @kbdev:      Device pointer
++ *
++ * Return: 0 on success, or an error code on failure.
++ */
++int kbase_clk_rate_trace_manager_init(struct kbase_device *kbdev);
++
++/**
++ * kbase_clk_rate_trace_manager_term - Terminate GPU clock rate trace manager.
++ *
++ * @kbdev:      Device pointer
++ */
++void kbase_clk_rate_trace_manager_term(struct kbase_device *kbdev);
++
++/**
++ * kbase_clk_rate_trace_manager_gpu_active - Inform GPU clock rate trace
++ *                                           manager of GPU becoming active.
++ *
++ * @kbdev:      Device pointer
++ */
++void kbase_clk_rate_trace_manager_gpu_active(struct kbase_device *kbdev);
++
++/**
++ * kbase_clk_rate_trace_manager_gpu_idle - Inform GPU clock rate trace
++ *                                         manager of GPU becoming idle.
++ * @kbdev:      Device pointer
++ */
++void kbase_clk_rate_trace_manager_gpu_idle(struct kbase_device *kbdev);
++
++/**
++ * kbase_clk_rate_trace_manager_subscribe_no_lock() - Add freq change listener.
++ *
++ * @clk_rtm:    Clock rate manager instance.
++ * @listener:   Listener handle
++ *
++ * kbase_clk_rate_trace_manager:lock must be held by the caller.
++ */
++static inline void kbase_clk_rate_trace_manager_subscribe_no_lock(
++	struct kbase_clk_rate_trace_manager *clk_rtm,
++	struct kbase_clk_rate_listener *listener)
++{
++	lockdep_assert_held(&clk_rtm->lock);
++	list_add(&listener->node, &clk_rtm->listeners);
++}
++
++/**
++ * kbase_clk_rate_trace_manager_subscribe() - Add freq change listener.
++ *
++ * @clk_rtm:    Clock rate manager instance.
++ * @listener:   Listener handle
++ */
++static inline void kbase_clk_rate_trace_manager_subscribe(
++	struct kbase_clk_rate_trace_manager *clk_rtm,
++	struct kbase_clk_rate_listener *listener)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&clk_rtm->lock, flags);
++	kbase_clk_rate_trace_manager_subscribe_no_lock(
++		clk_rtm, listener);
++	spin_unlock_irqrestore(&clk_rtm->lock, flags);
++}
++
++/**
++ * kbase_clk_rate_trace_manager_unsubscribe() - Remove freq change listener.
++ *
++ * @clk_rtm:    Clock rate manager instance.
++ * @listener:   Listener handle
++ */
++static inline void kbase_clk_rate_trace_manager_unsubscribe(
++	struct kbase_clk_rate_trace_manager *clk_rtm,
++	struct kbase_clk_rate_listener *listener)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&clk_rtm->lock, flags);
++	list_del(&listener->node);
++	spin_unlock_irqrestore(&clk_rtm->lock, flags);
++}
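++
++/*
++ * Usage sketch for the subscribe/unsubscribe pair above (listener and
++ * callback names illustrative). The notify callback matches the call made
++ * by kbase_clk_rate_trace_manager_notify_all():
++ *
++ *	static void my_rate_change(struct kbase_clk_rate_listener *listener,
++ *				   u32 clk_index, unsigned long new_rate)
++ *	{
++ *		// react to the new clock rate
++ *	}
++ *
++ *	static struct kbase_clk_rate_listener my_listener = {
++ *		.notify = my_rate_change,
++ *	};
++ *
++ *	kbase_clk_rate_trace_manager_subscribe(&kbdev->pm.clk_rtm,
++ *					       &my_listener);
++ */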
++
++/**
++ * kbase_clk_rate_trace_manager_notify_all() - Notify all clock rate
++ *                                             listeners.
++ *
++ * @clk_rtm:     Clock rate manager instance.
++ * @clock_index: Clock index.
++ * @new_rate:    New clock frequency (Hz).
++ *
++ * kbase_clk_rate_trace_manager:lock must be held by the caller.
++ * This function is exported for use by the clock rate trace test
++ * portal.
++ */
++void kbase_clk_rate_trace_manager_notify_all(
++	struct kbase_clk_rate_trace_manager *clk_rtm,
++	u32 clock_index,
++	unsigned long new_rate);
++
++#endif /* _KBASE_CLK_RATE_TRACE_MGR_ */
++
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c
+index b05844e..e121b41 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2012-2015,2018-2019 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2012-2015, 2018-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,15 +17,13 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ #include <mali_kbase.h>
+-#include <backend/gpu/mali_kbase_device_internal.h>
++#include <device/mali_kbase_device.h>
+ #include "mali_kbase_debug_job_fault.h"
+ 
+-#ifdef CONFIG_DEBUG_FS
++#if IS_ENABLED(CONFIG_DEBUG_FS)
+ 
+ /*GPU_CONTROL_REG(r)*/
+ static int gpu_control_reg_snapshot[] = {
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
+index 2806f05..8c31499 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2014-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ #include <mali_kbase.h>
+@@ -27,40 +26,53 @@
+ #include <linux/of.h>
+ #include <linux/clk.h>
+ #include <linux/devfreq.h>
+-#ifdef CONFIG_DEVFREQ_THERMAL
++#if IS_ENABLED(CONFIG_DEVFREQ_THERMAL)
+ #include <linux/devfreq_cooling.h>
+ #endif
+ 
+ #include <linux/version.h>
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ #include <linux/pm_opp.h>
+-#else /* Linux >= 3.13 */
+-/* In 3.13 the OPP include header file, types, and functions were all
+- * renamed. Use the old filename for the include, and define the new names to
+- * the old, when an old kernel is detected.
+- */
+-#include <linux/opp.h>
+-#define dev_pm_opp opp
+-#define dev_pm_opp_get_voltage opp_get_voltage
+-#define dev_pm_opp_get_opp_count opp_get_opp_count
+-#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil
+-#define dev_pm_opp_find_freq_floor opp_find_freq_floor
+-#endif /* Linux >= 3.13 */
+ 
+ /**
+- * opp_translate - Translate nominal OPP frequency from devicetree into real
+- *                 frequency and core mask
+- * @kbdev:     Device pointer
+- * @freq:      Nominal frequency
+- * @core_mask: Pointer to u64 to store core mask to
+- * @freqs:     Pointer to array of frequencies
+- * @volts:     Pointer to array of voltages
++ * get_voltage() - Get the voltage value corresponding to the nominal frequency
++ *                 used by devfreq.
++ * @kbdev:    Device pointer
++ * @freq:     Nominal frequency in Hz passed by devfreq.
++ *
++ * This function is called only when an OPP table compatible with
++ * "operating-points-v2-mali" is not present in the devicetree for the GPU device.
+  *
+- * This function will only perform translation if an operating-points-v2-mali
+- * table is present in devicetree. If one is not present then it will return an
+- * untranslated frequency and all cores enabled.
++ * Return: Voltage value in millivolts, or 0 in case of error.
+  */
+-static void opp_translate(struct kbase_device *kbdev, unsigned long freq,
++static unsigned long get_voltage(struct kbase_device *kbdev, unsigned long freq)
++{
++	struct dev_pm_opp *opp;
++	unsigned long voltage = 0;
++
++#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
++	rcu_read_lock();
++#endif
++
++	opp = dev_pm_opp_find_freq_exact(kbdev->dev, freq, true);
++
++	if (IS_ERR_OR_NULL(opp))
++		dev_err(kbdev->dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
++	else {
++		voltage = dev_pm_opp_get_voltage(opp);
++#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
++		dev_pm_opp_put(opp);
++#endif
++	}
++
++#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
++	rcu_read_unlock();
++#endif
++
++	/* Return the voltage in millivolts */
++	return voltage / 1000;
++}
++
++void kbase_devfreq_opp_translate(struct kbase_device *kbdev, unsigned long freq,
+ 	u64 *core_mask, unsigned long *freqs, unsigned long *volts)
+ {
+ 	unsigned int i;
+@@ -82,12 +94,17 @@ static void opp_translate(struct kbase_device *kbdev, unsigned long freq,
+ 	}
+ 
+ 	/* If failed to find OPP, return all cores enabled
+-	 * and nominal frequency
++	 * and nominal frequency and the corresponding voltage.
+ 	 */
+ 	if (i == kbdev->num_opps) {
++		unsigned long voltage = get_voltage(kbdev, freq);
++
+ 		*core_mask = kbdev->gpu_props.props.raw_props.shader_present;
+-		for (i = 0; i < kbdev->nr_clocks; i++)
++
++		for (i = 0; i < kbdev->nr_clocks; i++) {
+ 			freqs[i] = freq;
++			volts[i] = voltage;
++		}
+ 	}
+ }
+ 
+@@ -104,18 +121,18 @@ kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
+ 
+ 	nominal_freq = *target_freq;
+ 
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+ 	rcu_read_lock();
+ #endif
+ 	opp = devfreq_recommended_opp(dev, &nominal_freq, flags);
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+ 	rcu_read_unlock();
+ #endif
+ 	if (IS_ERR_OR_NULL(opp)) {
+ 		dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
+ 		return PTR_ERR(opp);
+ 	}
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
++#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+ 	dev_pm_opp_put(opp);
+ #endif
+ 
+@@ -127,9 +144,10 @@ kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
+ 		return 0;
+ 	}
+ 
+-	opp_translate(kbdev, nominal_freq, &core_mask, freqs, volts);
++	kbase_devfreq_opp_translate(kbdev, nominal_freq, &core_mask,
++				    freqs, volts);
+ 
+-#ifdef CONFIG_REGULATOR
++#if IS_ENABLED(CONFIG_REGULATOR)
+ 	/* Regulators and clocks work in pairs: every clock has a regulator,
+ 	 * and we never expect to have more regulators than clocks.
+ 	 *
+@@ -177,7 +195,7 @@ kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
+ 		}
+ 	}
+ 
+-#ifdef CONFIG_REGULATOR
++#if IS_ENABLED(CONFIG_REGULATOR)
+ 	for (i = 0; i < kbdev->nr_clocks; i++) {
+ 		if (kbdev->regulators[i] &&
+ 				kbdev->current_voltages[i] != volts[i] &&
+@@ -238,6 +256,10 @@ kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
+ 	stat->current_frequency = kbdev->current_nominal_freq;
+ 	stat->private_data = NULL;
+ 
++#if MALI_USE_CSF && defined(CONFIG_DEVFREQ_THERMAL)
++	kbase_ipa_reset_data(kbdev);
++#endif
++
+ 	return 0;
+ }
+ 
+@@ -249,11 +271,11 @@ static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
+ 	unsigned long freq;
+ 	struct dev_pm_opp *opp;
+ 
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+ 	rcu_read_lock();
+ #endif
+ 	count = dev_pm_opp_get_opp_count(kbdev->dev);
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+ 	rcu_read_unlock();
+ #endif
+ 	if (count < 0)
+@@ -264,20 +286,20 @@ static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
+ 	if (!dp->freq_table)
+ 		return -ENOMEM;
+ 
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+ 	rcu_read_lock();
+ #endif
+ 	for (i = 0, freq = ULONG_MAX; i < count; i++, freq--) {
+ 		opp = dev_pm_opp_find_freq_floor(kbdev->dev, &freq);
+ 		if (IS_ERR(opp))
+ 			break;
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
++#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+ 		dev_pm_opp_put(opp);
+-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
++#endif /* KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE */
+ 
+ 		dp->freq_table[i] = freq;
+ 	}
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+ 	rcu_read_unlock();
+ #endif
+ 
+@@ -309,18 +331,21 @@ static void kbase_devfreq_term_freq_table(struct kbase_device *kbdev)
+ 	struct devfreq_dev_profile *dp = &kbdev->devfreq_profile;
+ 
+ 	kfree(dp->freq_table);
++	dp->freq_table = NULL;
+ }
+ 
+ static void kbase_devfreq_term_core_mask_table(struct kbase_device *kbdev)
+ {
+ 	kfree(kbdev->devfreq_table);
++	kbdev->devfreq_table = NULL;
+ }
+ 
+ static void kbase_devfreq_exit(struct device *dev)
+ {
+ 	struct kbase_device *kbdev = dev_get_drvdata(dev);
+ 
+-	kbase_devfreq_term_freq_table(kbdev);
++	if (kbdev)
++		kbase_devfreq_term_freq_table(kbdev);
+ }
+ 
+ static void kbasep_devfreq_read_suspend_clock(struct kbase_device *kbdev,
+@@ -359,7 +384,7 @@ static void kbasep_devfreq_read_suspend_clock(struct kbase_device *kbdev,
+ 
+ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
+ {
+-#if KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE || !defined(CONFIG_OF)
++#ifndef CONFIG_OF
+ 	/* OPP table initialization requires at least the capability to get
+ 	 * regulators and clocks from the device tree, as well as parsing
+ 	 * arrays of unsigned integer values.
+@@ -392,7 +417,7 @@ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
+ 		u64 core_mask, opp_freq,
+ 			real_freqs[BASE_MAX_NR_CLOCKS_REGULATORS];
+ 		int err;
+-#ifdef CONFIG_REGULATOR
++#if IS_ENABLED(CONFIG_REGULATOR)
+ 		u32 opp_volts[BASE_MAX_NR_CLOCKS_REGULATORS];
+ #endif
+ 
+@@ -420,7 +445,7 @@ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
+ 					err);
+ 			continue;
+ 		}
+-#ifdef CONFIG_REGULATOR
++#if IS_ENABLED(CONFIG_REGULATOR)
+ 		err = of_property_read_u32_array(node,
+ 			"opp-microvolt", opp_volts, kbdev->nr_regulators);
+ 		if (err < 0) {
+@@ -474,7 +499,7 @@ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
+ 				kbdev->devfreq_table[i].real_freqs[j] =
+ 					real_freqs[j];
+ 		}
+-#ifdef CONFIG_REGULATOR
++#if IS_ENABLED(CONFIG_REGULATOR)
+ 		if (kbdev->nr_regulators > 0) {
+ 			int j;
+ 
+@@ -493,11 +518,9 @@ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
+ 	kbdev->num_opps = i;
+ 
+ 	return 0;
+-#endif /* KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE */
++#endif /* CONFIG_OF */
+ }
+ 
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+-
+ static const char *kbase_devfreq_req_type_name(enum kbase_devfreq_work_type type)
+ {
+ 	const char *p;
+@@ -554,27 +577,26 @@ static void kbase_devfreq_suspend_resume_worker(struct work_struct *work)
+ 	}
+ }
+ 
+-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+-
+ void kbase_devfreq_enqueue_work(struct kbase_device *kbdev,
+ 				       enum kbase_devfreq_work_type work_type)
+ {
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ 	unsigned long flags;
+ 
+ 	WARN_ON(work_type == DEVFREQ_WORK_NONE);
+ 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+-	kbdev->devfreq_queue.req_type = work_type;
+-	queue_work(kbdev->devfreq_queue.workq, &kbdev->devfreq_queue.work);
++	/* Skip enqueuing a work if workqueue has already been terminated. */
++	if (likely(kbdev->devfreq_queue.workq)) {
++		kbdev->devfreq_queue.req_type = work_type;
++		queue_work(kbdev->devfreq_queue.workq,
++			   &kbdev->devfreq_queue.work);
++	}
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ 	dev_dbg(kbdev->dev, "Enqueuing devfreq req: %s\n",
+ 		kbase_devfreq_req_type_name(work_type));
+-#endif
+ }
+ 
+ static int kbase_devfreq_work_init(struct kbase_device *kbdev)
+ {
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ 	kbdev->devfreq_queue.req_type = DEVFREQ_WORK_NONE;
+ 	kbdev->devfreq_queue.acted_type = DEVFREQ_WORK_RESUME;
+ 
+@@ -584,17 +606,23 @@ static int kbase_devfreq_work_init(struct kbase_device *kbdev)
+ 
+ 	INIT_WORK(&kbdev->devfreq_queue.work,
+ 			kbase_devfreq_suspend_resume_worker);
+-#endif
+ 	return 0;
+ }
+ 
+ static void kbase_devfreq_work_term(struct kbase_device *kbdev)
+ {
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+-	destroy_workqueue(kbdev->devfreq_queue.workq);
+-#endif
++	unsigned long flags;
++	struct workqueue_struct *workq;
++
++	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
++	workq = kbdev->devfreq_queue.workq;
++	kbdev->devfreq_queue.workq = NULL;
++	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
++
++	destroy_workqueue(workq);
+ }
+ 
++
+ int kbase_devfreq_init(struct kbase_device *kbdev)
+ {
+ 	struct devfreq_dev_profile *dp;
+@@ -631,19 +659,11 @@ int kbase_devfreq_init(struct kbase_device *kbdev)
+ 		/* Record the maximum frequency possible */
+ 		kbdev->gpu_props.props.core_props.gpu_freq_khz_max =
+ 			dp->freq_table[0] / 1000;
+-	};
+-
+-	err = kbase_devfreq_init_core_mask_table(kbdev);
+-	if (err) {
+-		kbase_devfreq_term_freq_table(kbdev);
+-		return err;
+ 	}
+ 
+-	/* Initialise devfreq suspend/resume workqueue */
+-	err = kbase_devfreq_work_init(kbdev);
++	err = kbase_devfreq_init_core_mask_table(kbdev);
+ 	if (err) {
+ 		kbase_devfreq_term_freq_table(kbdev);
+-		dev_err(kbdev->dev, "Devfreq initialization failed");
+ 		return err;
+ 	}
+ 
+@@ -651,13 +671,27 @@ int kbase_devfreq_init(struct kbase_device *kbdev)
+ 				"simple_ondemand", NULL);
+ 	if (IS_ERR(kbdev->devfreq)) {
+ 		err = PTR_ERR(kbdev->devfreq);
+-		kbase_devfreq_work_term(kbdev);
++		kbdev->devfreq = NULL;
++		kbase_devfreq_term_core_mask_table(kbdev);
+ 		kbase_devfreq_term_freq_table(kbdev);
++		dev_err(kbdev->dev, "Fail to add devfreq device(%d)\n", err);
++		return err;
++	}
++
++	/* Initialize devfreq suspend/resume workqueue */
++	err = kbase_devfreq_work_init(kbdev);
++	if (err) {
++		if (devfreq_remove_device(kbdev->devfreq))
++			dev_err(kbdev->dev, "Fail to rm devfreq\n");
++		kbdev->devfreq = NULL;
++		kbase_devfreq_term_core_mask_table(kbdev);
++		dev_err(kbdev->dev, "Fail to init devfreq workqueue\n");
+ 		return err;
+ 	}
+ 
+ 	/* devfreq_add_device only copies a few of kbdev->dev's fields, so
+-	 * set drvdata explicitly so IPA models can access kbdev. */
++	 * set drvdata explicitly so IPA models can access kbdev.
++	 */
+ 	dev_set_drvdata(&kbdev->devfreq->dev, kbdev);
+ 
+ 	err = devfreq_register_opp_notifier(kbdev->dev, kbdev->devfreq);
+@@ -667,11 +701,11 @@ int kbase_devfreq_init(struct kbase_device *kbdev)
+ 		goto opp_notifier_failed;
+ 	}
+ 
+-#ifdef CONFIG_DEVFREQ_THERMAL
++#if IS_ENABLED(CONFIG_DEVFREQ_THERMAL)
+ 	err = kbase_ipa_init(kbdev);
+ 	if (err) {
+ 		dev_err(kbdev->dev, "IPA initialization failed\n");
+-		goto cooling_failed;
++		goto ipa_init_failed;
+ 	}
+ 
+ 	kbdev->devfreq_cooling = of_devfreq_cooling_register_power(
+@@ -683,23 +717,28 @@ int kbase_devfreq_init(struct kbase_device *kbdev)
+ 		dev_err(kbdev->dev,
+ 			"Failed to register cooling device (%d)\n",
+ 			err);
+-		goto cooling_failed;
++		goto cooling_reg_failed;
+ 	}
+ #endif
+ 
+ 	return 0;
+ 
+-#ifdef CONFIG_DEVFREQ_THERMAL
+-cooling_failed:
++#if IS_ENABLED(CONFIG_DEVFREQ_THERMAL)
++cooling_reg_failed:
++	kbase_ipa_term(kbdev);
++ipa_init_failed:
+ 	devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+ #endif /* CONFIG_DEVFREQ_THERMAL */
++
+ opp_notifier_failed:
++	kbase_devfreq_work_term(kbdev);
++
+ 	if (devfreq_remove_device(kbdev->devfreq))
+ 		dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+-	else
+-		kbdev->devfreq = NULL;
+ 
+-	kbase_devfreq_work_term(kbdev);
++	kbdev->devfreq = NULL;
++
++	kbase_devfreq_term_core_mask_table(kbdev);
+ 
+ 	return err;
+ }
+@@ -710,7 +749,7 @@ void kbase_devfreq_term(struct kbase_device *kbdev)
+ 
+ 	dev_dbg(kbdev->dev, "Term Mali devfreq\n");
+ 
+-#ifdef CONFIG_DEVFREQ_THERMAL
++#if IS_ENABLED(CONFIG_DEVFREQ_THERMAL)
+ 	if (kbdev->devfreq_cooling)
+ 		devfreq_cooling_unregister(kbdev->devfreq_cooling);
+ 
+@@ -719,6 +758,8 @@ void kbase_devfreq_term(struct kbase_device *kbdev)
+ 
+ 	devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
+ 
++	kbase_devfreq_work_term(kbdev);
++
+ 	err = devfreq_remove_device(kbdev->devfreq);
+ 	if (err)
+ 		dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
+@@ -726,6 +767,4 @@ void kbase_devfreq_term(struct kbase_device *kbdev)
+ 		kbdev->devfreq = NULL;
+ 
+ 	kbase_devfreq_term_core_mask_table(kbdev);
+-
+-	kbase_devfreq_work_term(kbdev);
+ }
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h
+index 8c976b2..901827e 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2014, 2019 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014, 2019-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ #ifndef _BASE_DEVFREQ_H_
+@@ -43,4 +42,30 @@
+ void kbase_devfreq_enqueue_work(struct kbase_device *kbdev,
+ 				enum kbase_devfreq_work_type work_type);
+ 
++/**
++ * kbase_devfreq_opp_translate - Translate nominal OPP frequency from devicetree
++ *                               into a real frequency and voltage pair, along
++ *                               with the core mask
++ * @kbdev:     Device pointer
++ * @freq:      Nominal frequency
++ * @core_mask: Pointer to u64 to store core mask to
++ * @freqs:     Pointer to array of frequencies
++ * @volts:     Pointer to array of voltages
++ *
++ * This function will only perform translation if an operating-points-v2-mali
++ * table is present in devicetree. If one is not present then it will return an
++ * untranslated frequency (and corresponding voltage) and all cores enabled.
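++ *
++ * A minimal usage sketch (array sizes follow BASE_MAX_NR_CLOCKS_REGULATORS,
++ * as elsewhere in the devfreq code; variable names illustrative):
++ *
++ *	u64 core_mask;
++ *	unsigned long freqs[BASE_MAX_NR_CLOCKS_REGULATORS];
++ *	unsigned long volts[BASE_MAX_NR_CLOCKS_REGULATORS];
++ *
++ *	kbase_devfreq_opp_translate(kbdev, nominal_freq, &core_mask,
++ *				    freqs, volts);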
++ */
++void kbase_devfreq_opp_translate(struct kbase_device *kbdev, unsigned long freq,
++	u64 *core_mask, unsigned long *freqs, unsigned long *volts);
+ #endif /* _BASE_DEVFREQ_H_ */
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
+deleted file mode 100755
+index 5943e4e..0000000
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
++++ /dev/null
+@@ -1,388 +0,0 @@
+-/*
+- *
+- * (C) COPYRIGHT 2014-2016, 2018-2020 ARM Limited. All rights reserved.
+- *
+- * This program is free software and is provided to you under the terms of the
+- * GNU General Public License version 2 as published by the Free Software
+- * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, you can access it online at
+- * http://www.gnu.org/licenses/gpl-2.0.html.
+- *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+- */
+-
+-
+-/*
+- *
+- */
+-#include <mali_kbase.h>
+-#include <gpu/mali_kbase_gpu_fault.h>
+-#include <backend/gpu/mali_kbase_instr_internal.h>
+-#include <backend/gpu/mali_kbase_pm_internal.h>
+-#include <backend/gpu/mali_kbase_device_internal.h>
+-#include <mali_kbase_reset_gpu.h>
+-#include <mmu/mali_kbase_mmu.h>
+-
+-#if !defined(CONFIG_MALI_NO_MALI)
+-
+-#ifdef CONFIG_DEBUG_FS
+-
+-int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size)
+-{
+-	struct kbase_io_access *old_buf;
+-	struct kbase_io_access *new_buf;
+-	unsigned long flags;
+-
+-	if (!new_size)
+-		goto out_err; /* The new size must not be 0 */
+-
+-	new_buf = vmalloc(new_size * sizeof(*h->buf));
+-	if (!new_buf)
+-		goto out_err;
+-
+-	spin_lock_irqsave(&h->lock, flags);
+-
+-	old_buf = h->buf;
+-
+-	/* Note: we won't bother with copying the old data over. The dumping
+-	 * logic wouldn't work properly as it relies on 'count' both as a
+-	 * counter and as an index to the buffer which would have changed with
+-	 * the new array. This is a corner case that we don't need to support.
+-	 */
+-	h->count = 0;
+-	h->size = new_size;
+-	h->buf = new_buf;
+-
+-	spin_unlock_irqrestore(&h->lock, flags);
+-
+-	vfree(old_buf);
+-
+-	return 0;
+-
+-out_err:
+-	return -1;
+-}
+-
+-
+-int kbase_io_history_init(struct kbase_io_history *h, u16 n)
+-{
+-	h->enabled = false;
+-	spin_lock_init(&h->lock);
+-	h->count = 0;
+-	h->size = 0;
+-	h->buf = NULL;
+-	if (kbase_io_history_resize(h, n))
+-		return -1;
+-
+-	return 0;
+-}
+-
+-
+-void kbase_io_history_term(struct kbase_io_history *h)
+-{
+-	vfree(h->buf);
+-	h->buf = NULL;
+-}
+-
+-
+-/* kbase_io_history_add - add new entry to the register access history
+- *
+- * @h: Pointer to the history data structure
+- * @addr: Register address
+- * @value: The value that is either read from or written to the register
+- * @write: 1 if it's a register write, 0 if it's a read
+- */
+-static void kbase_io_history_add(struct kbase_io_history *h,
+-		void __iomem const *addr, u32 value, u8 write)
+-{
+-	struct kbase_io_access *io;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&h->lock, flags);
+-
+-	io = &h->buf[h->count % h->size];
+-	io->addr = (uintptr_t)addr | write;
+-	io->value = value;
+-	++h->count;
+-	/* If count overflows, move the index by the buffer size so the entire
+-	 * buffer will still be dumped later */
+-	if (unlikely(!h->count))
+-		h->count = h->size;
+-
+-	spin_unlock_irqrestore(&h->lock, flags);
+-}
+-
+-
+-void kbase_io_history_dump(struct kbase_device *kbdev)
+-{
+-	struct kbase_io_history *const h = &kbdev->io_history;
+-	u16 i;
+-	size_t iters;
+-	unsigned long flags;
+-
+-	if (!unlikely(h->enabled))
+-		return;
+-
+-	spin_lock_irqsave(&h->lock, flags);
+-
+-	dev_err(kbdev->dev, "Register IO History:");
+-	iters = (h->size > h->count) ? h->count : h->size;
+-	dev_err(kbdev->dev, "Last %zu register accesses of %zu total:\n", iters,
+-			h->count);
+-	for (i = 0; i < iters; ++i) {
+-		struct kbase_io_access *io =
+-			&h->buf[(h->count - iters + i) % h->size];
+-		char const access = (io->addr & 1) ? 'w' : 'r';
+-
+-		dev_err(kbdev->dev, "%6i: %c: reg 0x%016lx val %08x\n", i,
+-			access, (unsigned long)(io->addr & ~0x1), io->value);
+-	}
+-
+-	spin_unlock_irqrestore(&h->lock, flags);
+-}
+-
+-
+-#endif /* CONFIG_DEBUG_FS */
+-
+-
+-void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value)
+-{
+-	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+-	KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+-
+-	writel(value, kbdev->reg + offset);
+-
+-#ifdef CONFIG_DEBUG_FS
+-	if (unlikely(kbdev->io_history.enabled))
+-		kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+-				value, 1);
+-#endif /* CONFIG_DEBUG_FS */
+-	dev_dbg(kbdev->dev, "w: reg %08x val %08x", offset, value);
+-}
+-
+-KBASE_EXPORT_TEST_API(kbase_reg_write);
+-
+-u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset)
+-{
+-	u32 val;
+-	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
+-	KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+-
+-	val = readl(kbdev->reg + offset);
+-
+-#ifdef CONFIG_DEBUG_FS
+-	if (unlikely(kbdev->io_history.enabled))
+-		kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+-				val, 0);
+-#endif /* CONFIG_DEBUG_FS */
+-	dev_dbg(kbdev->dev, "r: reg %08x val %08x", offset, val);
+-
+-	return val;
+-}
+-
+-KBASE_EXPORT_TEST_API(kbase_reg_read);
+-
+-bool kbase_is_gpu_lost(struct kbase_device *kbdev)
+-{
+-	u32 val;
+-
+-	val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
+-
+-	return val == 0;
+-}
+-#endif /* !defined(CONFIG_MALI_NO_MALI) */
+-
+-/**
+- * kbase_report_gpu_fault - Report a GPU fault.
+- * @kbdev:    Kbase device pointer
+- * @multiple: Zero if only GPU_FAULT was raised, non-zero if MULTIPLE_GPU_FAULTS
+- *            was also set
+- *
+- * This function is called from the interrupt handler when a GPU fault occurs.
+- * It reports the details of the fault using dev_warn().
+- */
+-static void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple)
+-{
+-	u32 status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS));
+-	u64 address = (u64) kbase_reg_read(kbdev,
+-			GPU_CONTROL_REG(GPU_FAULTADDRESS_HI)) << 32;
+-
+-	address |= kbase_reg_read(kbdev,
+-			GPU_CONTROL_REG(GPU_FAULTADDRESS_LO));
+-
+-	meson_gpu_fault ++;
+-	dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx",
+-		status,
+-		kbase_gpu_exception_name(status & 0xFF),
+-		address);
+-	if (multiple)
+-		dev_warn(kbdev->dev, "There were multiple GPU faults - some have not been reported\n");
+-}
+-
+-static bool kbase_gpu_fault_interrupt(struct kbase_device *kbdev, int multiple)
+-{
+-	kbase_report_gpu_fault(kbdev, multiple);
+-	return false;
+-}
+-
+-void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev)
+-{
+-	u32 irq_mask;
+-
+-	lockdep_assert_held(&kbdev->hwaccess_lock);
+-
+-	if (kbdev->cache_clean_in_progress) {
+-		/* If this is called while another clean is in progress, we
+-		 * can't rely on the current one to flush any new changes in
+-		 * the cache. Instead, trigger another cache clean immediately
+-		 * after this one finishes.
+-		 */
+-		kbdev->cache_clean_queued = true;
+-		return;
+-	}
+-
+-	/* Enable interrupt */
+-	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+-	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+-				irq_mask | CLEAN_CACHES_COMPLETED);
+-
+-	KBASE_KTRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, 0);
+-	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+-					GPU_COMMAND_CLEAN_INV_CACHES);
+-
+-	kbdev->cache_clean_in_progress = true;
+-}
+-
+-void kbase_gpu_start_cache_clean(struct kbase_device *kbdev)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+-	kbase_gpu_start_cache_clean_nolock(kbdev);
+-	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+-}
+-
+-void kbase_gpu_cache_clean_wait_complete(struct kbase_device *kbdev)
+-{
+-	lockdep_assert_held(&kbdev->hwaccess_lock);
+-
+-	kbdev->cache_clean_queued = false;
+-	kbdev->cache_clean_in_progress = false;
+-	wake_up(&kbdev->cache_clean_wait);
+-}
+-
+-static void kbase_clean_caches_done(struct kbase_device *kbdev)
+-{
+-	u32 irq_mask;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+-
+-	if (kbdev->cache_clean_queued) {
+-		kbdev->cache_clean_queued = false;
+-
+-		KBASE_KTRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, 0);
+-		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+-				GPU_COMMAND_CLEAN_INV_CACHES);
+-	} else {
+-		/* Disable interrupt */
+-		irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+-		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+-				irq_mask & ~CLEAN_CACHES_COMPLETED);
+-
+-		kbase_gpu_cache_clean_wait_complete(kbdev);
+-	}
+-
+-	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+-}
+-
+-static inline bool get_cache_clean_flag(struct kbase_device *kbdev)
+-{
+-	bool cache_clean_in_progress;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+-	cache_clean_in_progress = kbdev->cache_clean_in_progress;
+-	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+-
+-	return cache_clean_in_progress;
+-}
+-
+-void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev)
+-{
+-	while (get_cache_clean_flag(kbdev)) {
+-		wait_event_interruptible(kbdev->cache_clean_wait,
+-				!kbdev->cache_clean_in_progress);
+-	}
+-}
+-
+-int kbase_gpu_wait_cache_clean_timeout(struct kbase_device *kbdev,
+-				unsigned int wait_timeout_ms)
+-{
+-	long remaining = msecs_to_jiffies(wait_timeout_ms);
+-
+-	while (remaining && get_cache_clean_flag(kbdev)) {
+-		remaining = wait_event_timeout(kbdev->cache_clean_wait,
+-					!kbdev->cache_clean_in_progress,
+-					remaining);
+-	}
+-
+-	return (remaining ? 0 : -ETIMEDOUT);
+-}
+-
+-void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
+-{
+-	bool clear_gpu_fault = false;
+-
+-	KBASE_KTRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, val);
+-	if (val & GPU_FAULT)
+-		clear_gpu_fault = kbase_gpu_fault_interrupt(kbdev,
+-					val & MULTIPLE_GPU_FAULTS);
+-
+-	if (val & RESET_COMPLETED)
+-		kbase_pm_reset_done(kbdev);
+-
+-	if (val & PRFCNT_SAMPLE_COMPLETED)
+-		kbase_instr_hwcnt_sample_done(kbdev);
+-
+-	KBASE_KTRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, val);
+-	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val);
+-
+-	/* kbase_pm_check_transitions (called by kbase_pm_power_changed) must
+-	 * be called after the IRQ has been cleared. This is because it might
+-	 * trigger further power transitions and we don't want to miss the
+-	 * interrupt raised to notify us that these further transitions have
+-	 * finished. The same applies to kbase_clean_caches_done() - if another
+-	 * clean was queued, it might trigger another clean, which might
+-	 * generate another interrupt which shouldn't be missed.
+-	 */
+-
+-	if (val & CLEAN_CACHES_COMPLETED)
+-		kbase_clean_caches_done(kbdev);
+-
+-	if (val & POWER_CHANGED_ALL) {
+-		kbase_pm_power_changed(kbdev);
+-	} else if (val & CLEAN_CACHES_COMPLETED) {
+-		/* If cache line evict messages can be lost when shader cores
+-		 * power down then we need to flush the L2 cache before powering
+-		 * down cores. When the flush completes, the shaders' state
+-		 * machine needs to be re-invoked to proceed with powering down
+-		 * cores.
+-		 */
+-		if (kbdev->pm.backend.l2_always_on ||
+-				kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_921))
+-			kbase_pm_power_changed(kbdev);
+-	}
+-
+-
+-	KBASE_KTRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, val);
+-}
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h
+deleted file mode 100644
+index 2e1d011..0000000
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h
++++ /dev/null
+@@ -1,127 +0,0 @@
+-/*
+- *
+- * (C) COPYRIGHT 2014,2019-2020 ARM Limited. All rights reserved.
+- *
+- * This program is free software and is provided to you under the terms of the
+- * GNU General Public License version 2 as published by the Free Software
+- * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, you can access it online at
+- * http://www.gnu.org/licenses/gpl-2.0.html.
+- *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+- */
+-
+-
+-
+-/*
+- * Backend-specific HW access device APIs
+- */
+-
+-#ifndef _KBASE_DEVICE_INTERNAL_H_
+-#define _KBASE_DEVICE_INTERNAL_H_
+-
+-/**
+- * kbase_reg_write - write to GPU register
+- * @kbdev:  Kbase device pointer
+- * @offset: Offset of register
+- * @value:  Value to write
+- *
+- * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false).
+- */
+-void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value);
+-
+-/**
+- * kbase_reg_read - read from GPU register
+- * @kbdev:  Kbase device pointer
+- * @offset: Offset of register
+- *
+- * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false).
+- *
+- * Return: Value in desired register
+- */
+-u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset);
+-
+-/**
+- * kbase_is_gpu_lost() - Has the GPU been lost.
+- * @kbdev:    Kbase device pointer
+- *
+- * This function will return true if the GPU has been lost.
+- * When this happens register reads will be zero. A zero GPU_ID is
+- * invalid so this is used to detect GPU_LOST
+- *
+- * Return: True if GPU LOST
+- */
+-bool kbase_is_gpu_lost(struct kbase_device *kbdev);
+-
+-/**
+- * kbase_gpu_start_cache_clean - Start a cache clean
+- * @kbdev: Kbase device
+- *
+- * Issue a cache clean and invalidate command to hardware. This function will
+- * take hwaccess_lock.
+- */
+-void kbase_gpu_start_cache_clean(struct kbase_device *kbdev);
+-
+-/**
+- * kbase_gpu_start_cache_clean_nolock - Start a cache clean
+- * @kbdev: Kbase device
+- *
+- * Issue a cache clean and invalidate command to hardware. hwaccess_lock
+- * must be held by the caller.
+- */
+-void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev);
+-
+-/**
+- * kbase_gpu_wait_cache_clean - Wait for cache cleaning to finish
+- * @kbdev: Kbase device
+- *
+- * This function will take hwaccess_lock, and may sleep.
+- */
+-void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev);
+-
+-/**
+- * kbase_gpu_wait_cache_clean_timeout - Wait for certain time for cache
+- *                                      cleaning to finish
+- * @kbdev: Kbase device
+- * @wait_timeout_ms: Time, in milli seconds, to wait for cache clean to complete.
+- *
+- * This function will take hwaccess_lock, and may sleep. This is supposed to be
+- * called from paths (like GPU reset) where an indefinite wait for the completion
+- * of cache clean operation can cause deadlock, as the operation may never
+- * complete.
+- *
+- * Return: 0 if successful or a negative error code on failure.
+- */
+-int kbase_gpu_wait_cache_clean_timeout(struct kbase_device *kbdev,
+-		unsigned int wait_timeout_ms);
+-
+-/**
+- * kbase_gpu_cache_clean_wait_complete - Called after the cache cleaning is
+- *                                       finished. Would also be called after
+- *                                       the GPU reset.
+- * @kbdev: Kbase device
+- *
+- * Caller must hold the hwaccess_lock.
+- */
+-void kbase_gpu_cache_clean_wait_complete(struct kbase_device *kbdev);
+-
+-/**
+- * kbase_gpu_interrupt - GPU interrupt handler
+- * @kbdev: Kbase device pointer
+- * @val:   The value of the GPU IRQ status register which triggered the call
+- *
+- * This function is called from the interrupt handler when a GPU irq is to be
+- * handled.
+- */
+-void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val);
+-
+-#endif /* _KBASE_DEVICE_INTERNAL_H_ */
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
+index 352afa1..11088db 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
+@@ -1,12 +1,12 @@
+-// SPDX-License-Identifier: GPL-2.0
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2014-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -17,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /*
+@@ -26,7 +24,7 @@
+  */
+ 
+ #include <mali_kbase.h>
+-#include <backend/gpu/mali_kbase_device_internal.h>
++#include <device/mali_kbase_device.h>
+ #include <backend/gpu/mali_kbase_pm_internal.h>
+ #include <mali_kbase_hwaccess_gpuprops.h>
+ 
+@@ -41,8 +39,19 @@ int kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+ 
+ 	registers.l2_features = kbase_reg_read(kbdev,
+ 				GPU_CONTROL_REG(L2_FEATURES));
++	registers.core_features = 0;
++#if !MALI_USE_CSF
++	/* TGOx */
+ 	registers.core_features = kbase_reg_read(kbdev,
+ 				GPU_CONTROL_REG(CORE_FEATURES));
++#else /* !MALI_USE_CSF */
++	if (((registers.gpu_id & GPU_ID2_PRODUCT_MODEL) ==
++	     GPU_ID2_PRODUCT_TGRX) ||
++	    ((registers.gpu_id & GPU_ID2_PRODUCT_MODEL) ==
++	     GPU_ID2_PRODUCT_TVAX))
++		registers.core_features =
++			kbase_reg_read(kbdev, GPU_CONTROL_REG(CORE_FEATURES));
++#endif /* MALI_USE_CSF */
+ 	registers.tiler_features = kbase_reg_read(kbdev,
+ 				GPU_CONTROL_REG(TILER_FEATURES));
+ 	registers.mem_features = kbase_reg_read(kbdev,
+@@ -51,12 +60,20 @@ int kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+ 				GPU_CONTROL_REG(MMU_FEATURES));
+ 	registers.as_present = kbase_reg_read(kbdev,
+ 				GPU_CONTROL_REG(AS_PRESENT));
++#if !MALI_USE_CSF
+ 	registers.js_present = kbase_reg_read(kbdev,
+ 				GPU_CONTROL_REG(JS_PRESENT));
++#else /* !MALI_USE_CSF */
++	registers.js_present = 0;
++#endif /* !MALI_USE_CSF */
+ 
+ 	for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
++#if !MALI_USE_CSF
+ 		registers.js_features[i] = kbase_reg_read(kbdev,
+ 				GPU_CONTROL_REG(JS_FEATURES_REG(i)));
++#else /* !MALI_USE_CSF */
++		registers.js_features[i] = 0;
++#endif /* !MALI_USE_CSF */
+ 
+ 	for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
+ 		registers.texture_features[i] = kbase_reg_read(kbdev,
+@@ -93,13 +110,49 @@ int kbase_backend_gpuprops_get(struct kbase_device *kbdev,
+ 	registers.stack_present_hi = kbase_reg_read(kbdev,
+ 				GPU_CONTROL_REG(STACK_PRESENT_HI));
+ 
+-	if (!kbase_is_gpu_lost(kbdev)) {
++	if (registers.gpu_id >= GPU_ID2_PRODUCT_MAKE(11, 8, 5, 2)) {
++		registers.gpu_features_lo = kbase_reg_read(kbdev,
++					GPU_CONTROL_REG(GPU_FEATURES_LO));
++		registers.gpu_features_hi = kbase_reg_read(kbdev,
++					GPU_CONTROL_REG(GPU_FEATURES_HI));
++	} else {
++		registers.gpu_features_lo = 0;
++		registers.gpu_features_hi = 0;
++	}
++
++	if (!kbase_is_gpu_removed(kbdev)) {
+ 		*regdump = registers;
+ 		return 0;
+ 	} else
+ 		return -EIO;
+ }
+ 
++int kbase_backend_gpuprops_get_curr_config(struct kbase_device *kbdev,
++		struct kbase_current_config_regdump *curr_config_regdump)
++{
++	if (WARN_ON(!kbdev) || WARN_ON(!curr_config_regdump))
++		return -EINVAL;
++
++	curr_config_regdump->mem_features = kbase_reg_read(kbdev,
++					GPU_CONTROL_REG(MEM_FEATURES));
++
++	curr_config_regdump->shader_present_lo = kbase_reg_read(kbdev,
++					GPU_CONTROL_REG(SHADER_PRESENT_LO));
++	curr_config_regdump->shader_present_hi = kbase_reg_read(kbdev,
++					GPU_CONTROL_REG(SHADER_PRESENT_HI));
++
++	curr_config_regdump->l2_present_lo = kbase_reg_read(kbdev,
++					GPU_CONTROL_REG(L2_PRESENT_LO));
++	curr_config_regdump->l2_present_hi = kbase_reg_read(kbdev,
++					GPU_CONTROL_REG(L2_PRESENT_HI));
++
++	if (WARN_ON(kbase_is_gpu_removed(kbdev)))
++		return -EIO;
++
++	return 0;
++
++}
++
+ int kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
+ 					struct kbase_gpuprops_regdump *regdump)
+ {
+@@ -112,7 +165,7 @@ int kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
+ 		coherency_features = kbase_reg_read(kbdev,
+ 				GPU_CONTROL_REG(COHERENCY_FEATURES));
+ 
+-		if (kbase_is_gpu_lost(kbdev))
++		if (kbase_is_gpu_removed(kbdev))
+ 			return -EIO;
+ 
+ 		regdump->coherency_features = coherency_features;
+@@ -135,11 +188,15 @@ int kbase_backend_gpuprops_get_l2_features(struct kbase_device *kbdev,
+ 	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_L2_CONFIG)) {
+ 		u32 l2_features = kbase_reg_read(kbdev,
+ 				GPU_CONTROL_REG(L2_FEATURES));
++		u32 l2_config =
++			kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_CONFIG));
++
+ 
+-		if (kbase_is_gpu_lost(kbdev))
++		if (kbase_is_gpu_removed(kbdev))
+ 			return -EIO;
+ 
+ 		regdump->l2_features = l2_features;
++		regdump->l2_config = l2_config;
+ 	}
+ 
+ 	return 0;
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
+index 8b320c7..d7edf30 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2014-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,12 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+-
+ /*
+  * GPU backend instrumentation APIs.
+  */
+@@ -29,7 +26,7 @@
+ #include <mali_kbase.h>
+ #include <gpu/mali_kbase_gpu_regmap.h>
+ #include <mali_kbase_hwaccess_instr.h>
+-#include <backend/gpu/mali_kbase_device_internal.h>
++#include <device/mali_kbase_device.h>
+ #include <backend/gpu/mali_kbase_instr_internal.h>
+ 
+ 
+@@ -71,12 +68,12 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+ 
+ 	/* Configure */
+ 	prfcnt_config = kctx->as_nr << PRFCNT_CONFIG_AS_SHIFT;
+-#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY_VIA_DEBUG_FS
+-	if (kbdev->hwcnt.backend.use_secondary_override)
++#ifdef CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS
++	prfcnt_config |= kbdev->hwcnt.backend.override_counter_set
++			 << PRFCNT_CONFIG_SETSELECT_SHIFT;
+ #else
+-	if (enable->use_secondary)
++	prfcnt_config |= enable->counter_set << PRFCNT_CONFIG_SETSELECT_SHIFT;
+ #endif
+-		prfcnt_config |= 1 << PRFCNT_CONFIG_SETSELECT_SHIFT;
+ 
+ 	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
+ 			prfcnt_config | PRFCNT_CONFIG_MODE_OFF);
+@@ -87,7 +84,7 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+ 					enable->dump_buffer >> 32);
+ 
+ 	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
+-					enable->jm_bm);
++					enable->fe_bm);
+ 
+ 	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
+ 					enable->shader_bm);
+@@ -110,7 +107,7 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
+ 
+ 	err = 0;
+ 
+-	dev_dbg(kbdev->dev, "HW counters dumping set-up for context %p", kctx);
++	dev_dbg(kbdev->dev, "HW counters dumping set-up for context %pK", kctx);
+ 	return err;
+  out_err:
+ 	return err;
+@@ -170,11 +167,10 @@ int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx)
+ 	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+ 
+-	dev_dbg(kbdev->dev, "HW counters dumping disabled for context %p",
++	dev_dbg(kbdev->dev, "HW counters dumping disabled for context %pK",
+ 									kctx);
+ 
+ 	err = 0;
+-
+  out:
+ 	return err;
+ }
+@@ -194,7 +190,8 @@ int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
+ 
+ 	if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_IDLE) {
+ 		/* HW counters are disabled or another dump is ongoing, or we're
+-		 * resetting */
++		 * resetting
++		 */
+ 		goto unlock;
+ 	}
+ 
+@@ -204,7 +201,6 @@ int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
+ 	 */
+ 	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DUMPING;
+ 
+-
+ 	/* Reconfigure the dump address */
+ 	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
+ 					kbdev->hwcnt.addr & 0xFFFFFFFF);
+@@ -218,14 +214,13 @@ int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
+ 	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ 					GPU_COMMAND_PRFCNT_SAMPLE);
+ 
+-	dev_dbg(kbdev->dev, "HW counters dumping done for context %p", kctx);
++	dev_dbg(kbdev->dev, "HW counters dumping done for context %pK", kctx);
+ 
+ 	err = 0;
+ 
+  unlock:
+ 	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+ 
+-
+ 	return err;
+ }
+ KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_request_dump);
+@@ -254,40 +249,6 @@ bool kbase_instr_hwcnt_dump_complete(struct kbase_context *kctx,
+ }
+ KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_dump_complete);
+ 
+-void kbasep_cache_clean_worker(struct work_struct *data)
+-{
+-	struct kbase_device *kbdev;
+-	unsigned long flags, pm_flags;
+-
+-	kbdev = container_of(data, struct kbase_device,
+-						hwcnt.backend.cache_clean_work);
+-
+-	spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
+-	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+-
+-	/* Clean and invalidate the caches so we're sure the mmu tables for the
+-	 * dump buffer is valid.
+-	 */
+-	KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+-					KBASE_INSTR_STATE_REQUEST_CLEAN);
+-	kbase_gpu_start_cache_clean_nolock(kbdev);
+-	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+-	spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+-
+-	kbase_gpu_wait_cache_clean(kbdev);
+-
+-	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+-	KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
+-					KBASE_INSTR_STATE_REQUEST_CLEAN);
+-	/* All finished and idle */
+-	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+-	kbdev->hwcnt.backend.triggered = 1;
+-	wake_up(&kbdev->hwcnt.backend.wait);
+-
+-	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+-}
+-
+-
+ void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
+ {
+ 	unsigned long flags;
+@@ -298,20 +259,10 @@ void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
+ 		kbdev->hwcnt.backend.triggered = 1;
+ 		wake_up(&kbdev->hwcnt.backend.wait);
+ 	} else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING) {
+-		if (kbdev->mmu_mode->flags & KBASE_MMU_MODE_HAS_NON_CACHEABLE) {
+-			/* All finished and idle */
+-			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+-			kbdev->hwcnt.backend.triggered = 1;
+-			wake_up(&kbdev->hwcnt.backend.wait);
+-		} else {
+-			int ret;
+-			/* Always clean and invalidate the cache after a successful dump
+-			 */
+-			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
+-			ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq,
+-						&kbdev->hwcnt.backend.cache_clean_work);
+-			KBASE_DEBUG_ASSERT(ret);
+-		}
++		/* All finished and idle */
++		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
++		kbdev->hwcnt.backend.triggered = 1;
++		wake_up(&kbdev->hwcnt.backend.wait);
+ 	}
+ 
+ 	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+@@ -353,7 +304,8 @@ int kbase_instr_hwcnt_clear(struct kbase_context *kctx)
+ 	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+ 
+ 	/* Check it's the context previously set up and we're not already
+-	 * dumping */
++	 * dumping
++	 */
+ 	if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.backend.state !=
+ 							KBASE_INSTR_STATE_IDLE)
+ 		goto out;
+@@ -373,39 +325,45 @@ KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_clear);
+ 
+ int kbase_instr_backend_init(struct kbase_device *kbdev)
+ {
+-	int ret = 0;
++	spin_lock_init(&kbdev->hwcnt.lock);
+ 
+ 	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
+ 
+ 	init_waitqueue_head(&kbdev->hwcnt.backend.wait);
+-	INIT_WORK(&kbdev->hwcnt.backend.cache_clean_work,
+-						kbasep_cache_clean_worker);
+-
+ 
+ 	kbdev->hwcnt.backend.triggered = 0;
+ 
+-#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY_VIA_DEBUG_FS
+-	kbdev->hwcnt.backend.use_secondary_override = false;
++#ifdef CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS
++/* Use the build-time option for the override default. */
++#if defined(CONFIG_MALI_PRFCNT_SET_SECONDARY)
++	kbdev->hwcnt.backend.override_counter_set = KBASE_HWCNT_SET_SECONDARY;
++#elif defined(CONFIG_MALI_PRFCNT_SET_TERTIARY)
++	kbdev->hwcnt.backend.override_counter_set = KBASE_HWCNT_SET_TERTIARY;
++#else
++	/* Default to primary */
++	kbdev->hwcnt.backend.override_counter_set = KBASE_HWCNT_SET_PRIMARY;
+ #endif
+-
+-	kbdev->hwcnt.backend.cache_clean_wq =
+-			alloc_workqueue("Mali cache cleaning workqueue", 0, 1);
+-	if (NULL == kbdev->hwcnt.backend.cache_clean_wq)
+-		ret = -EINVAL;
+-
+-	return ret;
++#endif
++	return 0;
+ }
+ 
+ void kbase_instr_backend_term(struct kbase_device *kbdev)
+ {
+-	destroy_workqueue(kbdev->hwcnt.backend.cache_clean_wq);
++	CSTD_UNUSED(kbdev);
+ }
+ 
+-#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY_VIA_DEBUG_FS
++#ifdef CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS
+ void kbase_instr_backend_debugfs_init(struct kbase_device *kbdev)
+ {
+-	debugfs_create_bool("hwcnt_use_secondary", S_IRUGO | S_IWUSR,
+-		kbdev->mali_debugfs_directory,
+-		&kbdev->hwcnt.backend.use_secondary_override);
++	/* No validation is done on the debugfs input. Invalid input could cause
++	 * performance counter errors. This is acceptable since this is a
++	 * debug-only feature and users should know what they are doing.
++	 *
++	 * Valid inputs are the values accepted by the SET_SELECT bits of the
++	 * PRFCNT_CONFIG register as defined in the architecture specification.
++	 */
++	debugfs_create_u8("hwcnt_set_select", S_IRUGO | S_IWUSR,
++			  kbdev->mali_debugfs_directory,
++			  (u8 *)&kbdev->hwcnt.backend.override_counter_set);
+ }
+ #endif
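
The instrumentation rework above replaces the old boolean "use secondary" override with a full counter-set selector: the whole set number is shifted into the SET_SELECT field of PRFCNT_CONFIG, which is what makes the new tertiary set reachable from debugfs. A hedged sketch of the composition (the shift values and enum are placeholders for illustration, not the register specification):

    #include <stdint.h>
    #include <stdio.h>

    enum counter_set { SET_PRIMARY = 0, SET_SECONDARY = 1, SET_TERTIARY = 2 };

    /* Placeholder shifts; the real values come from the GPU register map. */
    #define PRFCNT_CONFIG_AS_SHIFT        4
    #define PRFCNT_CONFIG_SETSELECT_SHIFT 8

    static uint32_t make_prfcnt_config(int as_nr, enum counter_set set)
    {
            uint32_t cfg = (uint32_t)as_nr << PRFCNT_CONFIG_AS_SHIFT;

            /* Write the whole set number, not a single "secondary" bit. */
            cfg |= (uint32_t)set << PRFCNT_CONFIG_SETSELECT_SHIFT;
            return cfg;
    }

    int main(void)
    {
            printf("config = 0x%x\n", make_prfcnt_config(3, SET_TERTIARY));
            return 0;
    }
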
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h
+index 9930968..e356348 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_defs.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2014, 2016, 2018, 2019-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014, 2016, 2018-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /*
+@@ -27,6 +26,8 @@
+ #ifndef _KBASE_INSTR_DEFS_H_
+ #define _KBASE_INSTR_DEFS_H_
+ 
++#include <mali_kbase_hwcnt_gpu.h>
++
+ /*
+  * Instrumentation State Machine States
+  */
+@@ -37,8 +38,6 @@ enum kbase_instr_state {
+ 	KBASE_INSTR_STATE_IDLE,
+ 	/* Hardware is currently dumping a frame. */
+ 	KBASE_INSTR_STATE_DUMPING,
+-	/* We've requested a clean to occur on a workqueue */
+-	KBASE_INSTR_STATE_REQUEST_CLEAN,
+ 	/* An error has occurred during DUMPING (page fault). */
+ 	KBASE_INSTR_STATE_FAULT
+ };
+@@ -47,14 +46,11 @@ enum kbase_instr_state {
+ struct kbase_instr_backend {
+ 	wait_queue_head_t wait;
+ 	int triggered;
+-#ifdef CONFIG_MALI_PRFCNT_SET_SECONDARY_VIA_DEBUG_FS
+-	bool use_secondary_override;
++#ifdef CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS
++	enum kbase_hwcnt_physical_set override_counter_set;
+ #endif
+ 
+ 	enum kbase_instr_state state;
+-	struct workqueue_struct *cache_clean_wq;
+-	struct work_struct  cache_clean_work;
+ };
+ 
+ #endif /* _KBASE_INSTR_DEFS_H_ */
+-
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h
+index 2254b9f..332cc69 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_internal.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2014, 2018 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014, 2018, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,12 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+-
+ /*
+  * Backend-specific HW access instrumentation APIs
+  */
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h
+index ca3c048..2671ce5 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_internal.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2015, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /*
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
+index 8696c6a..a29f7ef 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2014-2016,2018-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2016, 2018-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,17 +17,15 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ #include <mali_kbase.h>
+-#include <backend/gpu/mali_kbase_device_internal.h>
++#include <device/mali_kbase_device.h>
+ #include <backend/gpu/mali_kbase_irq_internal.h>
+ 
+ #include <linux/interrupt.h>
+ 
+-#if !defined(CONFIG_MALI_NO_MALI)
++#if !IS_ENABLED(CONFIG_MALI_NO_MALI)
+ 
+ /* GPU IRQ Tags */
+ #define	JOB_IRQ_TAG	0
+@@ -72,7 +71,12 @@ static irqreturn_t kbase_job_irq_handler(int irq, void *data)
+ 
+ 	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
+ 
++#if MALI_USE_CSF
++	/* call the csf interrupt handler */
++	kbase_csf_interrupt(kbdev, val);
++#else
+ 	kbase_job_done(kbdev, val);
++#endif
+ 
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ 
+@@ -210,24 +214,25 @@ int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
+ 	int result = 0;
+ 	irq_handler_t requested_irq_handler = NULL;
+ 
+-	KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) &&
+-						(GPU_IRQ_HANDLER >= irq_type));
++	KBASE_DEBUG_ASSERT((irq_type >= JOB_IRQ_HANDLER) &&
++			   (irq_type <= GPU_IRQ_HANDLER));
+ 
+ 	/* Release previous handler */
+ 	if (kbdev->irqs[irq_type].irq)
+ 		free_irq(kbdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type));
+ 
+-	requested_irq_handler = (NULL != custom_handler) ? custom_handler :
+-						kbase_handler_table[irq_type];
++	requested_irq_handler = (custom_handler != NULL) ?
++					custom_handler :
++					kbase_handler_table[irq_type];
+ 
+-	if (0 != request_irq(kbdev->irqs[irq_type].irq,
+-			requested_irq_handler,
++	if (request_irq(kbdev->irqs[irq_type].irq, requested_irq_handler,
+ 			kbdev->irqs[irq_type].flags | IRQF_SHARED,
+-			dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) {
++			dev_name(kbdev->dev),
++			kbase_tag(kbdev, irq_type)) != 0) {
+ 		result = -EINVAL;
+ 		dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
+ 					kbdev->irqs[irq_type].irq, irq_type);
+-#ifdef CONFIG_SPARSE_IRQ
++#if IS_ENABLED(CONFIG_SPARSE_IRQ)
+ 		dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
+ #endif /* CONFIG_SPARSE_IRQ */
+ 	}
+@@ -456,7 +461,7 @@ int kbase_install_interrupts(struct kbase_device *kbdev)
+ 		if (err) {
+ 			dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
+ 							kbdev->irqs[i].irq, i);
+-#ifdef CONFIG_SPARSE_IRQ
++#if IS_ENABLED(CONFIG_SPARSE_IRQ)
+ 			dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
+ #endif /* CONFIG_SPARSE_IRQ */
+ 			goto release;
+@@ -496,4 +501,4 @@ void kbase_synchronize_irqs(struct kbase_device *kbdev)
+ 
+ KBASE_EXPORT_TEST_API(kbase_synchronize_irqs);
+ 
+-#endif /* !defined(CONFIG_MALI_NO_MALI) */
++#endif /* !IS_ENABLED(CONFIG_MALI_NO_MALI) */
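
A change that recurs throughout this file is converting preprocessor checks from defined(CONFIG_X) to IS_ENABLED(CONFIG_X). The difference matters for tristate options: Kconfig defines CONFIG_X for built-in code but CONFIG_X_MODULE for modular builds, and IS_ENABLED() catches both. A self-contained model of the semantics (the kernel's actual macro in <linux/kconfig.h> is implemented with token pasting; this is a simplified stand-in):

    #include <stdio.h>

    #define CONFIG_DEMO_MODULE 1    /* pretend the option is built as =m */

    /* Simplified IS_ENABLED(): true for built-in (=y) or modular (=m). */
    #if defined(CONFIG_DEMO) || defined(CONFIG_DEMO_MODULE)
    #define DEMO_IS_ENABLED 1
    #else
    #define DEMO_IS_ENABLED 0
    #endif

    int main(void)
    {
    #if defined(CONFIG_DEMO)
            puts("defined() check: built-in only, misses the =m case");
    #endif
            if (DEMO_IS_ENABLED)
                    puts("IS_ENABLED-style check: covers both =y and =m");
            return 0;
    }
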
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c
+index bb4f548..888aa59 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2014-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,11 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+ /*
+  * Register backend context / address space management
+  */
+@@ -58,8 +56,10 @@ static void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev,
+ 	lockdep_assert_held(&js_devdata->runpool_mutex);
+ 	lockdep_assert_held(&kbdev->hwaccess_lock);
+ 
++#if !MALI_USE_CSF
+ 	/* Attribute handling */
+ 	kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx);
++#endif
+ 
+ 	/* Allow it to run jobs */
+ 	kbasep_js_set_submit_allowed(js_devdata, kctx);
+@@ -188,8 +188,8 @@ int kbase_backend_find_and_release_free_address_space(
+ 			}
+ 
+ 			/* Context was retained while locks were dropped,
+-			 * continue looking for free AS */
+-
++			 * continue looking for free AS
++			 */
+ 			mutex_unlock(&js_devdata->runpool_mutex);
+ 			mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
+ 
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
+index 7cda61a..e29ace7 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2014-2016, 2018-2019 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,11 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+ /*
+  * Register-based HW access backend specific definitions
+  */
+@@ -78,11 +76,12 @@ struct slot_rb {
+  * The hwaccess_lock (a spinlock) must be held when accessing this structure
+  */
+ struct kbase_backend_data {
++#if !MALI_USE_CSF
+ 	struct slot_rb slot_rb[BASE_JM_MAX_NR_SLOTS];
+-
+ 	struct hrtimer scheduling_timer;
+ 
+ 	bool timer_running;
++#endif
+ 	bool suspend_timer;
+ 
+ 	atomic_t reset_gpu;
+@@ -92,13 +91,16 @@ struct kbase_backend_data {
+ /* kbase_prepare_to_reset_gpu has been called */
+ #define KBASE_RESET_GPU_PREPARED        1
+ /* kbase_reset_gpu has been called - the reset will now definitely happen
+- * within the timeout period */
++ * within the timeout period
++ */
+ #define KBASE_RESET_GPU_COMMITTED       2
+ /* The GPU reset process is currently occurring (timeout has expired or
+- * kbasep_try_reset_gpu_early was called) */
++ * kbasep_try_reset_gpu_early was called)
++ */
+ #define KBASE_RESET_GPU_HAPPENING       3
+ /* Reset the GPU silently, used when resetting the GPU as part of normal
+- * behavior (e.g. when exiting protected mode). */
++ * behavior (e.g. when exiting protected mode).
++ */
+ #define KBASE_RESET_GPU_SILENT          4
+ 	struct workqueue_struct *reset_workq;
+ 	struct work_struct reset_work;
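
The KBASE_RESET_GPU_* defines above describe a small state machine driven by atomic compare-and-swap: kbase_prepare_to_reset_gpu_locked() (later in this patch) moves the state from NOT_PENDING to PREPARED, and whichever thread wins the swap owns the reset while everyone else backs off. A minimal C11 sketch of that election step, with names simplified from the driver's:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { RESET_NOT_PENDING, RESET_PREPARED, RESET_COMMITTED, RESET_HAPPENING };

    static atomic_int reset_gpu = RESET_NOT_PENDING;

    /* Returns nonzero only for the single caller that wins the election. */
    static int prepare_to_reset(void)
    {
            int expected = RESET_NOT_PENDING;

            return atomic_compare_exchange_strong(&reset_gpu, &expected,
                                                  RESET_PREPARED);
    }

    int main(void)
    {
            printf("first caller wins:   %d\n", prepare_to_reset()); /* 1 */
            printf("second caller loses: %d\n", prepare_to_reset()); /* 0 */
            return 0;
    }
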
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
+index fa6bc83..ae0377f 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /*
+@@ -33,16 +32,20 @@
+ #include <mali_kbase_hwaccess_jm.h>
+ #include <mali_kbase_reset_gpu.h>
+ #include <mali_kbase_ctx_sched.h>
++#include <mali_kbase_kinstr_jm.h>
+ #include <mali_kbase_hwcnt_context.h>
+-#include <backend/gpu/mali_kbase_device_internal.h>
++#include <device/mali_kbase_device.h>
+ #include <backend/gpu/mali_kbase_irq_internal.h>
+ #include <backend/gpu/mali_kbase_jm_internal.h>
++#include <mali_kbase_regs_history_debugfs.h>
+ 
+ static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev);
++static u64 kbasep_apply_limited_core_mask(const struct kbase_device *kbdev,
++				const u64 affinity, const u64 limited_core_mask);
+ 
+ static u64 kbase_job_write_affinity(struct kbase_device *kbdev,
+ 				base_jd_core_req core_req,
+-				int js)
++				int js, const u64 limited_core_mask)
+ {
+ 	u64 affinity;
+ 
+@@ -71,14 +74,21 @@ static u64 kbase_job_write_affinity(struct kbase_device *kbdev,
+ 		 */
+ 		if (js == 2 && num_core_groups > 1)
+ 			affinity &= coherency_info->group[1].core_mask;
+-		else
++		else if (num_core_groups > 1)
+ 			affinity &= coherency_info->group[0].core_mask;
++		else
++			affinity &= kbdev->gpu_props.curr_config.shader_present;
+ 	} else {
+ 		/* Use all cores */
+ 		affinity = kbdev->pm.backend.shaders_avail &
+ 				kbdev->pm.debug_core_mask[js];
+ 	}
+ 
++	if (core_req & BASE_JD_REQ_LIMITED_CORE_MASK) {
++		/* BASE_JD_REQ_LIMITED_CORE_MASK is set, so apply the limited core mask to the affinity. */
++		affinity = kbasep_apply_limited_core_mask(kbdev, affinity, limited_core_mask);
++	}
++
+ 	if (unlikely(!affinity)) {
+ #ifdef CONFIG_MALI_DEBUG
+ 		u64 shaders_ready =
+@@ -88,6 +98,16 @@ static u64 kbase_job_write_affinity(struct kbase_device *kbdev,
+ #endif
+ 
+ 		affinity = kbdev->pm.backend.shaders_avail;
++
++		if (core_req & BASE_JD_REQ_LIMITED_CORE_MASK) {
++			/* Limiting affinity again to make sure it only enables shader cores with backed TLS memory. */
++			/* Limit affinity again to ensure only shader cores with backing for TLS memory are enabled. */
++
++#ifdef CONFIG_MALI_DEBUG
++			/* affinity should never be 0 */
++			WARN_ON(!affinity);
++#endif
++		}
+ 	}
+ 
+ 	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO),
+@@ -168,7 +188,7 @@ static u64 select_job_chain(struct kbase_jd_atom *katom)
+ 	}
+ 
+ 	dev_dbg(kctx->kbdev->dev,
+-		"Selected job chain 0x%llx for end atom %p in state %d\n",
++		"Selected job chain 0x%llx for end atom %pK in state %d\n",
+ 		jc, (void *)katom, (int)rp->state);
+ 
+ 	katom->jc = jc;
+@@ -192,7 +212,7 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
+ 	/* Command register must be available */
+ 	KBASE_DEBUG_ASSERT(kbasep_jm_is_js_free(kbdev, js, kctx));
+ 
+-	dev_dbg(kctx->kbdev->dev, "Write JS_HEAD_NEXT 0x%llx for atom %p\n",
++	dev_dbg(kctx->kbdev->dev, "Write JS_HEAD_NEXT 0x%llx for atom %pK\n",
+ 		jc_head, (void *)katom);
+ 
+ 	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO),
+@@ -200,10 +220,12 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
+ 	kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI),
+ 						jc_head >> 32);
+ 
+-	affinity = kbase_job_write_affinity(kbdev, katom->core_req, js);
++	affinity = kbase_job_write_affinity(kbdev, katom->core_req, js,
++						kctx->limited_core_mask);
+ 
+ 	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
+-	 * start */
++	 * start
++	 */
+ 	cfg = kctx->as_nr;
+ 
+ 	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION) &&
+@@ -255,7 +277,7 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
+ 	katom->start_timestamp = ktime_get();
+ 
+ 	/* GO ! */
+-	dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx",
++	dev_dbg(kbdev->dev, "JS: Submitting atom %pK from ctx %pK to js[%d] with head=0x%llx",
+ 				katom, kctx, js, jc_head);
+ 
+ 	KBASE_KTRACE_ADD_JM_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js,
+@@ -277,7 +299,8 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
+ 			katom,
+ 			&kbdev->gpu_props.props.raw_props.js_features[js],
+ 			"ctx_nr,atom_nr");
+-#ifdef CONFIG_GPU_TRACEPOINTS
++	kbase_kinstr_jm_atom_hw_submit(katom);
++#if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
+ 	if (!kbase_backend_nr_atoms_submitted(kbdev, js)) {
+ 		/* If this is the only job on the slot, trace it as starting */
+ 		char js_string[16];
+@@ -328,7 +351,8 @@ static void kbasep_job_slot_update_head_start_timestamp(
+ 			/* Only update the timestamp if it's a better estimate
+ 			 * than what's currently stored. This is because our
+ 			 * estimate that accounts for the throttle time may be
+-			 * too much of an overestimate */
++			 * too much of an overestimate
++			 */
+ 			katom->start_timestamp = end_timestamp;
+ 		}
+ 	}
+@@ -371,9 +395,9 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
+ 		/* treat failed slots as finished slots */
+ 		u32 finished = (done & 0xFFFF) | failed;
+ 
+-		/* Note: This is inherently unfair, as we always check
+-		 * for lower numbered interrupts before the higher
+-		 * numbered ones.*/
++		/* Note: This is inherently unfair, as we always check for lower
++		 * numbered interrupts before the higher numbered ones.
++		 */
+ 		i = ffs(finished) - 1;
+ 		KBASE_DEBUG_ASSERT(i >= 0);
+ 
+@@ -385,7 +409,8 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
+ 
+ 			if (failed & (1u << i)) {
+ 				/* read out the job slot status code if the job
+-				 * slot reported failure */
++				 * slot reported failure
++				 */
+ 				completion_code = kbase_reg_read(kbdev,
+ 					JOB_SLOT_REG(i, JS_STATUS));
+ 
+@@ -399,7 +424,8 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
+ 
+ 					/* Soft-stopped job - read the value of
+ 					 * JS<n>_TAIL so that the job chain can
+-					 * be resumed */
++					 * be resumed
++					 */
+ 					job_tail = (u64)kbase_reg_read(kbdev,
+ 						JOB_SLOT_REG(i, JS_TAIL_LO)) |
+ 						((u64)kbase_reg_read(kbdev,
+@@ -408,21 +434,26 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
+ 				} else if (completion_code ==
+ 						BASE_JD_EVENT_NOT_STARTED) {
+ 					/* PRLAM-10673 can cause a TERMINATED
+-					 * job to come back as NOT_STARTED, but
+-					 * the error interrupt helps us detect
+-					 * it */
++					 * job to come back as NOT_STARTED,
++					 * but the error interrupt helps us
++					 * detect it
++					 */
+ 					completion_code =
+ 						BASE_JD_EVENT_TERMINATED;
+ 				}
+ 
+ 				kbase_gpu_irq_evict(kbdev, i, completion_code);
+ 
+-				/* Some jobs that encounter a BUS FAULT may result in corrupted
+-				 * state causing future jobs to hang. Reset GPU before
+-				 * allowing any other jobs on the slot to continue. */
++				/* Some jobs that encounter a BUS FAULT may
++				 * result in corrupted state causing future
++				 * jobs to hang. Reset GPU before allowing
++				 * any other jobs on the slot to continue.
++				 */
+ 				if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_3076)) {
+ 					if (completion_code == BASE_JD_EVENT_JOB_BUS_FAULT) {
+-						if (kbase_prepare_to_reset_gpu_locked(kbdev))
++						if (kbase_prepare_to_reset_gpu_locked(
++							    kbdev,
++							    RESET_FLAGS_NONE))
+ 							kbase_reset_gpu_locked(kbdev);
+ 					}
+ 				}
+@@ -480,7 +511,8 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
+ 
+ 				if ((rawstat >> (i + 16)) & 1) {
+ 					/* There is a failed job that we've
+-					 * missed - add it back to active */
++					 * missed - add it back to active
++					 */
+ 					active |= (1u << i);
+ 				}
+ 			}
+@@ -582,7 +614,8 @@ void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
+ 		}
+ 
+ 		/* We are about to issue a soft stop, so mark the atom as having
+-		 * been soft stopped */
++		 * been soft stopped
++		 */
+ 		target_katom->atom_flags |= KBASE_KATOM_FLAG_BEEN_SOFT_STOPPED;
+ 
+ 		/* Mark the point where we issue the soft-stop command */
+@@ -692,12 +725,40 @@ void kbase_backend_jm_kill_running_jobs_from_kctx(struct kbase_context *kctx)
+ 		kbase_job_slot_hardstop(kctx, i, NULL);
+ }
+ 
++/**
++ * kbase_is_existing_atom_submitted_later_than_ready - Check atom submission order
++ * @ready: sequence number of the ready atom
++ * @existing: sequence number of the existing atom
++ *
++ * Returns true if the existing atom was submitted later than the ready
++ * atom. This is used to decide whether an atom that is now ready was
++ * submitted earlier than the currently running atom, in which case the
++ * running atom should be preempted to allow the ready atom to run.
++ */
++static inline bool kbase_is_existing_atom_submitted_later_than_ready(u64 ready, u64 existing)
++{
++	/* No seq_nr set? */
++	if (!ready || !existing)
++		return false;
++
++	/* Efficiently handle the unlikely case of wrapping.
++	 * The following code assumes that the delta between the sequence
++	 * numbers of the two atoms is less than INT64_MAX.
++	 * In the extremely unlikely case where the delta is higher, the
++	 * comparison defaults to no preemption.
++	 * The code also assumes that the conversion from unsigned to signed types
++	 * works because the signed integers are 2's complement.
++	 */
++	return (s64)(ready - existing) < 0;
++}
++
+ void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
+ 				struct kbase_jd_atom *target_katom)
+ {
+ 	struct kbase_device *kbdev;
+ 	int js = target_katom->slot_nr;
+ 	int priority = target_katom->sched_priority;
++	int seq_nr = target_katom->seq_nr;
+ 	int i;
+ 	bool stop_sent = false;
+ 
+@@ -719,7 +780,8 @@ void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
+ 				(katom->kctx != kctx))
+ 			continue;
+ 
+-		if (katom->sched_priority > priority) {
++		if ((katom->sched_priority > priority) ||
++		    (katom->kctx == kctx && kbase_is_existing_atom_submitted_later_than_ready(seq_nr, katom->seq_nr))) {
+ 			if (!stop_sent)
+ 				KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED(
+ 						kbdev,
+@@ -749,7 +811,7 @@ static int softstop_start_rp_nolock(
+ 
+ 	if (!(katom->core_req & BASE_JD_REQ_START_RENDERPASS)) {
+ 		dev_dbg(kctx->kbdev->dev,
+-			"Atom %p on job slot is not start RP\n", (void *)katom);
++			"Atom %pK on job slot is not start RP\n", (void *)katom);
+ 		return -EPERM;
+ 	}
+ 
+@@ -762,13 +824,13 @@ static int softstop_start_rp_nolock(
+ 		rp->state != KBASE_JD_RP_RETRY))
+ 		return -EINVAL;
+ 
+-	dev_dbg(kctx->kbdev->dev, "OOM in state %d with region %p\n",
++	dev_dbg(kctx->kbdev->dev, "OOM in state %d with region %pK\n",
+ 		(int)rp->state, (void *)reg);
+ 
+ 	if (WARN_ON(katom != rp->start_katom))
+ 		return -EINVAL;
+ 
+-	dev_dbg(kctx->kbdev->dev, "Adding region %p to list %p\n",
++	dev_dbg(kctx->kbdev->dev, "Adding region %pK to list %pK\n",
+ 		(void *)reg, (void *)&rp->oom_reg_list);
+ 	list_move_tail(&reg->link, &rp->oom_reg_list);
+ 	dev_dbg(kctx->kbdev->dev, "Added region to list\n");
+@@ -813,9 +875,9 @@ void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx)
+ 	if (timeout != 0)
+ 		goto exit;
+ 
+-	if (kbase_prepare_to_reset_gpu(kbdev)) {
++	if (kbase_prepare_to_reset_gpu(kbdev, RESET_FLAGS_NONE)) {
+ 		dev_err(kbdev->dev,
+-			"Issueing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n",
++			"Issuing GPU soft-reset because jobs failed to be killed (within %d ms) as part of context termination (e.g. process exit)\n",
+ 			ZAP_TIMEOUT);
+ 		kbase_reset_gpu(kbdev);
+ 	}
+@@ -823,7 +885,7 @@ void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx)
+ 	/* Wait for the reset to complete */
+ 	kbase_reset_gpu_wait(kbdev);
+ exit:
+-	dev_dbg(kbdev->dev, "Zap: Finished Context %p", kctx);
++	dev_dbg(kbdev->dev, "Zap: Finished Context %pK", kctx);
+ 
+ 	/* Ensure that the signallers of the waitqs have finished */
+ 	mutex_lock(&kctx->jctx.lock);
+@@ -884,7 +946,7 @@ KBASE_EXPORT_TEST_API(kbase_job_slot_term);
+ void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
+ 			struct kbase_jd_atom *target_katom, u32 sw_flags)
+ {
+-	dev_dbg(kbdev->dev, "Soft-stop atom %p with flags 0x%x (s:%d)\n",
++	dev_dbg(kbdev->dev, "Soft-stop atom %pK with flags 0x%x (s:%d)\n",
+ 		target_katom, sw_flags, js);
+ 
+ 	KBASE_DEBUG_ASSERT(!(sw_flags & JS_COMMAND_MASK));
+@@ -988,6 +1050,33 @@ void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
+ 	}
+ }
+ 
++int kbase_reset_gpu_prevent_and_wait(struct kbase_device *kbdev)
++{
++	WARN(true, "%s Not implemented for JM GPUs", __func__);
++	return -EINVAL;
++}
++
++int kbase_reset_gpu_try_prevent(struct kbase_device *kbdev)
++{
++	WARN(true, "%s Not implemented for JM GPUs", __func__);
++	return -EINVAL;
++}
++
++void kbase_reset_gpu_allow(struct kbase_device *kbdev)
++{
++	WARN(true, "%s Not implemented for JM GPUs", __func__);
++}
++
++void kbase_reset_gpu_assert_prevented(struct kbase_device *kbdev)
++{
++	WARN(true, "%s Not implemented for JM GPUs", __func__);
++}
++
++void kbase_reset_gpu_assert_failed_or_prevented(struct kbase_device *kbdev)
++{
++	WARN(true, "%s Not implemented for JM GPUs", __func__);
++}
++
+ static void kbase_debug_dump_registers(struct kbase_device *kbdev)
+ {
+ 	int i;
+@@ -1054,13 +1143,15 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
+ 
+ 	/* Make sure the timer has completed - this cannot be done from
+ 	 * interrupt context, so this cannot be done within
+-	 * kbasep_try_reset_gpu_early. */
++	 * kbasep_try_reset_gpu_early.
++	 */
+ 	hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);
+ 
+ 	if (kbase_pm_context_active_handle_suspend(kbdev,
+ 				KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ 		/* This would re-activate the GPU. Since it's already idle,
+-		 * there's no need to reset it */
++		 * there's no need to reset it
++		 */
+ 		atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+ 						KBASE_RESET_GPU_NOT_PENDING);
+ 		kbase_disjoint_state_down(kbdev);
+@@ -1081,14 +1172,16 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
+ 	kbdev->irq_reset_flush = true;
+ 
+ 	/* Disable IRQ to avoid IRQ handlers to kick in after releasing the
+-	 * spinlock; this also clears any outstanding interrupts */
++	 * spinlock; this also clears any outstanding interrupts
++	 */
+ 	kbase_pm_disable_interrupts_nolock(kbdev);
+ 
+ 	spin_unlock(&kbdev->mmu_mask_change);
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ 
+ 	/* Ensure that any IRQ handlers have finished
+-	 * Must be done without any locks IRQ handlers will take */
++	 * Must be done without any locks IRQ handlers will take
++	 */
+ 	kbase_synchronize_irqs(kbdev);
+ 
+ 	/* Flush out any in-flight work items */
+@@ -1099,7 +1192,8 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
+ 
+ 	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8463)) {
+ 		/* Ensure that L2 is not transitioning when we send the reset
+-		 * command */
++		 * command
++		 */
+ 		while (--max_loops && kbase_pm_get_trans_cores(kbdev,
+ 				KBASE_PM_CORE_L2))
+ 			;
+@@ -1114,14 +1208,16 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
+ 	/* All slot have been soft-stopped and we've waited
+ 	 * SOFT_STOP_RESET_TIMEOUT for the slots to clear, at this point we
+ 	 * assume that anything that is still left on the GPU is stuck there and
+-	 * we'll kill it when we reset the GPU */
++	 * we'll kill it when we reset the GPU
++	 */
+ 
+ 	if (!silent)
+ 		dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)",
+ 								RESET_TIMEOUT);
+ 
+ 	/* Output the state of some interesting registers to help in the
+-	 * debugging of GPU resets */
++	 * debugging of GPU resets
++	 */
+ 	if (!silent)
+ 		kbase_debug_dump_registers(kbdev);
+ 
+@@ -1160,7 +1256,8 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
+ 	kbase_pm_update_cores_state(kbdev);
+ 
+ 	/* Synchronously request and wait for those cores, because if
+-	 * instrumentation is enabled it would need them immediately. */
++	 * instrumentation is enabled it would need them immediately.
++	 */
+ 	kbase_pm_wait_for_desired_state(kbdev);
+ 
+ 	mutex_unlock(&kbdev->pm.lock);
+@@ -1237,7 +1334,8 @@ static void kbasep_try_reset_gpu_early_locked(struct kbase_device *kbdev)
+ 
+ 	/* Check that the reset has been committed to (i.e. kbase_reset_gpu has
+ 	 * been called), and that no other thread beat this thread to starting
+-	 * the reset */
++	 * the reset
++	 */
+ 	if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+ 			KBASE_RESET_GPU_COMMITTED, KBASE_RESET_GPU_HAPPENING) !=
+ 						KBASE_RESET_GPU_COMMITTED) {
+@@ -1261,6 +1359,7 @@ static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev)
+ /**
+  * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU
+  * @kbdev: kbase device
++ * @flags: Bitfield indicating impact of reset (see flag defines)
+  *
+  * This function just soft-stops all the slots to ensure that as many jobs as
+  * possible are saved.
+@@ -1271,12 +1370,23 @@ static void kbasep_try_reset_gpu_early(struct kbase_device *kbdev)
+  *   false - Another thread is performing a reset, kbase_reset_gpu should
+  *   not be called.
+  */
+-bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev)
++bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev,
++				       unsigned int flags)
+ {
+ 	int i;
+ 
++	CSTD_UNUSED(flags);
+ 	KBASE_DEBUG_ASSERT(kbdev);
+ 
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++	if (kbase_pm_is_gpu_lost(kbdev)) {
++		/* GPU access has been removed, reset will be done by
++		 * Arbiter instead
++		 */
++		return false;
++	}
++#endif
++
+ 	if (atomic_cmpxchg(&kbdev->hwaccess.backend.reset_gpu,
+ 						KBASE_RESET_GPU_NOT_PENDING,
+ 						KBASE_RESET_GPU_PREPARED) !=
+@@ -1293,14 +1403,14 @@ bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev)
+ 	return true;
+ }
+ 
+-bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev)
++bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev, unsigned int flags)
+ {
+-	unsigned long flags;
++	unsigned long lock_flags;
+ 	bool ret;
+ 
+-	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+-	ret = kbase_prepare_to_reset_gpu_locked(kbdev);
+-	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
++	spin_lock_irqsave(&kbdev->hwaccess_lock, lock_flags);
++	ret = kbase_prepare_to_reset_gpu_locked(kbdev, flags);
++	spin_unlock_irqrestore(&kbdev->hwaccess_lock, lock_flags);
+ 
+ 	return ret;
+ }
+@@ -1321,7 +1431,8 @@ void kbase_reset_gpu(struct kbase_device *kbdev)
+ 	KBASE_DEBUG_ASSERT(kbdev);
+ 
+ 	/* Note this is an assert/atomic_set because it is a software issue for
+-	 * a race to be occuring here */
++	 * a race to be occurring here
++	 */
+ 	KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ 						KBASE_RESET_GPU_PREPARED);
+ 	atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+@@ -1344,7 +1455,8 @@ void kbase_reset_gpu_locked(struct kbase_device *kbdev)
+ 	KBASE_DEBUG_ASSERT(kbdev);
+ 
+ 	/* Note this is an assert/atomic_set because it is a software issue for
+-	 * a race to be occuring here */
++	 * a race to be occurring here
++	 */
+ 	KBASE_DEBUG_ASSERT(atomic_read(&kbdev->hwaccess.backend.reset_gpu) ==
+ 						KBASE_RESET_GPU_PREPARED);
+ 	atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+@@ -1419,3 +1531,21 @@ void kbase_reset_gpu_term(struct kbase_device *kbdev)
+ {
+ 	destroy_workqueue(kbdev->hwaccess.backend.reset_workq);
+ }
++
++static u64 kbasep_apply_limited_core_mask(const struct kbase_device *kbdev,
++				const u64 affinity, const u64 limited_core_mask)
++{
++	const u64 result = affinity & limited_core_mask;
++
++#ifdef CONFIG_MALI_DEBUG
++	dev_dbg(kbdev->dev,
++				"Limiting affinity due to BASE_JD_REQ_LIMITED_CORE_MASK from 0x%lx to 0x%lx (mask is 0x%lx)\n",
++				(unsigned long int)affinity,
++				(unsigned long int)result,
++				(unsigned long int)limited_core_mask);
++#else
++	CSTD_UNUSED(kbdev);
++#endif
++
++	return result;
++}
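
The priority check above leans on the wrap-safe sequence comparison added earlier in this file: subtracting two u64 sequence numbers and reinterpreting the delta as signed orders them correctly even across a counter wrap, as long as the true distance stays below INT64_MAX (the same assumption the driver's comment states). A self-contained demonstration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool submitted_later(uint64_t ready, uint64_t existing)
    {
            if (!ready || !existing)        /* no seq_nr set */
                    return false;
            /* Signed reinterpretation of the unsigned delta. */
            return (int64_t)(ready - existing) < 0;
    }

    int main(void)
    {
            /* Plain case: existing (200) was submitted after ready (100). */
            printf("%d\n", submitted_later(100, 200));           /* 1 */
            /* Wrapped case: existing wrapped past zero, still "later". */
            printf("%d\n", submitted_later(UINT64_MAX - 5, 10)); /* 1 */
            /* Ready is the later one: no preemption. */
            printf("%d\n", submitted_later(200, 100));           /* 0 */
            return 0;
    }
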
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
+index 1419b59..1039e85 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2011-2016, 2018-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2011-2016, 2018-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,12 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+-
+ /*
+  * Job Manager backend-specific low-level APIs.
+  */
+@@ -34,7 +31,7 @@
+ #include <linux/atomic.h>
+ 
+ #include <backend/gpu/mali_kbase_jm_rb.h>
+-#include <backend/gpu/mali_kbase_device_internal.h>
++#include <device/mali_kbase_device.h>
+ 
+ /**
+  * kbase_job_submit_nolock() - Submit a job to a certain job-slot
+@@ -62,7 +59,7 @@ void kbase_job_submit_nolock(struct kbase_device *kbdev,
+ void kbase_job_done_slot(struct kbase_device *kbdev, int s, u32 completion_code,
+ 					u64 job_tail, ktime_t *end_timestamp);
+ 
+-#ifdef CONFIG_GPU_TRACEPOINTS
++#if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
+ static inline char *kbasep_make_job_slot_string(int js, char *js_string,
+ 						size_t js_size)
+ {
+@@ -71,11 +68,13 @@ static inline char *kbasep_make_job_slot_string(int js, char *js_string,
+ }
+ #endif
+ 
++#if !MALI_USE_CSF
+ static inline int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js,
+ 						struct kbase_context *kctx)
+ {
+ 	return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT));
+ }
++#endif
+ 
+ 
+ /**
+@@ -94,6 +93,7 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
+ 				struct kbase_jd_atom *katom,
+ 				int js);
+ 
++#if !MALI_USE_CSF
+ /**
+  * kbasep_job_slot_soft_or_hard_stop_do_action() - Perform a soft or hard stop
+  *						   on the specified atom
+@@ -112,6 +112,7 @@ void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
+ 					u32 action,
+ 					base_jd_core_req core_reqs,
+ 					struct kbase_jd_atom *target_katom);
++#endif /* !MALI_USE_CSF */
+ 
+ /**
+  * kbase_backend_soft_hard_stop_slot() - Soft or hard stop jobs on a given job
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
+index 4e4ed05..8ee897f 100755
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2014-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,11 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+ /*
+  * Register-based HW access backend specific APIs
+  */
+@@ -33,16 +31,19 @@
+ #include <tl/mali_kbase_tracepoints.h>
+ #include <mali_kbase_hwcnt_context.h>
+ #include <mali_kbase_reset_gpu.h>
++#include <mali_kbase_kinstr_jm.h>
+ #include <backend/gpu/mali_kbase_cache_policy_backend.h>
+-#include <backend/gpu/mali_kbase_device_internal.h>
++#include <device/mali_kbase_device.h>
+ #include <backend/gpu/mali_kbase_jm_internal.h>
+ #include <backend/gpu/mali_kbase_pm_internal.h>
+ 
+ /* Return whether the specified ringbuffer is empty. HW access lock must be
+- * held */
++ * held
++ */
+ #define SLOT_RB_EMPTY(rb)   (rb->write_idx == rb->read_idx)
+ /* Return number of atoms currently in the specified ringbuffer. HW access lock
+- * must be held */
++ * must be held
++ */
+ #define SLOT_RB_ENTRIES(rb) (int)(s8)(rb->write_idx - rb->read_idx)
+ 
+ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
+@@ -253,6 +254,8 @@ static bool kbase_gpu_check_secure_atoms(struct kbase_device *kbdev,
+ 
+ int kbase_backend_slot_free(struct kbase_device *kbdev, int js)
+ {
++	lockdep_assert_held(&kbdev->hwaccess_lock);
++
+ 	if (atomic_read(&kbdev->hwaccess.backend.reset_gpu) !=
+ 						KBASE_RESET_GPU_NOT_PENDING) {
+ 		/* The GPU is being reset - so prevent submission */
+@@ -278,15 +281,19 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
+ 		break;
+ 
+ 	case KBASE_ATOM_GPU_RB_SUBMITTED:
++		kbase_kinstr_jm_atom_hw_release(katom);
+ 		/* Inform power management at start/finish of atom so it can
+ 		 * update its GPU utilisation metrics. Mark atom as not
+-		 * submitted beforehand. */
++		 * submitted beforehand.
++		 */
+ 		katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
+ 		kbase_pm_metrics_update(kbdev, end_timestamp);
+ 
++		/* Inform platform at start/finish of atom */
++		kbasep_platform_event_atom_complete(katom);
++
+ 		if (katom->core_req & BASE_JD_REQ_PERMON)
+ 			kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+-		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+ 
+ 		KBASE_TLSTREAM_TL_NRET_ATOM_LPU(kbdev, katom,
+ 			&kbdev->gpu_props.props.raw_props.js_features
+@@ -296,6 +303,8 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
+ 			&kbdev->gpu_props.props.raw_props.js_features
+ 				[katom->slot_nr]);
+ 
++		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
++
+ 	case KBASE_ATOM_GPU_RB_READY:
+ 		/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
+ 
+@@ -540,7 +549,8 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
+ 		KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START(kbdev, kbdev);
+ 		/* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
+		 * should ensure that we are not already transitioning, and that
+-		 * there are no atoms currently on the GPU. */
++		 * there are no atoms currently on the GPU.
++		 */
+ 		WARN_ON(kbdev->protected_mode_transition);
+ 		WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
+ 		/* If hwcnt is disabled, it means we didn't clean up correctly
+@@ -566,19 +576,15 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
+ 
+ 		/* We couldn't disable atomically, so kick off a worker */
+ 		if (!kbdev->protected_mode_hwcnt_disabled) {
+-#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+-			queue_work(system_wq,
+-				&kbdev->protected_mode_hwcnt_disable_work);
+-#else
+-			queue_work(system_highpri_wq,
++			kbase_hwcnt_context_queue_work(
++				kbdev->hwcnt_gpu_ctx,
+ 				&kbdev->protected_mode_hwcnt_disable_work);
+-#endif
+ 			return -EAGAIN;
+ 		}
+ 
+-		/* Once reaching this point GPU must be
+-		 * switched to protected mode or hwcnt
+-		 * re-enabled. */
++		/* Once reaching this point GPU must be switched to protected
++		 * mode or hwcnt re-enabled.
++		 */
+ 
+ 		if (kbase_pm_protected_entry_override_enable(kbdev))
+ 			return -EAGAIN;
+@@ -618,7 +624,7 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
+ 						KBASE_PM_CORE_L2) ||
+ 				kbase_pm_get_trans_cores(kbdev,
+ 						KBASE_PM_CORE_L2) ||
+-				kbase_is_gpu_lost(kbdev)) {
++				kbase_is_gpu_removed(kbdev)) {
+ 				/*
+ 				 * The L2 is still powered, wait for all
+ 				 * the users to finish with it before doing
+@@ -718,7 +724,8 @@ static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
+ 		KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START(kbdev, kbdev);
+ 		/* The checks in KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV
+		 * should ensure that we are not already transitioning, and that
+-		 * there are no atoms currently on the GPU. */
++		 * there are no atoms currently on the GPU.
++		 */
+ 		WARN_ON(kbdev->protected_mode_transition);
+ 		WARN_ON(kbase_gpu_atoms_submitted_any(kbdev));
+ 
+@@ -764,8 +771,8 @@ static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
+ 			katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
+ 			kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
+ 			/* Only return if head atom or previous atom
+-			 * already removed - as atoms must be returned
+-			 * in order */
++			 * already removed - as atoms must be returned in order
++			 */
+ 			if (idx == 0 || katom[0]->gpu_rb_state ==
+ 					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ 				kbase_gpu_dequeue_atom(kbdev, js, NULL);
+@@ -811,7 +818,8 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
+ 	lockdep_assert_held(&kbdev->hwaccess_lock);
+ 
+ #ifdef CONFIG_MALI_ARBITER_SUPPORT
+-	if (kbase_reset_gpu_is_active(kbdev) || kbase_is_gpu_lost(kbdev))
++	if (kbase_reset_gpu_is_active(kbdev) ||
++			kbase_is_gpu_removed(kbdev))
+ #else
+ 	if (kbase_reset_gpu_is_active(kbdev))
+ #endif
+@@ -843,7 +851,7 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
+ 					break;
+ 
+ 				katom[idx]->gpu_rb_state =
+-				KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV;
++					KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV;
+ 
+ 				/* ***TRANSITION TO HIGHER STATE*** */
+ 				/* fallthrough */
+@@ -907,12 +915,14 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
+ 					kbase_gpu_mark_atom_for_return(kbdev,
+ 							katom[idx]);
+ 					/* Set EVENT_DONE so this atom will be
+-					   completed, not unpulled. */
++					 * completed, not unpulled.
++					 */
+ 					katom[idx]->event_code =
+ 						BASE_JD_EVENT_DONE;
+ 					/* Only return if head atom or previous
+ 					 * atom already removed - as atoms must
+-					 * be returned in order. */
++					 * be returned in order.
++					 */
+ 					if (idx == 0 ||	katom[0]->gpu_rb_state ==
+ 							KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ 						kbase_gpu_dequeue_atom(kbdev, js, NULL);
+@@ -943,7 +953,8 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
+ 
+ 				if (idx == 1) {
+ 					/* Only submit if head atom or previous
+-					 * atom already submitted */
++					 * atom already submitted
++					 */
+ 					if ((katom[0]->gpu_rb_state !=
+ 						KBASE_ATOM_GPU_RB_SUBMITTED &&
+ 						katom[0]->gpu_rb_state !=
+@@ -959,7 +970,8 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
+ 				}
+ 
+ 				/* If inter-slot serialization in use then don't
+-				 * submit atom if any other slots are in use */
++				 * submit atom if any other slots are in use
++				 */
+ 				if ((kbdev->serialize_jobs &
+ 						KBASE_SERIALIZE_INTER_SLOT) &&
+ 						other_slots_busy(kbdev, js))
+@@ -971,31 +983,37 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
+ 					break;
+ #endif
+ 				/* Check if this job needs the cycle counter
+-				 * enabled before submission */
++				 * enabled before submission
++				 */
+ 				if (katom[idx]->core_req & BASE_JD_REQ_PERMON)
+ 					kbase_pm_request_gpu_cycle_counter_l2_is_on(
+ 									kbdev);
+ 
+ 				kbase_job_hw_submit(kbdev, katom[idx], js);
+ 				katom[idx]->gpu_rb_state =
+-						KBASE_ATOM_GPU_RB_SUBMITTED;
++					KBASE_ATOM_GPU_RB_SUBMITTED;
++
++				/* ***TRANSITION TO HIGHER STATE*** */
++				/* fallthrough */
++			case KBASE_ATOM_GPU_RB_SUBMITTED:
+ 
+ 				/* Inform power management at start/finish of
+ 				 * atom so it can update its GPU utilisation
+-				 * metrics. */
++				 * metrics.
++				 */
+ 				kbase_pm_metrics_update(kbdev,
+ 						&katom[idx]->start_timestamp);
+ 
+-				/* ***TRANSITION TO HIGHER STATE*** */
+-				/* fallthrough */
+-			case KBASE_ATOM_GPU_RB_SUBMITTED:
+-				/* Atom submitted to HW, nothing else to do */
++				/* Inform platform at start/finish of atom */
++				kbasep_platform_event_atom_submit(katom[idx]);
++
+ 				break;
+ 
+ 			case KBASE_ATOM_GPU_RB_RETURN_TO_JS:
+ 				/* Only return if head atom or previous atom
+ 				 * already removed - as atoms must be returned
+-				 * in order */
++				 * in order
++				 */
+ 				if (idx == 0 || katom[0]->gpu_rb_state ==
+ 					KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ 					kbase_gpu_dequeue_atom(kbdev, js, NULL);
+@@ -1013,7 +1031,7 @@ void kbase_backend_run_atom(struct kbase_device *kbdev,
+ 				struct kbase_jd_atom *katom)
+ {
+ 	lockdep_assert_held(&kbdev->hwaccess_lock);
+-	dev_dbg(kbdev->dev, "Backend running atom %p\n", (void *)katom);
++	dev_dbg(kbdev->dev, "Backend running atom %pK\n", (void *)katom);
+ 
+ 	kbase_gpu_enqueue_atom(kbdev, katom);
+ 	kbase_backend_slot_update(kbdev);
+@@ -1074,7 +1092,7 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+ 	struct kbase_context *kctx = katom->kctx;
+ 
+ 	dev_dbg(kbdev->dev,
+-		"Atom %p completed on hw with code 0x%x and job_tail 0x%llx (s:%d)\n",
++		"Atom %pK completed on hw with code 0x%x and job_tail 0x%llx (s:%d)\n",
+ 		(void *)katom, completion_code, job_tail, js);
+ 
+ 	lockdep_assert_held(&kbdev->hwaccess_lock);
+@@ -1098,7 +1116,8 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+ 		 * BASE_JD_REQ_SKIP_CACHE_END is set, the GPU cache is not
+ 		 * flushed. To prevent future evictions causing possible memory
+ 		 * corruption we need to flush the cache manually before any
+-		 * affected memory gets reused. */
++		 * affected memory gets reused.
++		 */
+ 		katom->need_cache_flush_cores_retained = true;
+ 	}
+ 
+@@ -1181,7 +1200,8 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+ 					katom_idx1->gpu_rb_state !=
+ 					KBASE_ATOM_GPU_RB_SUBMITTED) {
+ 				/* Can not dequeue this atom yet - will be
+-				 * dequeued when atom at idx0 completes */
++				 * dequeued when atom at idx0 completes
++				 */
+ 				katom_idx1->event_code = BASE_JD_EVENT_STOPPED;
+ 				kbase_gpu_mark_atom_for_return(kbdev,
+ 								katom_idx1);
+@@ -1194,7 +1214,7 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+ 	if (job_tail != 0 && job_tail != katom->jc) {
+ 		/* Some of the job has been executed */
+ 		dev_dbg(kbdev->dev,
+-			"Update job chain address of atom %p to resume from 0x%llx\n",
++			"Update job chain address of atom %pK to resume from 0x%llx\n",
+ 			(void *)katom, job_tail);
+ 
+ 		katom->jc = job_tail;
+@@ -1214,7 +1234,7 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+ 	 * - Schedule out the parent context if necessary, and schedule a new
+ 	 *   one in.
+ 	 */
+-#ifdef CONFIG_GPU_TRACEPOINTS
++#if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
+ 	{
+ 		/* The atom in the HEAD */
+ 		struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
+@@ -1255,7 +1275,7 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+ 
+ 	if (katom) {
+ 		dev_dbg(kbdev->dev,
+-			"Cross-slot dependency %p has become runnable.\n",
++			"Cross-slot dependency %pK has become runnable.\n",
+ 			(void *)katom);
+ 
+ 		/* Check if there are lower priority jobs to soft stop */
+@@ -1268,7 +1288,8 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
+ 	kbase_pm_update_state(kbdev);
+ 
+ 	/* Job completion may have unblocked other atoms. Try to update all job
+-	 * slots */
++	 * slots
++	 */
+ 	kbase_backend_slot_update(kbdev);
+ }
+ 
+@@ -1319,7 +1340,8 @@ void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
+ 				katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
+ 				/* As the atom was not removed, increment the
+ 				 * index so that we read the correct atom in the
+-				 * next iteration. */
++				 * next iteration.
++				 */
+ 				atom_idx++;
+ 				continue;
+ 			}
+@@ -1422,7 +1444,8 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ 		katom_idx0_valid = (katom_idx0 == katom);
+ 		/* If idx0 is to be removed and idx1 is on the same context,
+ 		 * then idx1 must also be removed otherwise the atoms might be
+-		 * returned out of order */
++		 * returned out of order
++		 */
+ 		if (katom_idx1)
+ 			katom_idx1_valid = (katom_idx1 == katom) ||
+ 						(katom_idx0_valid &&
+@@ -1469,7 +1492,8 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ 				if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+ 						JS_COMMAND_NEXT)) == 0) {
+ 					/* idx0 has already completed - stop
+-					 * idx1 if needed*/
++					 * idx1 if needed
++					 */
+ 					if (katom_idx1_valid) {
+ 						kbase_gpu_stop_atom(kbdev, js,
+ 								katom_idx1,
+@@ -1478,7 +1502,8 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ 					}
+ 				} else {
+ 					/* idx1 is in NEXT registers - attempt
+-					 * to remove */
++					 * to remove
++					 */
+ 					kbase_reg_write(kbdev,
+ 							JOB_SLOT_REG(js,
+ 							JS_COMMAND_NEXT),
+@@ -1493,7 +1518,8 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ 							JS_HEAD_NEXT_HI))
+ 									!= 0) {
+ 						/* idx1 removed successfully,
+-						 * will be handled in IRQ */
++						 * will be handled in IRQ
++						 */
+ 						kbase_gpu_remove_atom(kbdev,
+ 								katom_idx1,
+ 								action, true);
+@@ -1507,7 +1533,8 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ 						ret = true;
+ 					} else if (katom_idx1_valid) {
+ 						/* idx0 has already completed,
+-						 * stop idx1 if needed */
++						 * stop idx1 if needed
++						 */
+ 						kbase_gpu_stop_atom(kbdev, js,
+ 								katom_idx1,
+ 								action);
+@@ -1526,7 +1553,8 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ 				 * flow was also interrupted, and this function
+ 				 * might not enter disjoint state e.g. if we
+ 				 * don't actually do a hard stop on the head
+-				 * atom */
++				 * atom
++				 */
+ 				kbase_gpu_stop_atom(kbdev, js, katom_idx0,
+ 									action);
+ 				ret = true;
+@@ -1554,7 +1582,8 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ 				ret = true;
+ 			} else {
+ 				/* idx1 is in NEXT registers - attempt to
+-				 * remove */
++				 * remove
++				 */
+ 				kbase_reg_write(kbdev, JOB_SLOT_REG(js,
+ 							JS_COMMAND_NEXT),
+ 							JS_COMMAND_NOP);
+@@ -1564,13 +1593,15 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
+ 				    kbase_reg_read(kbdev, JOB_SLOT_REG(js,
+ 						JS_HEAD_NEXT_HI)) != 0) {
+ 					/* idx1 removed successfully, will be
+-					 * handled in IRQ once idx0 completes */
++					 * handled in IRQ once idx0 completes
++					 */
+ 					kbase_gpu_remove_atom(kbdev, katom_idx1,
+ 									action,
+ 									false);
+ 				} else {
+ 					/* idx0 has already completed - stop
+-					 * idx1 */
++					 * idx1
++					 */
+ 					kbase_gpu_stop_atom(kbdev, js,
+ 								katom_idx1,
+ 								action);
+@@ -1644,7 +1675,7 @@ void kbase_gpu_dump_slots(struct kbase_device *kbdev)
+ 
+ 			if (katom)
+ 				dev_info(kbdev->dev,
+-				"  js%d idx%d : katom=%p gpu_rb_state=%d\n",
++				"  js%d idx%d : katom=%pK gpu_rb_state=%d\n",
+ 				js, idx, katom, katom->gpu_rb_state);
+ 			else
+ 				dev_info(kbdev->dev, "  js%d idx%d : empty\n",
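+
The hunk above also moves the PM metrics update under the KBASE_ATOM_GPU_RB_SUBMITTED case and adds the new platform event hook there, relying on the switch fallthrough so both run as soon as an atom reaches the submitted state. A minimal sketch of that fallthrough-driven state machine pattern, with hypothetical states rather than the driver's types:

    /* Sketch only: the RB_* states and handler bodies are placeholders,
     * not the driver's actual symbols.
     */
    enum rb_state { RB_READY, RB_SUBMITTED, RB_DONE };

    static void advance(enum rb_state *s)
    {
        switch (*s) {
        case RB_READY:
            /* submit the work item to hardware */
            *s = RB_SUBMITTED;
            /* fallthrough */
        case RB_SUBMITTED:
            /* post-submission accounting (metrics, platform hook) runs
             * both on a fresh submission and on later update passes
             */
            break;
        case RB_DONE:
            break;
        }
    }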
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h
+index c3b9f2d..d3ff203 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2018, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,11 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+ /*
+  * Register-based HW access backend specific APIs
+  */
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
+index fcc0437..02d7cdb 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2014-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,11 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+ /*
+  * Register-based HW access backend specific job scheduler APIs
+  */
+@@ -31,13 +29,14 @@
+ #include <backend/gpu/mali_kbase_jm_internal.h>
+ #include <backend/gpu/mali_kbase_js_internal.h>
+ 
++#if !MALI_USE_CSF
+ /*
+  * Hold the runpool_mutex for this
+  */
+ static inline bool timer_callback_should_run(struct kbase_device *kbdev)
+ {
+ 	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+-	s8 nr_running_ctxs;
++	int nr_running_ctxs;
+ 
+ 	lockdep_assert_held(&kbdev->js_data.runpool_mutex);
+ 
+@@ -47,7 +46,8 @@ static inline bool timer_callback_should_run(struct kbase_device *kbdev)
+ 
+ 	/* nr_contexts_pullable is updated with the runpool_mutex. However, the
+ 	 * locking in the caller gives us a barrier that ensures
+-	 * nr_contexts_pullable is up-to-date for reading */
++	 * nr_contexts_pullable is up-to-date for reading
++	 */
+ 	nr_running_ctxs = atomic_read(&kbdev->js_data.nr_contexts_runnable);
+ 
+ #ifdef CONFIG_MALI_DEBUG
+@@ -69,10 +69,10 @@ static inline bool timer_callback_should_run(struct kbase_device *kbdev)
+ 		 * don't check KBASEP_JS_CTX_ATTR_NON_COMPUTE).
+ 		 */
+ 		{
+-			s8 nr_compute_ctxs =
++			int nr_compute_ctxs =
+ 				kbasep_js_ctx_attr_count_on_runpool(kbdev,
+ 						KBASEP_JS_CTX_ATTR_COMPUTE);
+-			s8 nr_noncompute_ctxs = nr_running_ctxs -
++			int nr_noncompute_ctxs = nr_running_ctxs -
+ 							nr_compute_ctxs;
+ 
+ 			return (bool) (nr_compute_ctxs >= 2 ||
+@@ -113,7 +113,8 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+ 
+ 		if (atom != NULL) {
+ 			/* The current version of the model doesn't support
+-			 * Soft-Stop */
++			 * Soft-Stop
++			 */
+ 			if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) {
+ 				u32 ticks = atom->ticks++;
+ 
+@@ -141,7 +142,8 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+ 				 * new soft_stop timeout. This ensures that
+ 				 * atoms do not miss any of the timeouts due to
+ 				 * races between this worker and the thread
+-				 * changing the timeouts. */
++				 * changing the timeouts.
++				 */
+ 				if (backend->timeouts_updated &&
+ 						ticks > soft_stop_ticks)
+ 					ticks = atom->ticks = soft_stop_ticks;
+@@ -171,10 +173,11 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+ 					 *
+ 					 * Similarly, if it's about to be
+ 					 * decreased, the last job from another
+-					 * context has already finished, so it's
+-					 * not too bad that we observe the older
+-					 * value and register a disjoint event
+-					 * when we try soft-stopping */
++					 * context has already finished, so
++					 * it's not too bad that we observe the
++					 * older value and register a disjoint
++					 * event when we try soft-stopping
++					 */
+ 					if (js_devdata->nr_user_contexts_running
+ 							>= disjoint_threshold)
+ 						softstop_flags |=
+@@ -252,9 +255,9 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+ 		}
+ 	}
+ 	if (reset_needed) {
+-		dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (JS_RESET_TICKS_SS/DUMPING timeout hit). Issueing GPU soft-reset to resolve.");
++		dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (JS_RESET_TICKS_SS/DUMPING timeout hit). Issuing GPU soft-reset to resolve.");
+ 
+-		if (kbase_prepare_to_reset_gpu_locked(kbdev))
++		if (kbase_prepare_to_reset_gpu_locked(kbdev, RESET_FLAGS_NONE))
+ 			kbase_reset_gpu_locked(kbdev);
+ 	}
+ 	/* the timer is re-issued if there is contexts in the run-pool */
+@@ -270,9 +273,11 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+ 
+ 	return HRTIMER_NORESTART;
+ }
++#endif /* !MALI_USE_CSF */
+ 
+ void kbase_backend_ctx_count_changed(struct kbase_device *kbdev)
+ {
++#if !MALI_USE_CSF
+ 	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ 	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+ 	unsigned long flags;
+@@ -284,11 +289,12 @@ void kbase_backend_ctx_count_changed(struct kbase_device *kbdev)
+ 		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ 		backend->timer_running = false;
+ 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+-		/* From now on, return value of timer_callback_should_run() will
+-		 * also cause the timer to not requeue itself. Its return value
+-		 * cannot change, because it depends on variables updated with
+-		 * the runpool_mutex held, which the caller of this must also
+-		 * hold */
++		/* From now on, return value of timer_callback_should_run()
++		 * will also cause the timer to not requeue itself. Its return
++		 * value cannot change, because it depends on variables updated
++		 * with the runpool_mutex held, which the caller of this must
++		 * also hold
++		 */
+ 		hrtimer_cancel(&backend->scheduling_timer);
+ 	}
+ 
+@@ -303,25 +309,36 @@ void kbase_backend_ctx_count_changed(struct kbase_device *kbdev)
+ 
+ 		KBASE_KTRACE_ADD_JM(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u, 0u);
+ 	}
++#else /* !MALI_USE_CSF */
++	CSTD_UNUSED(kbdev);
++#endif /* !MALI_USE_CSF */
+ }
+ 
+ int kbase_backend_timer_init(struct kbase_device *kbdev)
+ {
++#if !MALI_USE_CSF
+ 	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+ 
+ 	hrtimer_init(&backend->scheduling_timer, CLOCK_MONOTONIC,
+ 							HRTIMER_MODE_REL);
+ 	backend->scheduling_timer.function = timer_callback;
+ 	backend->timer_running = false;
++#else /* !MALI_USE_CSF */
++	CSTD_UNUSED(kbdev);
++#endif /* !MALI_USE_CSF */
+ 
+ 	return 0;
+ }
+ 
+ void kbase_backend_timer_term(struct kbase_device *kbdev)
+ {
++#if !MALI_USE_CSF
+ 	struct kbase_backend_data *backend = &kbdev->hwaccess.backend;
+ 
+ 	hrtimer_cancel(&backend->scheduling_timer);
++#else /* !MALI_USE_CSF */
++	CSTD_UNUSED(kbdev);
++#endif /* !MALI_USE_CSF */
+ }
+ 
+ void kbase_backend_timer_suspend(struct kbase_device *kbdev)
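+
The MALI_USE_CSF guards above compile the whole scheduling timer out on command-stream front-end GPUs; on job-manager GPUs the lifecycle follows the standard Linux hrtimer pattern. A condensed sketch of that pattern (the 100 ms period and the empty callback body are illustrative, not the driver's values):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer sched_timer;

    static enum hrtimer_restart sched_timer_cb(struct hrtimer *timer)
    {
        /* inspect running work and soft-stop if over budget (elided) */
        return HRTIMER_NORESTART; /* re-armed explicitly, as above */
    }

    static void sched_timer_setup(void)
    {
        hrtimer_init(&sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sched_timer.function = sched_timer_cb;
        hrtimer_start(&sched_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
    }

    static void sched_timer_teardown(void)
    {
        hrtimer_cancel(&sched_timer); /* waits out a running callback */
    }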
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h
+index 6576e55..5284288 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_internal.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2015, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,11 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+ /*
+  * Register-based HW access backend specific job scheduler APIs
+  */
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.c
+index e67d12b..7131546 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.c
+@@ -1,12 +1,12 @@
+-// SPDX-License-Identifier: GPL-2.0
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -17,14 +17,12 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ #include <mali_kbase.h>
+ #include <mali_kbase_bits.h>
+ #include <mali_kbase_config_defaults.h>
+-#include <backend/gpu/mali_kbase_device_internal.h>
++#include <device/mali_kbase_device.h>
+ #include "mali_kbase_l2_mmu_config.h"
+ 
+ /**
+@@ -56,23 +54,34 @@ struct l2_mmu_config_limit {
+ /*
+  * Zero represents no limit
+  *
+- * For LBEX TBEX TTRX and TNAX:
++ * For LBEX TBEX TBAX TTRX and TNAX:
+  *   The value represents the number of outstanding reads (6 bits) or writes (5 bits)
+  *
+  * For all other GPUS it is a fraction see: mali_kbase_config_defaults.h
+  */
+ static const struct l2_mmu_config_limit limits[] = {
+-	 /* GPU                       read                  write            */
+-	 {GPU_ID2_PRODUCT_LBEX, {0, GENMASK(10, 5), 5}, {0, GENMASK(16, 12), 12} },
+-	 {GPU_ID2_PRODUCT_TBEX, {0, GENMASK(10, 5), 5}, {0, GENMASK(16, 12), 12} },
+-	 {GPU_ID2_PRODUCT_TTRX, {0, GENMASK(12, 7), 7}, {0, GENMASK(17, 13), 13} },
+-	 {GPU_ID2_PRODUCT_TNAX, {0, GENMASK(12, 7), 7}, {0, GENMASK(17, 13), 13} },
+-	 {GPU_ID2_PRODUCT_TGOX,
+-	   {KBASE_3BIT_AID_32, GENMASK(14, 12), 12},
+-	   {KBASE_3BIT_AID_32, GENMASK(17, 15), 15} },
+-	 {GPU_ID2_PRODUCT_TNOX,
+-	   {KBASE_3BIT_AID_32, GENMASK(14, 12), 12},
+-	   {KBASE_3BIT_AID_32, GENMASK(17, 15), 15} },
++	/* GPU, read, write */
++	{GPU_ID2_PRODUCT_LBEX,
++		{0, GENMASK(10, 5), 5},
++		{0, GENMASK(16, 12), 12} },
++	{GPU_ID2_PRODUCT_TBEX,
++		{0, GENMASK(10, 5), 5},
++		{0, GENMASK(16, 12), 12} },
++	{GPU_ID2_PRODUCT_TBAX,
++		{0, GENMASK(10, 5), 5},
++		{0, GENMASK(16, 12), 12} },
++	{GPU_ID2_PRODUCT_TTRX,
++		{0, GENMASK(12, 7), 7},
++		{0, GENMASK(17, 13), 13} },
++	{GPU_ID2_PRODUCT_TNAX,
++		{0, GENMASK(12, 7), 7},
++		{0, GENMASK(17, 13), 13} },
++	{GPU_ID2_PRODUCT_TGOX,
++		{KBASE_3BIT_AID_32, GENMASK(14, 12), 12},
++		{KBASE_3BIT_AID_32, GENMASK(17, 15), 15} },
++	{GPU_ID2_PRODUCT_TNOX,
++		{KBASE_3BIT_AID_32, GENMASK(14, 12), 12},
++		{KBASE_3BIT_AID_32, GENMASK(17, 15), 15} },
+ };
+ 
+ int kbase_set_mmu_quirks(struct kbase_device *kbdev)
+@@ -100,7 +109,7 @@ int kbase_set_mmu_quirks(struct kbase_device *kbdev)
+ 
+ 	mmu_config = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG));
+ 
+-	if (kbase_is_gpu_lost(kbdev))
++	if (kbase_is_gpu_removed(kbdev))
+ 		return -EIO;
+ 
+ 	mmu_config &= ~(limit.read.mask | limit.write.mask);
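+
Each entry in the limits table above pairs a GENMASK-built field mask with its shift, and kbase_set_mmu_quirks() applies it as a read-modify-write of L2_MMU_CONFIG. A standalone sketch of the bit manipulation, using the TTRX read field from the table and made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    /* local stand-in for the kernel's GENMASK() */
    #define GENMASK32(h, l) (((~0u) >> (31 - (h))) & (~0u << (l)))

    int main(void)
    {
        uint32_t mask = GENMASK32(12, 7);  /* TTRX read field, bits 12..7 */
        uint32_t shift = 7;
        uint32_t mmu_config = 0xffffffff;  /* illustrative register value */
        uint32_t limit = 0x10;             /* 6-bit outstanding-read limit */

        mmu_config &= ~mask;                   /* clear the field */
        mmu_config |= (limit << shift) & mask; /* insert the new limit */
        printf("L2_MMU_CONFIG -> 0x%08x\n", (unsigned)mmu_config);
        return 0;
    }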
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.h
+index 0c779ac..07014ad 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_l2_mmu_config.h
+@@ -1,31 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, you can access it online at
+- * http://www.gnu.org/licenses/gpl-2.0.html.
+- *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+- *//* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * (C) COPYRIGHT 2019-2020 ARM Limited. All rights reserved.
+- *
+- * This program is free software and is provided to you under the terms of the
+- * GNU General Public License version 2 as published by the Free Software
+- * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c
+index e33fe0b..077c234 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2010-2015, 2018-2019 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2010-2015, 2018-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,12 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+-
+ /*
+  * "Always on" power management policy
+  */
+@@ -61,7 +58,11 @@ const struct kbase_pm_policy kbase_pm_always_on_policy_ops = {
+ 	always_on_term,			/* term */
+ 	always_on_shaders_needed,	/* shaders_needed */
+ 	always_on_get_core_active,	/* get_core_active */
++	NULL,				/* handle_event */
+ 	KBASE_PM_POLICY_ID_ALWAYS_ON,	/* id */
++#if MALI_USE_CSF
++	ALWAYS_ON_PM_SCHED_FLAGS,	/* pm_sched_flags */
++#endif
+ };
+ 
+ KBASE_EXPORT_TEST_API(kbase_pm_always_on_policy_ops);
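+
The ops table above is initialised positionally, so adding the handle_event member means every policy must now fill that slot, if only with NULL. A hedged sketch of how such a table is typically consumed (the struct and names are illustrative, not the driver's):

    /* Illustrative ops layout; a NULL handle_event means the policy
     * ignores power-management events.
     */
    struct pm_policy_ops {
        void (*init)(void *dev);
        void (*term)(void *dev);
        void (*handle_event)(void *dev, int event); /* may be NULL */
        int id;
    };

    static void dispatch_event(const struct pm_policy_ops *ops,
                               void *dev, int event)
    {
        if (ops->handle_event)
            ops->handle_event(dev, event);
    }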
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h
+index e7927cf..98d35da 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2011-2015,2018 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2011-2015, 2018, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,12 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+-
+ /*
+  * "Always on" power management policy
+  */
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
+index 576c9f2..cc791df 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
+@@ -1,11 +1,12 @@
+- /*
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
++/*
+  *
+- * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,11 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+ /*
+  * GPU backend implementation of base kernel power management APIs
+  */
+@@ -30,14 +28,16 @@
+ #include <mali_kbase_config_defaults.h>
+ 
+ #include <mali_kbase_pm.h>
++#if !MALI_USE_CSF
+ #include <mali_kbase_hwaccess_jm.h>
+ #include <backend/gpu/mali_kbase_js_internal.h>
+ #include <backend/gpu/mali_kbase_jm_internal.h>
++#endif /* !MALI_USE_CSF */
+ #include <mali_kbase_hwcnt_context.h>
+ #include <backend/gpu/mali_kbase_pm_internal.h>
+ #include <backend/gpu/mali_kbase_devfreq.h>
+ #include <mali_kbase_dummy_job_wa.h>
+-#include <mali_kbase_irq_internal.h>
++#include <backend/gpu/mali_kbase_irq_internal.h>
+ 
+ static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
+ static void kbase_pm_hwcnt_disable_worker(struct work_struct *data);
+@@ -106,6 +106,11 @@ void kbase_pm_register_access_enable(struct kbase_device *kbdev)
+ 	if (callbacks)
+ 		callbacks->power_on_callback(kbdev);
+ 
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++	if (WARN_ON(kbase_pm_is_gpu_lost(kbdev)))
++		dev_err(kbdev->dev, "Attempting to power on while GPU lost\n");
++#endif
++
+ 	kbdev->pm.backend.gpu_powered = true;
+ }
+ 
+@@ -139,24 +144,35 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
+ 
+ 	kbdev->pm.backend.ca_cores_enabled = ~0ull;
+ 	kbdev->pm.backend.gpu_powered = false;
++	kbdev->pm.backend.gpu_ready = false;
+ 	kbdev->pm.suspending = false;
+ #ifdef CONFIG_MALI_ARBITER_SUPPORT
+-	kbdev->pm.gpu_lost = false;
++	kbase_pm_set_gpu_lost(kbdev, false);
+ #endif
+ #ifdef CONFIG_MALI_DEBUG
+ 	kbdev->pm.backend.driver_ready_for_irqs = false;
+ #endif /* CONFIG_MALI_DEBUG */
+ 	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);
+ 
++#if !MALI_USE_CSF
+ 	/* Initialise the metrics subsystem */
+ 	ret = kbasep_pm_metrics_init(kbdev);
+ 	if (ret)
+ 		return ret;
++#else
++	mutex_init(&kbdev->pm.backend.policy_change_lock);
++	kbdev->pm.backend.policy_change_clamp_state_to_off = false;
++	/* Due to dependency on kbase_ipa_control, the metrics subsystem can't
++	 * be initialized here.
++	 */
++	CSTD_UNUSED(ret);
++#endif
+ 
+ 	init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
+ 	kbdev->pm.backend.reset_done = false;
+ 
+ 	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
++	init_waitqueue_head(&kbdev->pm.resume_wait);
+ 	kbdev->pm.active_count = 0;
+ 
+ 	spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);
+@@ -213,7 +229,9 @@ pm_state_machine_fail:
+ 	kbase_pm_policy_term(kbdev);
+ 	kbase_pm_ca_term(kbdev);
+ workq_fail:
++#if !MALI_USE_CSF
+ 	kbasep_pm_metrics_term(kbdev);
++#endif
+ 	return -EINVAL;
+ }
+ 
+@@ -222,7 +240,8 @@ void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
+ 	lockdep_assert_held(&kbdev->pm.lock);
+ 
+ 	/* Turn clocks and interrupts on - no-op if we haven't done a previous
+-	 * kbase_pm_clock_off() */
++	 * kbase_pm_clock_off()
++	 */
+ 	kbase_pm_clock_on(kbdev, is_resume);
+ 
+ 	if (!is_resume) {
+@@ -240,7 +259,8 @@ void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
+ 	kbase_pm_update_cores_state(kbdev);
+ 
+ 	/* NOTE: We don't wait to reach the desired state, since running atoms
+-	 * will wait for that state to be reached anyway */
++	 * will wait for that state to be reached anyway
++	 */
+ }
+ 
+ static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
+@@ -251,13 +271,20 @@ static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
+ 	struct kbase_pm_backend_data *backend = &pm->backend;
+ 	unsigned long flags;
+ 
++#if !MALI_USE_CSF
+ 	/* Wait for power transitions to complete. We do this with no locks held
+ 	 * so that we don't deadlock with any pending workqueues.
+ 	 */
+ 	kbase_pm_wait_for_desired_state(kbdev);
++#endif
+ 
+ 	kbase_pm_lock(kbdev);
+ 
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++	if (kbase_pm_is_gpu_lost(kbdev))
++		backend->poweron_required = false;
++#endif
++
+ 	if (!backend->poweron_required) {
+ 		unsigned long flags;
+ 
+@@ -278,6 +305,14 @@ static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
+ 			kbase_flush_mmu_wqs(kbdev);
+ 			kbase_pm_lock(kbdev);
+ 
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++			/* poweron_required may have changed while pm lock
++			 * was released.
++			 */
++			if (kbase_pm_is_gpu_lost(kbdev))
++				backend->poweron_required = false;
++#endif
++
+ 			/* Turn off clock now that fault have been handled. We
+ 			 * dropped locks so poweron_required may have changed -
+ 			 * power back on if this is the case (effectively only
+@@ -296,9 +331,14 @@ static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
+ 	if (backend->poweron_required) {
+ 		backend->poweron_required = false;
+ 		kbdev->pm.backend.l2_desired = true;
++#if MALI_USE_CSF
++		kbdev->pm.backend.mcu_desired = true;
++#endif
+ 		kbase_pm_update_state(kbdev);
+ 		kbase_pm_update_cores_state_nolock(kbdev);
++#if !MALI_USE_CSF
+ 		kbase_backend_slot_update(kbdev);
++#endif /* !MALI_USE_CSF */
+ 	}
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ 
+@@ -451,12 +491,22 @@ static void kbase_pm_hwcnt_disable_worker(struct work_struct *data)
+ 		 */
+ 		backend->hwcnt_disabled = true;
+ 		kbase_pm_update_state(kbdev);
++#if !MALI_USE_CSF
+ 		kbase_backend_slot_update(kbdev);
++#endif /* !MALI_USE_CSF */
+ 	} else {
+ 		/* PM state was updated while we were doing the disable,
+ 		 * so we need to undo the disable we just performed.
+ 		 */
++#if MALI_USE_CSF
++		unsigned long lock_flags;
++
++		kbase_csf_scheduler_spin_lock(kbdev, &lock_flags);
++#endif
+ 		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
++#if MALI_USE_CSF
++		kbase_csf_scheduler_spin_unlock(kbdev, lock_flags);
++#endif
+ 	}
+ 
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+@@ -476,8 +526,12 @@ void kbase_pm_do_poweroff(struct kbase_device *kbdev)
+ 	if (kbdev->pm.backend.poweroff_wait_in_progress)
+ 		goto unlock_hwaccess;
+ 
++#if MALI_USE_CSF
++	kbdev->pm.backend.mcu_desired = false;
++#else
+ 	/* Force all cores off */
+ 	kbdev->pm.backend.shaders_desired = false;
++#endif
+ 	kbdev->pm.backend.l2_desired = false;
+ 
+ 	kbdev->pm.backend.poweroff_wait_in_progress = true;
+@@ -528,20 +582,35 @@ int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
+ 	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
+ 
+ 	/* Power up the GPU, don't enable IRQs as we are not ready to receive
+-	 * them. */
++	 * them.
++	 */
+ 	ret = kbase_pm_init_hw(kbdev, flags);
+ 	if (ret) {
+ 		kbase_pm_unlock(kbdev);
+ 		return ret;
+ 	}
+-
++#if MALI_USE_CSF
++	kbdev->pm.debug_core_mask =
++		kbdev->gpu_props.props.raw_props.shader_present;
++	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
++	/* Set the initial value for 'shaders_avail'. It will later be
++	 * modified only by the MCU state machine, once the shader core
++	 * allocation enable mask request has completed. Its value then
++	 * indicates the mask of cores currently used by the FW for the
++	 * allocation of endpoints requested by CSGs.
++	 */
++	kbdev->pm.backend.shaders_avail = kbase_pm_ca_get_core_mask(kbdev);
++	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
++#else
+ 	kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
+ 			kbdev->pm.debug_core_mask[1] =
+ 			kbdev->pm.debug_core_mask[2] =
+ 			kbdev->gpu_props.props.raw_props.shader_present;
++#endif
+ 
+ 	/* Pretend the GPU is active to prevent a power policy turning the GPU
+-	 * cores off */
++	 * cores off
++	 */
+ 	kbdev->pm.active_count = 1;
+ 
+ 	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+@@ -553,13 +622,27 @@ int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
+ 								irq_flags);
+ 
+ 	/* We are ready to receive IRQ's now as power policy is set up, so
+-	 * enable them now. */
++	 * enable them now.
++	 */
+ #ifdef CONFIG_MALI_DEBUG
+ 	kbdev->pm.backend.driver_ready_for_irqs = true;
+ #endif
+ 	kbase_pm_enable_interrupts(kbdev);
+ 
++	WARN_ON(!kbdev->pm.backend.gpu_powered);
++	/* GPU has been powered up (by kbase_pm_init_hw) and interrupts have
++	 * been enabled, so GPU is ready for use and PM state machine can be
++	 * exercised from this point onwards.
++	 */
++	kbdev->pm.backend.gpu_ready = true;
++
+ 	/* Turn on the GPU and any cores needed by the policy */
++#if MALI_USE_CSF
++	/* Turn on the L2 caches, needed for firmware boot */
++	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
++	kbdev->pm.backend.l2_desired = true;
++	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
++#endif
+ 	kbase_pm_do_poweron(kbdev, false);
+ 	kbase_pm_unlock(kbdev);
+ 
+@@ -573,6 +656,8 @@ void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
+ 	mutex_lock(&kbdev->pm.lock);
+ 	kbase_pm_do_poweroff(kbdev);
+ 	mutex_unlock(&kbdev->pm.lock);
++
++	kbase_pm_wait_for_poweroff_complete(kbdev);
+ }
+ 
+ KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);
+@@ -587,10 +672,15 @@ void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
+ 
+ 	if (kbdev->pm.backend.hwcnt_disabled) {
+ 		unsigned long flags;
+-
++#if MALI_USE_CSF
++		kbase_csf_scheduler_spin_lock(kbdev, &flags);
++		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
++		kbase_csf_scheduler_spin_unlock(kbdev, flags);
++#else
+ 		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ 		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
+ 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
++#endif
+ 	}
+ 
+ 	/* Free any resources the policy allocated */
+@@ -598,8 +688,16 @@ void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
+ 	kbase_pm_policy_term(kbdev);
+ 	kbase_pm_ca_term(kbdev);
+ 
++#if !MALI_USE_CSF
+ 	/* Shut down the metrics subsystem */
+ 	kbasep_pm_metrics_term(kbdev);
++#else
++	if (WARN_ON(mutex_is_locked(&kbdev->pm.backend.policy_change_lock))) {
++		mutex_lock(&kbdev->pm.backend.policy_change_lock);
++		mutex_unlock(&kbdev->pm.backend.policy_change_lock);
++	}
++	mutex_destroy(&kbdev->pm.backend.policy_change_lock);
++#endif
+ 
+ 	destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
+ }
+@@ -611,11 +709,24 @@ void kbase_pm_power_changed(struct kbase_device *kbdev)
+ 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ 	kbase_pm_update_state(kbdev);
+ 
++#if !MALI_USE_CSF
+ 		kbase_backend_slot_update(kbdev);
++#endif /* !MALI_USE_CSF */
+ 
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+ 
++#if MALI_USE_CSF
++void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask)
++{
++	lockdep_assert_held(&kbdev->hwaccess_lock);
++	lockdep_assert_held(&kbdev->pm.lock);
++
++	kbdev->pm.debug_core_mask = new_core_mask;
++	kbase_pm_update_dynamic_cores_onoff(kbdev);
++}
++KBASE_EXPORT_TEST_API(kbase_pm_set_debug_core_mask);
++#else
+ void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
+ 		u64 new_core_mask_js0, u64 new_core_mask_js1,
+ 		u64 new_core_mask_js2)
+@@ -636,6 +747,7 @@ void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
+ 
+ 	kbase_pm_update_dynamic_cores_onoff(kbdev);
+ }
++#endif /* MALI_USE_CSF */
+ 
+ void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
+ {
+@@ -651,12 +763,15 @@ void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
+ {
+ 	/* Force power off the GPU and all cores (regardless of policy), only
+ 	 * after the PM active count reaches zero (otherwise, we risk turning it
+-	 * off prematurely) */
++	 * off prematurely)
++	 */
+ 	kbase_pm_lock(kbdev);
+ 
+ 	kbase_pm_do_poweroff(kbdev);
+ 
++#if !MALI_USE_CSF
+ 	kbase_backend_timer_suspend(kbdev);
++#endif /* !MALI_USE_CSF */
+ 
+ 	kbase_pm_unlock(kbdev);
+ 
+@@ -672,12 +787,19 @@ void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
+ 
+ 	kbdev->pm.suspending = false;
+ #ifdef CONFIG_MALI_ARBITER_SUPPORT
+-	kbdev->pm.gpu_lost = false;
++	if (kbase_pm_is_gpu_lost(kbdev)) {
++		dev_dbg(kbdev->dev, "%s: GPU lost in progress\n", __func__);
++		kbase_pm_unlock(kbdev);
++		return;
++	}
+ #endif
+ 	kbase_pm_do_poweron(kbdev, true);
+ 
++#if !MALI_USE_CSF
+ 	kbase_backend_timer_resume(kbdev);
++#endif /* !MALI_USE_CSF */
+ 
++	wake_up_all(&kbdev->pm.resume_wait);
+ 	kbase_pm_unlock(kbdev);
+ }
+ 
+@@ -685,63 +807,50 @@ void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
+ void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
+ {
+ 	unsigned long flags;
+-	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+ 	ktime_t end_timestamp = ktime_get();
++	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
+ 
+-	/* Full GPU reset will have been done by hypervisor, so cancel */
+-	atomic_set(&kbdev->hwaccess.backend.reset_gpu,
+-			KBASE_RESET_GPU_NOT_PENDING);
+-	hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);
+-
+-	/* GPU is no longer mapped to VM.  So no interrupts will be received
+-	 * and Mali registers have been replaced by dummy RAM
+-	 */
+-	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+-	spin_lock(&kbdev->mmu_mask_change);
+-	kbdev->irq_reset_flush = true;
+-	spin_unlock(&kbdev->mmu_mask_change);
+-	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+-	kbase_synchronize_irqs(kbdev);
+-	kbase_flush_mmu_wqs(kbdev);
+-	kbdev->irq_reset_flush = false;
++	if (!kbdev->arb.arb_if)
++		return;
+ 
+-	/* Clear all jobs running on the GPU */
+ 	mutex_lock(&kbdev->pm.lock);
+-	kbdev->pm.gpu_lost = true;
+-	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+-	kbdev->protected_mode = false;
+-	if (!kbdev->pm.backend.protected_entry_transition_override)
+-		kbase_backend_reset(kbdev, &end_timestamp);
+-	kbase_pm_metrics_update(kbdev, NULL);
+-	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
++	mutex_lock(&arb_vm_state->vm_state_lock);
++	if (kbdev->pm.backend.gpu_powered &&
++			!kbase_pm_is_gpu_lost(kbdev)) {
++		kbase_pm_set_gpu_lost(kbdev, true);
++
++		/* GPU is no longer mapped to VM.  So no interrupts will
++		 * be received and Mali registers have been replaced by
++		 * dummy RAM
++		 */
++		WARN(!kbase_is_gpu_removed(kbdev),
++			"GPU is still available after GPU lost event\n");
+ 
+-	/* Cancel any pending HWC dumps */
+-	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+-	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+-	kbdev->hwcnt.backend.triggered = 1;
+-	wake_up(&kbdev->hwcnt.backend.wait);
+-	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
++		/* Full GPU reset will have been done by hypervisor, so
++		 * cancel
++		 */
++		atomic_set(&kbdev->hwaccess.backend.reset_gpu,
++				KBASE_RESET_GPU_NOT_PENDING);
++		hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);
++		kbase_synchronize_irqs(kbdev);
+ 
+-	/* Wait for all threads keeping GPU active to complete */
+-	mutex_unlock(&kbdev->pm.lock);
+-	wait_event(kbdev->pm.zero_active_count_wait,
+-			kbdev->pm.active_count == 0);
+-	mutex_lock(&kbdev->pm.lock);
++		/* Clear all jobs running on the GPU */
++		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
++		kbdev->protected_mode = false;
++		kbase_backend_reset(kbdev, &end_timestamp);
++		kbase_pm_metrics_update(kbdev, NULL);
++		kbase_pm_update_state(kbdev);
++		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ 
+-	/* Update state to GPU off */
+-	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+-	kbdev->pm.backend.shaders_desired = false;
+-	kbdev->pm.backend.l2_desired = false;
+-	backend->l2_state = KBASE_L2_OFF;
+-	backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF;
+-	kbdev->pm.backend.gpu_powered = false;
+-	backend->poweroff_wait_in_progress = false;
+-	KBASE_KTRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, 0);
+-	wake_up(&kbdev->pm.backend.gpu_in_desired_state_wait);
+-	kbase_gpu_cache_clean_wait_complete(kbdev);
+-	backend->poweroff_wait_in_progress = false;
+-	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+-	wake_up(&kbdev->pm.backend.poweroff_wait);
++		/* Cancel any pending HWC dumps */
++		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
++		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
++		kbdev->hwcnt.backend.triggered = 1;
++		wake_up(&kbdev->hwcnt.backend.wait);
++		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
++	}
++	mutex_unlock(&arb_vm_state->vm_state_lock);
+ 	mutex_unlock(&kbdev->pm.lock);
+ }
++
+ #endif /* CONFIG_MALI_ARBITER_SUPPORT */
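+
The rewritten handler above takes kbdev->pm.lock and then the arbiter's vm_state_lock, and acts only when the GPU was powered and not already marked lost, so a repeated GPU-lost event becomes a no-op. A minimal userspace model of that idempotent guard (pthread mutexes stand in for the kernel locks; fields are placeholders):

    #include <stdbool.h>
    #include <pthread.h>

    static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t vm_state_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool gpu_powered = true;
    static bool gpu_lost;

    void handle_gpu_lost(void)
    {
        pthread_mutex_lock(&pm_lock);       /* outer lock first */
        pthread_mutex_lock(&vm_state_lock); /* then arbiter VM state */
        if (gpu_powered && !gpu_lost) {
            gpu_lost = true;
            /* cancel pending resets, clear running jobs, wake waiters */
        }
        pthread_mutex_unlock(&vm_state_lock);
        pthread_mutex_unlock(&pm_lock);
    }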
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c
+index e7eef26..efc620f 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2013-2018, 2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2013-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /*
+@@ -27,9 +26,6 @@
+ #include <mali_kbase.h>
+ #include <mali_kbase_pm.h>
+ #include <backend/gpu/mali_kbase_pm_internal.h>
+-#ifdef CONFIG_MALI_NO_MALI
+-#include <backend/gpu/mali_kbase_model_dummy.h>
+-#endif
+ #include <mali_kbase_dummy_job_wa.h>
+ 
+ int kbase_pm_ca_init(struct kbase_device *kbdev)
+@@ -59,6 +55,14 @@ void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
+ 
+ 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ 
++#if MALI_USE_CSF
++	if (!(core_mask & kbdev->pm.debug_core_mask)) {
++		dev_err(kbdev->dev,
++			"OPP core mask 0x%llX does not intersect with debug mask 0x%llX\n",
++			core_mask, kbdev->pm.debug_core_mask);
++		goto unlock;
++	}
++#else
+ 	if (!(core_mask & kbdev->pm.debug_core_mask_all)) {
+ 		dev_err(kbdev->dev, "OPP core mask 0x%llX does not intersect with debug mask 0x%llX\n",
+ 				core_mask, kbdev->pm.debug_core_mask_all);
+@@ -69,6 +73,7 @@ void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
+ 		dev_err(kbdev->dev, "Dynamic core scaling not supported as dummy job WA is enabled");
+ 		goto unlock;
+ 	}
++#endif /* MALI_USE_CSF */
+ 
+ 	pm_backend->ca_cores_enabled = core_mask;
+ 
+@@ -80,21 +85,32 @@ unlock:
+ 	dev_dbg(kbdev->dev, "Devfreq policy : new core mask=%llX\n",
+ 			pm_backend->ca_cores_enabled);
+ }
++KBASE_EXPORT_TEST_API(kbase_devfreq_set_core_mask);
+ #endif
+ 
+ u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
+ {
+-#ifdef CONFIG_MALI_DEVFREQ
+-	struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;
++#if MALI_USE_CSF
++	u64 debug_core_mask = kbdev->pm.debug_core_mask;
++#else
++	u64 debug_core_mask = kbdev->pm.debug_core_mask_all;
+ #endif
+ 
+ 	lockdep_assert_held(&kbdev->hwaccess_lock);
+ 
+ #ifdef CONFIG_MALI_DEVFREQ
+-	return pm_backend->ca_cores_enabled & kbdev->pm.debug_core_mask_all;
++	/*
++	 * Although init lets pm_backend->ca_cores_enabled be the max config
++	 * (it uses the base_gpu_props), this function must limit it to a
++	 * subset of the current config, otherwise the shaders state machine
++	 * in the PM does not evolve.
++	 */
++	return kbdev->gpu_props.curr_config.shader_present &
++			kbdev->pm.backend.ca_cores_enabled &
++			debug_core_mask;
+ #else
+-	return kbdev->gpu_props.props.raw_props.shader_present &
+-			kbdev->pm.debug_core_mask_all;
++	return kbdev->gpu_props.curr_config.shader_present &
++		debug_core_mask;
+ #endif
+ }
+ 
+@@ -104,8 +120,8 @@ u64 kbase_pm_ca_get_instr_core_mask(struct kbase_device *kbdev)
+ {
+ 	lockdep_assert_held(&kbdev->hwaccess_lock);
+ 
+-#ifdef CONFIG_MALI_NO_MALI
+-	return (((1ull) << KBASE_DUMMY_MODEL_MAX_SHADER_CORES) - 1);
++#if MALI_USE_CSF
++	return kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
+ #else
+ 	return kbdev->pm.backend.pm_shaders_core_mask;
+ #endif
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h
+index 5423e96..8d169c3 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2011-2018, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /*
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h
+index f67ec65..41f3c14 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2017, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /*
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
+index 9eef44a..f40b753 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2012-2016, 2018-2019 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2012-2016, 2018-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,12 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+-
+ /*
+  * "Coarse Demand" power management policy
+  */
+@@ -60,7 +57,11 @@ const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops = {
+ 	coarse_demand_term,			/* term */
+ 	coarse_demand_shaders_needed,		/* shaders_needed */
+ 	coarse_demand_get_core_active,		/* get_core_active */
++	NULL,					/* handle_event */
+ 	KBASE_PM_POLICY_ID_COARSE_DEMAND,	/* id */
++#if MALI_USE_CSF
++	COARSE_ON_DEMAND_PM_SCHED_FLAGS,	/* pm_sched_flags */
++#endif
+ };
+ 
+ KBASE_EXPORT_TEST_API(kbase_pm_coarse_demand_policy_ops);
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h
+index 304e5d7..5e3f17e 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2012-2015,2018 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2012-2015, 2018, 2020-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,12 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+-
+ /*
+  * "Coarse Demand" power management policy
+  */
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
+index f4bcf3e..1a0572b 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2014-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /*
+@@ -29,9 +28,6 @@
+ 
+ #include "mali_kbase_pm_always_on.h"
+ #include "mali_kbase_pm_coarse_demand.h"
+-#if !MALI_CUSTOMER_RELEASE
+-#include "mali_kbase_pm_always_on_demand.h"
+-#endif
+ 
+ /* Forward definition - see mali_kbase.h */
+ struct kbase_device;
+@@ -62,24 +58,9 @@ enum kbase_pm_core_type {
+ 	KBASE_PM_CORE_STACK = STACK_PRESENT_LO
+ };
+ 
+-/**
++/*
+  * enum kbase_l2_core_state - The states used for the L2 cache & tiler power
+  *                            state machine.
+- *
+- * @KBASE_L2_OFF: The L2 cache and tiler are off
+- * @KBASE_L2_PEND_ON: The L2 cache and tiler are powering on
+- * @KBASE_L2_RESTORE_CLOCKS: The GPU clock is restored. Conditionally used.
+- * @KBASE_L2_ON_HWCNT_ENABLE: The L2 cache and tiler are on, and hwcnt is being
+- *                            enabled
+- * @KBASE_L2_ON: The L2 cache and tiler are on, and hwcnt is enabled
+- * @KBASE_L2_ON_HWCNT_DISABLE: The L2 cache and tiler are on, and hwcnt is being
+- *                             disabled
+- * @KBASE_L2_SLOW_DOWN_CLOCKS: The GPU clock is set to appropriate or lowest
+- *                             clock. Conditionally used.
+- * @KBASE_L2_POWER_DOWN: The L2 cache and tiler are about to be powered off
+- * @KBASE_L2_PEND_OFF: The L2 cache and tiler are powering off
+- * @KBASE_L2_RESET_WAIT: The GPU is resetting, L2 cache and tiler power state
+- *                       are unknown
+  */
+ enum kbase_l2_core_state {
+ #define KBASEP_L2_STATE(n) KBASE_L2_ ## n,
+@@ -87,45 +68,19 @@ enum kbase_l2_core_state {
+ #undef KBASEP_L2_STATE
+ };
+ 
+-/**
++#if MALI_USE_CSF
++/*
++ * enum kbase_mcu_state - The states used for the MCU state machine.
++ */
++enum kbase_mcu_state {
++#define KBASEP_MCU_STATE(n) KBASE_MCU_ ## n,
++#include "mali_kbase_pm_mcu_states.h"
++#undef KBASEP_MCU_STATE
++};
++#endif
++
++/*
+  * enum kbase_shader_core_state - The states used for the shaders' state machine.
+- *
+- * @KBASE_SHADERS_OFF_CORESTACK_OFF: The shaders and core stacks are off
+- * @KBASE_SHADERS_OFF_CORESTACK_PEND_ON: The shaders are off, core stacks have
+- *                                       been requested to power on and hwcnt
+- *                                       is being disabled
+- * @KBASE_SHADERS_PEND_ON_CORESTACK_ON: Core stacks are on, shaders have been
+- *                                      requested to power on. Or after doing
+- *                                      partial shader on/off, checking whether
+- *                                      it's the desired state.
+- * @KBASE_SHADERS_ON_CORESTACK_ON: The shaders and core stacks are on, and hwcnt
+- *					already enabled.
+- * @KBASE_SHADERS_ON_CORESTACK_ON_RECHECK: The shaders and core stacks
+- *                                      are on, hwcnt disabled, and checks
+- *                                      to powering down or re-enabling
+- *                                      hwcnt.
+- * @KBASE_SHADERS_WAIT_OFF_CORESTACK_ON: The shaders have been requested to
+- *                                       power off, but they remain on for the
+- *                                       duration of the hysteresis timer
+- * @KBASE_SHADERS_WAIT_GPU_IDLE: The shaders partial poweroff needs to reach
+- *                               a state where jobs on the GPU are finished
+- *                               including jobs currently running and in the
+- *                               GPU queue because of GPU2017-861
+- * @KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON: The hysteresis timer has expired
+- * @KBASE_SHADERS_L2_FLUSHING_CORESTACK_ON: The core stacks are on and the
+- *                                          level 2 cache is being flushed.
+- * @KBASE_SHADERS_READY_OFF_CORESTACK_ON: The core stacks are on and the shaders
+- *                                        are ready to be powered off.
+- * @KBASE_SHADERS_PEND_OFF_CORESTACK_ON: The core stacks are on, and the shaders
+- *                                       have been requested to power off
+- * @KBASE_SHADERS_OFF_CORESTACK_PEND_OFF: The shaders are off, and the core stacks
+- *                                        have been requested to power off
+- * @KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF: Shaders and corestacks are
+- *                                                  off, but the tick timer
+- *                                                  cancellation is still
+- *                                                  pending.
+- * @KBASE_SHADERS_RESET_WAIT: The GPU is resetting, shader and core stack power
+- *                            states are unknown
+  */
+ enum kbase_shader_core_state {
+ #define KBASEP_SHADER_STATE(n) KBASE_SHADERS_ ## n,
+@@ -137,28 +92,40 @@ enum kbase_shader_core_state {
+  * struct kbasep_pm_metrics - Metrics data collected for use by the power
+  *                            management framework.
+  *
+- *  @time_busy: number of ns the GPU was busy executing jobs since the
+- *          @time_period_start timestamp.
+- *  @time_idle: number of ns since time_period_start the GPU was not executing
+- *          jobs since the @time_period_start timestamp.
+- *  @busy_cl: number of ns the GPU was busy executing CL jobs. Note that
+- *           if two CL jobs were active for 400ns, this value would be updated
+- *           with 800.
+- *  @busy_gl: number of ns the GPU was busy executing GL jobs. Note that
+- *           if two GL jobs were active for 400ns, this value would be updated
+- *           with 800.
++ *  @time_busy: the amount of time the GPU was busy executing jobs since the
++ *          @time_period_start timestamp, in units of 256ns. This also includes
++ *          @time_in_protm, the time spent in protected mode, since the GPU is
++ *          assumed to have been 100% busy during that period.
++ *  @time_idle: the amount of time the GPU was not executing jobs since the
++ *              time_period_start timestamp, measured in units of 256ns.
++ *  @time_in_protm: The amount of time the GPU has spent in protected mode since
++ *                  the time_period_start timestamp, measured in units of 256ns.
++ *  @busy_cl: the amount of time the GPU was busy executing CL jobs. Note that
++ *           if two CL jobs were active for 256ns, this value would be updated
++ *           with 2 (2x256ns).
++ *  @busy_gl: the amount of time the GPU was busy executing GL jobs. Note that
++ *           if two GL jobs were active for 256ns, this value would be updated
++ *           with 2 (2x256ns).
+  */
+ struct kbasep_pm_metrics {
+ 	u32 time_busy;
+ 	u32 time_idle;
++#if MALI_USE_CSF
++	u32 time_in_protm;
++#else
+ 	u32 busy_cl[2];
+ 	u32 busy_gl;
++#endif
+ };
+ 
+ /**
+  * struct kbasep_pm_metrics_state - State required to collect the metrics in
+  *                                  struct kbasep_pm_metrics
+  *  @time_period_start: time at which busy/idle measurements started
++ *  @ipa_control_client: Handle returned on registering DVFS as a
++ *                       kbase_ipa_control client
++ *  @skip_gpu_active_sanity_check: Decides whether to skip the GPU_ACTIVE
++ *                                 sanity check in DVFS utilisation calculation
+  *  @gpu_active: true when the GPU is executing jobs. false when
+  *           not. Updated when the job scheduler informs us a job in submitted
+  *           or removed from a GPU slot.
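As an aside on the 256ns units documented above: a DVFS governor consuming these counters would typically turn a pair of snapshots into a utilisation ratio, in which the 256ns scaling cancels out. The following is a minimal sketch, not driver code; only the kbasep_pm_metrics fields come from this patch, and the helper itself is invented for illustration (it also assumes sampling windows short enough that 100 * busy fits in a u32).

/* Illustrative only: derive a 0..100 utilisation figure from two
 * kbasep_pm_metrics snapshots. Both counters are in 256ns units, but
 * the ratio is unit-free, so no scaling by 256 is needed.
 */
static unsigned int pm_metrics_utilisation(const struct kbasep_pm_metrics *prev,
					   const struct kbasep_pm_metrics *cur)
{
	u32 busy = cur->time_busy - prev->time_busy;
	u32 idle = cur->time_idle - prev->time_idle;

	if (busy + idle == 0)
		return 0;	/* nothing measured in this window */

	return (100 * busy) / (busy + idle);
}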
+@@ -170,6 +137,7 @@ struct kbasep_pm_metrics {
+  *  @values: The current values of the power management metrics. The
+  *           kbase_pm_get_dvfs_metrics() function is used to compare these
+  *           current values with the saved values from a previous invocation.
++ *  @initialized: tracks whether metrics_state has been initialized or not.
+  *  @timer: timer to regularly make DVFS decisions based on the power
+  *           management metrics.
+  *  @timer_active: boolean indicating @timer is running
+@@ -178,9 +146,14 @@ struct kbasep_pm_metrics {
+  */
+ struct kbasep_pm_metrics_state {
+ 	ktime_t time_period_start;
++#if MALI_USE_CSF
++	void *ipa_control_client;
++	bool skip_gpu_active_sanity_check;
++#else
+ 	bool gpu_active;
+ 	u32 active_cl_ctx[2];
+ 	u32 active_gl_ctx[3];
++#endif
+ 	spinlock_t lock;
+ 
+ 	void *platform_data;
+@@ -189,6 +162,7 @@ struct kbasep_pm_metrics_state {
+ 	struct kbasep_pm_metrics values;
+ 
+ #ifdef CONFIG_MALI_MIDGARD_DVFS
++	bool initialized;
+ 	struct hrtimer timer;
+ 	bool timer_active;
+ 	struct kbasep_pm_metrics dvfs_last;
+@@ -202,8 +176,12 @@ struct kbasep_pm_metrics_state {
+  * @work: Work item which cancels the timer
+  * @timer: Timer for powering off the shader cores
+  * @configured_interval: Period of GPU poweroff timer
+- * @configured_ticks: User-configured number of ticks to wait after the shader
+- *                    power down request is received before turning off the cores
++ * @default_ticks: User-configured number of ticks to wait after the shader
++ *                 power down request is received before turning off the cores
++ * @configured_ticks: Power-policy configured number of ticks to wait after the
++ *                    shader power down request is received before turning off
++ *                    the cores. For simple power policies, this is equivalent
++ *                    to @default_ticks.
+  * @remaining_ticks: Number of remaining timer ticks until shaders are powered off
+  * @cancel_queued: True if the cancellation work item has been queued. This is
+  *                 required to ensure that it is not queued twice, e.g. after
+@@ -217,6 +195,7 @@ struct kbasep_pm_tick_timer_state {
+ 	struct hrtimer timer;
+ 
+ 	ktime_t configured_interval;
++	unsigned int default_ticks;
+ 	unsigned int configured_ticks;
+ 	unsigned int remaining_ticks;
+ 
+@@ -227,9 +206,6 @@ struct kbasep_pm_tick_timer_state {
+ union kbase_pm_policy_data {
+ 	struct kbasep_pm_policy_always_on always_on;
+ 	struct kbasep_pm_policy_coarse_demand coarse_demand;
+-#if !MALI_CUSTOMER_RELEASE
+-	struct kbasep_pm_policy_always_on_demand always_on_demand;
+-#endif
+ };
+ 
+ /**
+@@ -240,7 +216,8 @@ union kbase_pm_policy_data {
+  *
+  * @pm_current_policy: The policy that is currently actively controlling the
+  *                     power state.
+- * @pm_policy_data:    Private data for current PM policy
++ * @pm_policy_data:    Private data for current PM policy. This is automatically
++ *                     zeroed when a policy change occurs.
+  * @reset_done:        Flag when a reset is complete
+  * @reset_done_wait:   Wait queue to wait for changes to @reset_done
+  * @gpu_cycle_counter_requests: The reference count of active gpu cycle counter
+@@ -254,6 +231,11 @@ union kbase_pm_policy_data {
+  *                     variable should be protected by: both the hwaccess_lock
+  *                     spinlock and the pm.lock mutex for writes; or at least
+  *                     one of either lock for reads.
++ * @gpu_ready:         Indicates whether the GPU is in a state in which it is
++ *                     safe to perform PM changes. When false, the PM state
++ *                     machine needs to wait before making changes to the GPU
++ *                     power policy, DevFreq or core_mask, so as to avoid these
++ *                     changing while implicit GPU resets are ongoing.
+  * @pm_shaders_core_mask: Shader PM state synchronised shaders core mask. It
+  *                     holds the cores enabled in a hardware counters dump,
+  *                     and may differ from @shaders_avail when under different
+@@ -294,6 +276,8 @@ union kbase_pm_policy_data {
+  * @callback_soft_reset: Optional callback to software reset the GPU. See
+  *                       &struct kbase_pm_callback_conf
+  * @ca_cores_enabled: Cores that are currently available
++ * @mcu_state: The current state of the micro-control unit, only applicable
++ *             to GPUs that have such a component
+  * @l2_state:     The current state of the L2 cache state machine. See
+  *                &enum kbase_l2_core_state
+  * @l2_desired:   True if the L2 cache should be powered on by the L2 cache state
+@@ -303,10 +287,10 @@ union kbase_pm_policy_data {
+  * @shaders_avail: This is updated by the state machine when it is in a state
+  *                 where it can write to the SHADER_PWRON or PWROFF registers
+  *                 to have the same set of available cores as specified by
+- *                 @shaders_desired_mask. So it would eventually have the same
+- *                 value as @shaders_desired_mask and would precisely indicate
+- *                 the cores that are currently available. This is internal to
+- *                 shader state machine and should *not* be modified elsewhere.
++ *                 @shaders_desired_mask, so it precisely indicates the cores
++ *                 that are currently available. This is internal to the shader
++ *                 state machine of JM GPUs and should *not* be modified
++ *                 elsewhere.
+  * @shaders_desired_mask: This is updated by the state machine when it is in
+  *                        a state where it can handle changes to the core
+  *                        availability (either by DVFS or sysfs). This is
+@@ -318,6 +302,16 @@ union kbase_pm_policy_data {
+  *                   cores may be different, but there should be transitions in
+  *                   progress that will eventually achieve this state (assuming
+  *                   that the policy doesn't change its mind in the mean time).
++ * @mcu_desired: True if the micro-control unit should be powered on
++ * @policy_change_clamp_state_to_off: Signals that the backend is in a PM
++ *                policy change transition and needs the MCU/L2 brought back to
++ *                the off state and kept there until the flag is cleared.
++ * @csf_pm_sched_flags: CSF dynamic PM control flags in accordance with the
++ *                currently active PM policy. This field is updated whenever a
++ *                new policy is activated.
++ * @policy_change_lock: Used to serialize policy change calls. In the CSF
++ *                      case, a policy change may require the scheduler to
++ *                      suspend running CSGs and then reconfigure the MCU.
+  * @in_reset: True if a GPU is resetting and normal power manager operation is
+  *            suspended
+  * @partial_shaderoff: True if we want to partial power off shader cores,
+@@ -373,6 +367,7 @@ struct kbase_pm_backend_data {
+ 	wait_queue_head_t gpu_in_desired_state_wait;
+ 
+ 	bool gpu_powered;
++	bool gpu_ready;
+ 
+ 	u64 pm_shaders_core_mask;
+ 
+@@ -406,10 +401,19 @@ struct kbase_pm_backend_data {
+ 
+ 	u64 ca_cores_enabled;
+ 
++#if MALI_USE_CSF
++	enum kbase_mcu_state mcu_state;
++#endif
+ 	enum kbase_l2_core_state l2_state;
+ 	enum kbase_shader_core_state shaders_state;
+ 	u64 shaders_avail;
+ 	u64 shaders_desired_mask;
++#if MALI_USE_CSF
++	bool mcu_desired;
++	bool policy_change_clamp_state_to_off;
++	unsigned int csf_pm_sched_flags;
++	struct mutex policy_change_lock;
++#endif
+ 	bool l2_desired;
+ 	bool l2_always_on;
+ 	bool shaders_desired;
+@@ -433,6 +437,23 @@ struct kbase_pm_backend_data {
+ 	struct work_struct gpu_clock_control_work;
+ };
+ 
++#if MALI_USE_CSF
++/* CSF PM flag, signaling that the MCU core should be kept on */
++#define CSF_DYNAMIC_PM_CORE_KEEP_ON (1 << 0)
++/* CSF PM flag, signaling no scheduler suspension on idle groups */
++#define CSF_DYNAMIC_PM_SCHED_IGNORE_IDLE (1 << 1)
++/* CSF PM flag, signaling no scheduler suspension on no runnable groups */
++#define CSF_DYNAMIC_PM_SCHED_NO_SUSPEND (1 << 2)
++
++/* The following flags correspond to the existing predefined PM policies */
++#define ALWAYS_ON_PM_SCHED_FLAGS (CSF_DYNAMIC_PM_CORE_KEEP_ON | \
++				  CSF_DYNAMIC_PM_SCHED_IGNORE_IDLE | \
++				  CSF_DYNAMIC_PM_SCHED_NO_SUSPEND)
++#define COARSE_ON_DEMAND_PM_SCHED_FLAGS (0)
++#if !MALI_CUSTOMER_RELEASE
++#define ALWAYS_ON_DEMAND_PM_SCHED_FLAGS (CSF_DYNAMIC_PM_SCHED_IGNORE_IDLE)
++#endif
++#endif
+ 
+ /* List of policy IDs */
+ enum kbase_pm_policy_id {
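The three CSF_DYNAMIC_PM_* bits defined above are consumed by the CSF scheduler and the MCU state machine to decide whether idle groups may be suspended and whether the MCU may be powered off. A hedged sketch of how such tests might look follows; the helpers are hypothetical, and only the flag names come from this patch.

/* Hypothetical helpers illustrating how a policy's pm_sched_flags bits
 * would gate scheduler decisions; not actual driver code.
 */
static inline bool pm_policy_keeps_mcu_on(unsigned int pm_sched_flags)
{
	return pm_sched_flags & CSF_DYNAMIC_PM_CORE_KEEP_ON;
}

static inline bool pm_policy_suspends_idle_groups(unsigned int pm_sched_flags)
{
	return !(pm_sched_flags & CSF_DYNAMIC_PM_SCHED_IGNORE_IDLE);
}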
+@@ -443,6 +464,33 @@ enum kbase_pm_policy_id {
+ 	KBASE_PM_POLICY_ID_ALWAYS_ON
+ };
+ 
++/**
++ * enum kbase_pm_policy_event - PM Policy event ID
++ */
++enum kbase_pm_policy_event {
++	/**
++	 * @KBASE_PM_POLICY_EVENT_IDLE: Indicates that the GPU power state
++	 * model has determined that the GPU has gone idle.
++	 */
++	KBASE_PM_POLICY_EVENT_IDLE,
++	/**
++	 * @KBASE_PM_POLICY_EVENT_POWER_ON: Indicates that the GPU state model
++	 * is preparing to power on the GPU.
++	 */
++	KBASE_PM_POLICY_EVENT_POWER_ON,
++	/**
++	 * @KBASE_PM_POLICY_EVENT_TIMER_HIT: Indicates that the GPU became
++	 * active while the Shader Tick Timer was holding the GPU in a powered
++	 * on state.
++	 */
++	KBASE_PM_POLICY_EVENT_TIMER_HIT,
++	/**
++	 * @KBASE_PM_POLICY_EVENT_TIMER_MISS: Indicates that the GPU did not
++	 * become active before the Shader Tick Timer timeout occurred.
++	 */
++	KBASE_PM_POLICY_EVENT_TIMER_MISS,
++};
++
+ /**
+  * struct kbase_pm_policy - Power policy structure.
+  *
+@@ -455,15 +503,22 @@ enum kbase_pm_policy_id {
+  * @shaders_needed:     Function called to find out if shader cores are needed
+  * @get_core_active:    Function called to get the current overall GPU power
+  *                      state
++ * @handle_event:       Function called when a PM policy event occurs. Should be
++ *                      set to NULL if the power policy doesn't require any
++ *                      event notifications.
+  * @id:                 Field indicating an ID for this policy. This is not
+  *                      necessarily the same as its index in the list returned
+  *                      by kbase_pm_list_policies().
+  *                      It is used purely for debugging.
++ * @pm_sched_flags: CSF PM scheduling operational flags associated with the
++ *                  policy. Pre-defined required flags exist for each of the
++ *                  ARM-released policies, such as 'always_on' and
++ *                  'coarse_demand'.
+  */
+ struct kbase_pm_policy {
+ 	char *name;
+ 
+-	/**
++	/*
+ 	 * Function called when the policy is selected
+ 	 *
+ 	 * This should initialize the kbdev->pm.pm_policy_data structure. It
+@@ -477,7 +532,7 @@ struct kbase_pm_policy {
+ 	 */
+ 	void (*init)(struct kbase_device *kbdev);
+ 
+-	/**
++	/*
+ 	 * Function called when the policy is unselected.
+ 	 *
+ 	 * @kbdev: The kbase device structure for the device (must be a
+@@ -485,7 +540,7 @@ struct kbase_pm_policy {
+ 	 */
+ 	void (*term)(struct kbase_device *kbdev);
+ 
+-	/**
++	/*
+ 	 * Function called to find out if shader cores are needed
+ 	 *
+ 	 * This needs to at least satisfy kbdev->pm.backend.shaders_desired,
+@@ -498,7 +553,7 @@ struct kbase_pm_policy {
+ 	 */
+ 	bool (*shaders_needed)(struct kbase_device *kbdev);
+ 
+-	/**
++	/*
+ 	 * Function called to get the current overall GPU power state
+ 	 *
+ 	 * This function must meet or exceed the requirements for power
+@@ -511,7 +566,26 @@ struct kbase_pm_policy {
+ 	 */
+ 	bool (*get_core_active)(struct kbase_device *kbdev);
+ 
++	/**
++	 * Function called when a power event occurs
++	 *
++	 * @kbdev: The kbase device structure for the device (must be a
++	 *         valid pointer)
++	 * @event: The id of the power event that has occurred
++	 */
++	void (*handle_event)(struct kbase_device *kbdev,
++			     enum kbase_pm_policy_event event);
++
+ 	enum kbase_pm_policy_id id;
++
++#if MALI_USE_CSF
++	/* CSF PM scheduling operational flags associated with the policy.
++	 * Pre-defined required flags exist for each of the
++	 * ARM-released policies, such as 'always_on' and
++	 * 'coarse_demand'.
++	 */
++	unsigned int pm_sched_flags;
++#endif
+ };
+ 
+ #endif /* _KBASE_PM_HWACCESS_DEFS_H_ */
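To make the new handle_event hook concrete, here is a minimal sketch of how a policy instance could wire it up. Everything named example_* is invented for illustration; only the struct layout, the event IDs and ALWAYS_ON_PM_SCHED_FLAGS come from this header, and real policies also provide init/term callbacks.

/* Illustrative sketch of a kbase_pm_policy instance; not part of the
 * driver. handle_event may be NULL if a policy needs no notifications.
 */
static void example_handle_event(struct kbase_device *kbdev,
				 enum kbase_pm_policy_event event)
{
	/* e.g. a policy could tune its hysteresis on repeated misses */
	if (event == KBASE_PM_POLICY_EVENT_TIMER_MISS)
		dev_dbg(kbdev->dev, "shaders stayed idle past the tick timer\n");
}

static bool example_shaders_needed(struct kbase_device *kbdev)
{
	return true;	/* an always-on style policy always wants shaders */
}

static bool example_get_core_active(struct kbase_device *kbdev)
{
	return true;
}

static const struct kbase_pm_policy example_policy = {
	.name = "example_always_on",
	.init = NULL,			/* a real policy would set these */
	.term = NULL,
	.shaders_needed = example_shaders_needed,
	.get_core_active = example_get_core_active,
	.handle_event = example_handle_event,
	.id = KBASE_PM_POLICY_ID_ALWAYS_ON,
#if MALI_USE_CSF
	.pm_sched_flags = ALWAYS_ON_PM_SCHED_FLAGS,
#endif
};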
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
+index 6b821f7..bcada93 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
+@@ -1,12 +1,12 @@
+-// SPDX-License-Identifier: GPL-2.0
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -17,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /*
+@@ -32,12 +30,18 @@
+ #include <mali_kbase_pm.h>
+ #include <mali_kbase_config_defaults.h>
+ #include <mali_kbase_smc.h>
++
++#if MALI_USE_CSF
++#include <csf/ipa_control/mali_kbase_csf_ipa_control.h>
++#else
+ #include <mali_kbase_hwaccess_jm.h>
++#endif /* !MALI_USE_CSF */
++
+ #include <mali_kbase_reset_gpu.h>
+ #include <mali_kbase_ctx_sched.h>
+ #include <mali_kbase_hwcnt_context.h>
+ #include <backend/gpu/mali_kbase_cache_policy_backend.h>
+-#include <backend/gpu/mali_kbase_device_internal.h>
++#include <device/mali_kbase_device.h>
+ #include <backend/gpu/mali_kbase_irq_internal.h>
+ #include <backend/gpu/mali_kbase_pm_internal.h>
+ #include <backend/gpu/mali_kbase_l2_mmu_config.h>
+@@ -45,6 +49,9 @@
+ #ifdef CONFIG_MALI_ARBITER_SUPPORT
+ #include <arbiter/mali_kbase_arbiter_pm.h>
+ #endif /* CONFIG_MALI_ARBITER_SUPPORT */
++#if MALI_USE_CSF
++#include <csf/ipa_control/mali_kbase_csf_ipa_control.h>
++#endif
+ 
+ #include <linux/of.h>
+ 
+@@ -89,6 +96,28 @@ static u64 kbase_pm_get_state(
+ 		enum kbase_pm_core_type core_type,
+ 		enum kbasep_pm_action action);
+ 
++#if MALI_USE_CSF
++bool kbase_pm_is_mcu_desired(struct kbase_device *kbdev)
++{
++	lockdep_assert_held(&kbdev->hwaccess_lock);
++
++	if (unlikely(!kbdev->csf.firmware_inited))
++		return false;
++
++	if (kbdev->csf.scheduler.pm_active_count)
++		return true;
++
++	/* The MCU is supposed to be on only when scheduler.pm_active_count is
++	 * non-zero. For the always_on policy, however, the MCU needs to be
++	 * kept on, unless a policy change transition needs it off.
++	 */
++
++	return (kbdev->pm.backend.mcu_desired &&
++		kbase_pm_no_mcu_core_pwroff(kbdev) &&
++		!kbdev->pm.backend.policy_change_clamp_state_to_off);
++}
++#endif
++
+ bool kbase_pm_is_l2_desired(struct kbase_device *kbdev)
+ {
+ 	if (kbdev->pm.backend.protected_entry_transition_override)
+@@ -102,6 +131,11 @@ bool kbase_pm_is_l2_desired(struct kbase_device *kbdev)
+ 			!kbdev->pm.backend.shaders_desired)
+ 		return false;
+ 
++#if MALI_USE_CSF
++	if (kbdev->pm.backend.policy_change_clamp_state_to_off)
++		return false;
++#endif
++
+ 	return kbdev->pm.backend.l2_desired;
+ }
+ 
+@@ -210,7 +244,7 @@ static u32 core_type_to_reg(enum kbase_pm_core_type core_type,
+ 	return (u32)core_type + (u32)action;
+ }
+ 
+-#ifdef CONFIG_ARM64
++#if IS_ENABLED(CONFIG_ARM64)
+ static void mali_cci_flush_l2(struct kbase_device *kbdev)
+ {
+ 	const u32 mask = CLEAN_CACHES_COMPLETED | RESET_COMPLETED;
+@@ -233,7 +267,8 @@ static void mali_cci_flush_l2(struct kbase_device *kbdev)
+ 		GPU_CONTROL_REG(GPU_IRQ_RAWSTAT));
+ 
+ 	/* Wait for cache flush to complete before continuing, exit on
+-	 * gpu resets or loop expiry. */
++	 * gpu resets or loop expiry.
++	 */
+ 	while (((raw & mask) == 0) && --loops) {
+ 		raw = kbase_reg_read(kbdev,
+ 					GPU_CONTROL_REG(GPU_IRQ_RAWSTAT));
+@@ -372,9 +407,9 @@ u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
+ 
+ 	switch (type) {
+ 	case KBASE_PM_CORE_L2:
+-		return kbdev->gpu_props.props.raw_props.l2_present;
++		return kbdev->gpu_props.curr_config.l2_present;
+ 	case KBASE_PM_CORE_SHADER:
+-		return kbdev->gpu_props.props.raw_props.shader_present;
++		return kbdev->gpu_props.curr_config.shader_present;
+ 	case KBASE_PM_CORE_TILER:
+ 		return kbdev->gpu_props.props.raw_props.tiler_present;
+ 	case KBASE_PM_CORE_STACK:
+@@ -468,14 +503,10 @@ static void kbase_pm_trigger_hwcnt_disable(struct kbase_device *kbdev)
+ 	 */
+ 	if (kbase_hwcnt_context_disable_atomic(kbdev->hwcnt_gpu_ctx)) {
+ 		backend->hwcnt_disabled = true;
++
+ 	} else {
+-#if KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE
+-		queue_work(system_wq,
+-			&backend->hwcnt_disable_work);
+-#else
+-		queue_work(system_highpri_wq,
+-			&backend->hwcnt_disable_work);
+-#endif
++		kbase_hwcnt_context_queue_work(kbdev->hwcnt_gpu_ctx,
++					       &backend->hwcnt_disable_work);
+ 	}
+ }
+ 
+@@ -493,7 +524,8 @@ static void kbase_pm_l2_config_override(struct kbase_device *kbdev)
+ 	 * Skip if size and hash are not given explicitly,
+ 	 * which means default values are used.
+ 	 */
+-	if ((kbdev->l2_size_override == 0) && (kbdev->l2_hash_override == 0))
++	if ((kbdev->l2_size_override == 0) && (kbdev->l2_hash_override == 0) &&
++	    (!kbdev->l2_hash_values_override))
+ 		return;
+ 
+ 	val = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_CONFIG));
+@@ -504,13 +536,25 @@ static void kbase_pm_l2_config_override(struct kbase_device *kbdev)
+ 	}
+ 
+ 	if (kbdev->l2_hash_override) {
++		WARN_ON(kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_ASN_HASH));
+ 		val &= ~L2_CONFIG_HASH_MASK;
+ 		val |= (kbdev->l2_hash_override << L2_CONFIG_HASH_SHIFT);
++	} else if (kbdev->l2_hash_values_override) {
++		int i;
++
++		WARN_ON(!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_ASN_HASH));
++		val &= ~L2_CONFIG_ASN_HASH_ENABLE_MASK;
++		val |= (0x1 << L2_CONFIG_ASN_HASH_ENABLE_SHIFT);
++
++		for (i = 0; i < ASN_HASH_COUNT; i++) {
++			dev_dbg(kbdev->dev, "Program 0x%x to ASN_HASH[%d]\n",
++				kbdev->l2_hash_values[i], i);
++			kbase_reg_write(kbdev, GPU_CONTROL_REG(ASN_HASH(i)),
++					kbdev->l2_hash_values[i]);
++		}
+ 	}
+ 
+ 	dev_dbg(kbdev->dev, "Program 0x%x to L2_CONFIG\n", val);
+-
+-	/* Write L2_CONFIG to override */
+ 	kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_CONFIG), val);
+ }
+ 
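The override above is a standard read-modify-write of a register bitfield: clear the field using its mask, then OR in the new value shifted into position. In generic form (the helper and its parameter names below are placeholders, not the driver's):

/* Generic read-modify-write of one bitfield within a register value;
 * field_mask/field_shift stand in for pairs like L2_CONFIG_HASH_MASK
 * and L2_CONFIG_HASH_SHIFT.
 */
static u32 set_reg_field(u32 reg_val, u32 field_mask,
			 unsigned int field_shift, u32 new_val)
{
	reg_val &= ~field_mask;				/* clear old field */
	reg_val |= (new_val << field_shift) & field_mask; /* insert new */
	return reg_val;
}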
+@@ -523,6 +567,278 @@ static void kbase_pm_control_gpu_clock(struct kbase_device *kbdev)
+ 	queue_work(system_wq, &backend->gpu_clock_control_work);
+ }
+ 
++#if MALI_USE_CSF
++static const char *kbase_mcu_state_to_string(enum kbase_mcu_state state)
++{
++	const char *const strings[] = {
++#define KBASEP_MCU_STATE(n) #n,
++#include "mali_kbase_pm_mcu_states.h"
++#undef KBASEP_MCU_STATE
++	};
++	if (WARN_ON((size_t)state >= ARRAY_SIZE(strings)))
++		return "Bad MCU state";
++	else
++		return strings[state];
++}
++
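kbase_mcu_state_to_string() above uses the X-macro idiom: mali_kbase_pm_mcu_states.h lists every state once through KBASEP_MCU_STATE(n), and each includer redefines that macro to expand the list differently (enumerators in the defs header, name strings here). Below is a self-contained sketch of the same idiom, using an inline list macro instead of a shared header and invented state names.

/* Self-contained illustration of the X-macro idiom; the states are
 * invented and the list lives in a macro rather than a shared header.
 */
#define EXAMPLE_STATES(X) \
	X(OFF) \
	X(PEND_ON) \
	X(ON)

#define AS_ENUM(n) EXAMPLE_##n,
enum example_state { EXAMPLE_STATES(AS_ENUM) };
#undef AS_ENUM

#define AS_NAME(n) #n,
static const char *const example_state_names[] = {
	EXAMPLE_STATES(AS_NAME)
};
#undef AS_NAME

/* example_state_names[EXAMPLE_ON] is "ON"; the enum and the strings
 * stay in sync because both expand from the single list above.
 */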
++static inline bool kbase_pm_handle_mcu_core_attr_update(struct kbase_device *kbdev)
++{
++	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
++	bool timer_update;
++	bool core_mask_update;
++
++	lockdep_assert_held(&kbdev->hwaccess_lock);
++
++	WARN_ON(backend->mcu_state != KBASE_MCU_ON);
++
++	/* This function only applies when the MCU is managing the cores; if
++	 * the firmware is in host-control mode, do nothing here.
++	 */
++	if (unlikely(kbdev->csf.firmware_hctl_core_pwr))
++		return false;
++
++	core_mask_update =
++		backend->shaders_avail != backend->shaders_desired_mask;
++
++	timer_update = kbdev->csf.mcu_core_pwroff_dur_count !=
++			kbdev->csf.mcu_core_pwroff_reg_shadow;
++
++	if (core_mask_update || timer_update)
++		kbase_csf_firmware_update_core_attr(kbdev, timer_update,
++			core_mask_update, backend->shaders_desired_mask);
++
++	return (core_mask_update || timer_update);
++}
++
++static int kbase_pm_mcu_update_state(struct kbase_device *kbdev)
++{
++	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
++	enum kbase_mcu_state prev_state;
++
++	lockdep_assert_held(&kbdev->hwaccess_lock);
++
++	/*
++	 * The initial load of the firmware must have been done before
++	 * the MCU state machine can be exercised.
++	 */
++	if (unlikely(!kbdev->csf.firmware_inited)) {
++		WARN_ON(backend->mcu_state != KBASE_MCU_OFF);
++		return -EIO;
++	}
++
++	do {
++		u64 shaders_trans = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_SHADER);
++		u64 shaders_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
++
++		/* mask off ready from trans in case transitions finished
++		 * between the register reads
++		 */
++		shaders_trans &= ~shaders_ready;
++
++		prev_state = backend->mcu_state;
++
++		switch (backend->mcu_state) {
++		case KBASE_MCU_OFF:
++			if (kbase_pm_is_mcu_desired(kbdev) &&
++			    !backend->policy_change_clamp_state_to_off &&
++			    backend->l2_state == KBASE_L2_ON) {
++				kbase_csf_firmware_trigger_reload(kbdev);
++				backend->mcu_state = KBASE_MCU_PEND_ON_RELOAD;
++			}
++			break;
++
++		case KBASE_MCU_PEND_ON_RELOAD:
++			if (kbdev->csf.firmware_reloaded) {
++				backend->shaders_desired_mask =
++					kbase_pm_ca_get_core_mask(kbdev);
++				kbase_csf_firmware_global_reinit(kbdev,
++					backend->shaders_desired_mask);
++				backend->mcu_state =
++					KBASE_MCU_ON_GLB_REINIT_PEND;
++			}
++			break;
++
++		case KBASE_MCU_ON_GLB_REINIT_PEND:
++			if (kbase_csf_firmware_global_reinit_complete(kbdev)) {
++				backend->shaders_avail =
++						backend->shaders_desired_mask;
++				backend->pm_shaders_core_mask = 0;
++				if (kbdev->csf.firmware_hctl_core_pwr) {
++					kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
++						backend->shaders_avail, ACTION_PWRON);
++					backend->mcu_state =
++						KBASE_MCU_HCTL_SHADERS_PEND_ON;
++				} else
++					backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
++			}
++			break;
++
++		case KBASE_MCU_HCTL_SHADERS_PEND_ON:
++			if (!shaders_trans &&
++			    shaders_ready == backend->shaders_avail) {
++				/* Cores now stable, notify the MCU of the stable mask */
++				kbase_csf_firmware_update_core_attr(kbdev,
++						false, true, shaders_ready);
++
++				backend->pm_shaders_core_mask = shaders_ready;
++				backend->mcu_state =
++					KBASE_MCU_HCTL_CORES_NOTIFY_PEND;
++			}
++			break;
++
++		case KBASE_MCU_HCTL_CORES_NOTIFY_PEND:
++			/* Wait for the acknowledgement */
++			if (kbase_csf_firmware_core_attr_updated(kbdev))
++				backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
++			break;
++
++		case KBASE_MCU_ON_HWCNT_ENABLE:
++			backend->hwcnt_desired = true;
++			if (backend->hwcnt_disabled) {
++				unsigned long flags;
++
++				kbase_csf_scheduler_spin_lock(kbdev, &flags);
++				kbase_hwcnt_context_enable(
++					kbdev->hwcnt_gpu_ctx);
++				kbase_csf_scheduler_spin_unlock(kbdev, flags);
++				backend->hwcnt_disabled = false;
++			}
++			backend->mcu_state = KBASE_MCU_ON;
++			break;
++
++		case KBASE_MCU_ON:
++			backend->shaders_desired_mask = kbase_pm_ca_get_core_mask(kbdev);
++
++			if (!kbase_pm_is_mcu_desired(kbdev))
++				backend->mcu_state = KBASE_MCU_ON_HWCNT_DISABLE;
++			else if (kbdev->csf.firmware_hctl_core_pwr) {
++				/* Host control: additional cores need to be powered on */
++				if (backend->shaders_desired_mask & ~shaders_ready) {
++					backend->hwcnt_desired = false;
++					if (!backend->hwcnt_disabled)
++						kbase_pm_trigger_hwcnt_disable(kbdev);
++					backend->mcu_state =
++						KBASE_MCU_HCTL_MCU_ON_RECHECK;
++				}
++			} else if (kbase_pm_handle_mcu_core_attr_update(kbdev))
++				kbdev->pm.backend.mcu_state =
++					KBASE_MCU_ON_CORE_ATTR_UPDATE_PEND;
++			break;
++
++		case KBASE_MCU_HCTL_MCU_ON_RECHECK:
++			backend->shaders_desired_mask = kbase_pm_ca_get_core_mask(kbdev);
++
++			if (!backend->hwcnt_disabled) {
++				/* Wait for being disabled */
++				;
++			} else if (!kbase_pm_is_mcu_desired(kbdev)) {
++				/* Converging to MCU powering down flow */
++				backend->mcu_state = KBASE_MCU_ON_HWCNT_DISABLE;
++			} else if (backend->shaders_desired_mask & ~shaders_ready) {
++				/* Set shaders_avail to cover both ready and
++				 * desired cores, so SHADERS_PEND_ON can pass
++				 */
++				backend->shaders_avail =
++					(backend->shaders_desired_mask | shaders_ready);
++
++				kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
++						backend->shaders_avail & ~shaders_ready,
++						ACTION_PWRON);
++				backend->mcu_state =
++					KBASE_MCU_HCTL_SHADERS_PEND_ON;
++			} else {
++				backend->mcu_state =
++					KBASE_MCU_HCTL_SHADERS_PEND_ON;
++			}
++			break;
++
++		case KBASE_MCU_ON_CORE_ATTR_UPDATE_PEND:
++			if (kbase_csf_firmware_core_attr_updated(kbdev)) {
++				backend->shaders_avail =
++					backend->shaders_desired_mask;
++				backend->mcu_state = KBASE_MCU_ON;
++			}
++			break;
++
++		case KBASE_MCU_ON_HWCNT_DISABLE:
++			if (kbase_pm_is_mcu_desired(kbdev)) {
++				backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
++				break;
++			}
++
++			backend->hwcnt_desired = false;
++			if (!backend->hwcnt_disabled)
++				kbase_pm_trigger_hwcnt_disable(kbdev);
++
++			if (backend->hwcnt_disabled)
++				backend->mcu_state = KBASE_MCU_ON_HALT;
++			break;
++
++		case KBASE_MCU_ON_HALT:
++			if (!kbase_pm_is_mcu_desired(kbdev)) {
++				kbase_csf_firmware_trigger_mcu_halt(kbdev);
++				backend->mcu_state = KBASE_MCU_ON_PEND_HALT;
++			} else
++				backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
++			break;
++
++		case KBASE_MCU_ON_PEND_HALT:
++			if (kbase_csf_firmware_mcu_halted(kbdev)) {
++				if (kbdev->csf.firmware_hctl_core_pwr)
++					backend->mcu_state =
++						KBASE_MCU_HCTL_SHADERS_READY_OFF;
++				else
++					backend->mcu_state = KBASE_MCU_POWER_DOWN;
++			}
++			break;
++
++		case KBASE_MCU_HCTL_SHADERS_READY_OFF:
++			kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
++					shaders_ready, ACTION_PWROFF);
++			backend->mcu_state =
++				KBASE_MCU_HCTL_SHADERS_PEND_OFF;
++			break;
++
++		case KBASE_MCU_HCTL_SHADERS_PEND_OFF:
++			if (!shaders_trans && !shaders_ready) {
++				backend->pm_shaders_core_mask = 0;
++				backend->mcu_state = KBASE_MCU_POWER_DOWN;
++			}
++			break;
++
++		case KBASE_MCU_POWER_DOWN:
++			kbase_csf_firmware_disable_mcu(kbdev);
++			backend->mcu_state = KBASE_MCU_PEND_OFF;
++			break;
++
++		case KBASE_MCU_PEND_OFF:
++			/* wait synchronously for the MCU to get disabled */
++			kbase_csf_firmware_disable_mcu_wait(kbdev);
++			backend->mcu_state = KBASE_MCU_OFF;
++			break;
++
++		case KBASE_MCU_RESET_WAIT:
++			/* Reset complete */
++			if (!backend->in_reset)
++				backend->mcu_state = KBASE_MCU_OFF;
++			break;
++
++		default:
++			WARN(1, "Invalid state in mcu_state: %d",
++			     backend->mcu_state);
++		}
++
++		if (backend->mcu_state != prev_state)
++			dev_dbg(kbdev->dev, "MCU state transition: %s to %s\n",
++				kbase_mcu_state_to_string(prev_state),
++				kbase_mcu_state_to_string(backend->mcu_state));
++
++	} while (backend->mcu_state != prev_state);
++
++	return 0;
++}
++#endif
++
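Like the shader and L2 state machines, kbase_pm_mcu_update_state() above loops until the state stops changing, re-sampling the hardware-derived inputs on each pass so that multi-step transitions can settle within a single call. The skeleton of that run-to-quiescence pattern, stripped of driver specifics, with invented states and conditions:

/* Skeleton of the run-to-quiescence loop used by the PM state machines;
 * the states and transition conditions here are placeholders.
 */
enum example_sm_state { SM_OFF, SM_PEND_ON, SM_ON };

static void sm_run_to_quiescence(enum example_sm_state *state,
				 bool desired_on, bool hw_ready)
{
	enum example_sm_state prev;

	do {
		prev = *state;

		switch (*state) {
		case SM_OFF:
			if (desired_on)
				*state = SM_PEND_ON;	/* kick off power-up */
			break;
		case SM_PEND_ON:
			if (hw_ready)
				*state = SM_ON;		/* transition finished */
			break;
		case SM_ON:
			if (!desired_on)
				*state = SM_OFF;
			break;
		}
	} while (*state != prev);	/* stop once no transition fires */
}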
+ static const char *kbase_l2_core_state_to_string(enum kbase_l2_core_state state)
+ {
+ 	const char *const strings[] = {
+@@ -539,8 +855,10 @@ static const char *kbase_l2_core_state_to_string(enum kbase_l2_core_state state)
+ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)
+ {
+ 	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+-	u64 l2_present = kbdev->gpu_props.props.raw_props.l2_present;
++	u64 l2_present = kbdev->gpu_props.curr_config.l2_present;
++#if !MALI_USE_CSF
+ 	u64 tiler_present = kbdev->gpu_props.props.raw_props.tiler_present;
++#endif
+ 	enum kbase_l2_core_state prev_state;
+ 
+ 	lockdep_assert_held(&kbdev->hwaccess_lock);
+@@ -551,35 +869,49 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)
+ 				KBASE_PM_CORE_L2);
+ 		u64 l2_ready = kbase_pm_get_ready_cores(kbdev,
+ 				KBASE_PM_CORE_L2);
++
++#if !MALI_USE_CSF
+ 		u64 tiler_trans = kbase_pm_get_trans_cores(kbdev,
+ 				KBASE_PM_CORE_TILER);
+ 		u64 tiler_ready = kbase_pm_get_ready_cores(kbdev,
+ 				KBASE_PM_CORE_TILER);
++#endif
+ 
+ 		/*
+ 		 * kbase_pm_get_ready_cores and kbase_pm_get_trans_cores
+ 		 * are vulnerable to corruption if gpu is lost
+ 		 */
+-		if (kbase_is_gpu_lost(kbdev))
+-			return -EIO;
++		if (kbase_is_gpu_removed(kbdev)
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++				|| kbase_pm_is_gpu_lost(kbdev)) {
++#else
++				) {
++#endif
++			backend->shaders_state =
++				KBASE_SHADERS_OFF_CORESTACK_OFF;
++			backend->l2_state = KBASE_L2_OFF;
++			dev_dbg(kbdev->dev, "GPU lost has occurred - L2 off\n");
++			break;
++		}
+ 
+ 		/* mask off ready from trans in case transitions finished
+ 		 * between the register reads
+ 		 */
+ 		l2_trans &= ~l2_ready;
++#if !MALI_USE_CSF
+ 		tiler_trans &= ~tiler_ready;
+-
++#endif
+ 		prev_state = backend->l2_state;
+ 
+ 		switch (backend->l2_state) {
+ 		case KBASE_L2_OFF:
+ 			if (kbase_pm_is_l2_desired(kbdev)) {
+ 				/*
+-				 * Set the desired config for L2 before powering
+-				 * it on
++				 * Set the desired config for L2 before
++				 * powering it on
+ 				 */
+ 				kbase_pm_l2_config_override(kbdev);
+-
++#if !MALI_USE_CSF
+ 				/* L2 is required, power on.  Powering on the
+ 				 * tiler will also power the first L2 cache.
+ 				 */
+@@ -593,14 +925,30 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)
+ 					kbase_pm_invoke(kbdev, KBASE_PM_CORE_L2,
+ 							l2_present & ~1,
+ 							ACTION_PWRON);
++#else
++				/* With CSF firmware, the host driver doesn't need to
++				 * handle power management of the shader and tiler cores.
++				 * The CSF firmware will power up the cores appropriately,
++				 * so only power on the L2 cache explicitly.
++				 */
++				kbase_pm_invoke(kbdev, KBASE_PM_CORE_L2,
++						l2_present, ACTION_PWRON);
++#endif
+ 				backend->l2_state = KBASE_L2_PEND_ON;
+ 			}
+ 			break;
+ 
+ 		case KBASE_L2_PEND_ON:
++#if !MALI_USE_CSF
+ 			if (!l2_trans && l2_ready == l2_present && !tiler_trans
+ 					&& tiler_ready == tiler_present) {
+-				KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER, NULL, tiler_ready);
++				KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER, NULL,
++						tiler_ready);
++#else
++			if (!l2_trans && l2_ready == l2_present) {
++				KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_L2, NULL,
++						l2_ready);
++#endif
+ 				/*
+ 				 * Ensure snoops are enabled after L2 is powered
+ 				 * up. Note that kbase keeps track of the snoop
+@@ -658,22 +1006,30 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)
+ 			break;
+ 
+ 		case KBASE_L2_ON_HWCNT_ENABLE:
++#if !MALI_USE_CSF
+ 			backend->hwcnt_desired = true;
+ 			if (backend->hwcnt_disabled) {
+ 				kbase_hwcnt_context_enable(
+ 					kbdev->hwcnt_gpu_ctx);
+ 				backend->hwcnt_disabled = false;
+ 			}
++#endif
+ 			backend->l2_state = KBASE_L2_ON;
+ 			break;
+ 
+ 		case KBASE_L2_ON:
+ 			if (!kbase_pm_is_l2_desired(kbdev)) {
++#if !MALI_USE_CSF
+ 				/* Do not power off L2 until the shaders and
+ 				 * core stacks are off.
+ 				 */
+ 				if (backend->shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF)
+ 					break;
++#else
++				/* Do not power off L2 until the MCU has been stopped */
++				if (backend->mcu_state != KBASE_MCU_OFF)
++					break;
++#endif
+ 
+ 				/* We need to make sure hardware counters are
+ 				 * disabled before powering down the L2, to
+@@ -690,6 +1046,7 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)
+ 			break;
+ 
+ 		case KBASE_L2_ON_HWCNT_DISABLE:
++#if !MALI_USE_CSF
+ 			/* If the L2 became desired while we were waiting on the
+ 			 * worker to do the actual hwcnt disable (which might
+ 			 * happen if some work was submitted immediately after
+@@ -719,6 +1076,7 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)
+ 			if (!backend->hwcnt_disabled) {
+ 				kbase_pm_trigger_hwcnt_disable(kbdev);
+ 			}
++#endif
+ 
+ 			if (backend->hwcnt_disabled) {
+ 				if (kbdev->pm.backend.gpu_clock_slow_down_wa)
+@@ -769,9 +1127,11 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)
+ 				 */
+ 				kbase_gpu_start_cache_clean_nolock(
+ 						kbdev);
+-
++#if !MALI_USE_CSF
+ 			KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER, NULL, 0u);
+-
++#else
++			KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_L2, NULL, 0u);
++#endif
+ 			backend->l2_state = KBASE_L2_PEND_OFF;
+ 			break;
+ 
+@@ -877,6 +1237,7 @@ static void shader_poweroff_timer_queue_cancel(struct kbase_device *kbdev)
+ 	}
+ }
+ 
++#if !MALI_USE_CSF
+ static const char *kbase_shader_core_state_to_string(
+ 	enum kbase_shader_core_state state)
+ {
+@@ -898,7 +1259,6 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
+ 			&kbdev->pm.backend.shader_tick_timer;
+ 	enum kbase_shader_core_state prev_state;
+ 	u64 stacks_avail = 0;
+-	int err = 0;
+ 
+ 	lockdep_assert_held(&kbdev->hwaccess_lock);
+ 
+@@ -924,8 +1284,15 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
+ 		 * kbase_pm_get_ready_cores and kbase_pm_get_trans_cores
+ 		 * are vulnerable to corruption if gpu is lost
+ 		 */
+-		if (kbase_is_gpu_lost(kbdev)) {
+-			err = -EIO;
++		if (kbase_is_gpu_removed(kbdev)
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++				|| kbase_pm_is_gpu_lost(kbdev)) {
++#else
++				) {
++#endif
++			backend->shaders_state =
++				KBASE_SHADERS_OFF_CORESTACK_OFF;
++			dev_dbg(kbdev->dev, "GPU lost has occurred - shaders off\n");
+ 			break;
+ 		}
+ 
+@@ -976,6 +1343,12 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
+ 				kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
+ 						backend->shaders_avail, ACTION_PWRON);
+ 
++				if (backend->pm_current_policy &&
++				    backend->pm_current_policy->handle_event)
++					backend->pm_current_policy->handle_event(
++						kbdev,
++						KBASE_PM_POLICY_EVENT_POWER_ON);
++
+ 				backend->shaders_state = KBASE_SHADERS_PEND_ON_CORESTACK_ON;
+ 			}
+ 			break;
+@@ -986,8 +1359,18 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
+ 				backend->pm_shaders_core_mask = shaders_ready;
+ 				backend->hwcnt_desired = true;
+ 				if (backend->hwcnt_disabled) {
++#if MALI_USE_CSF
++					unsigned long flags;
++
++					kbase_csf_scheduler_spin_lock(kbdev,
++								      &flags);
++#endif
+ 					kbase_hwcnt_context_enable(
+ 						kbdev->hwcnt_gpu_ctx);
++#if MALI_USE_CSF
++					kbase_csf_scheduler_spin_unlock(kbdev,
++									flags);
++#endif
+ 					backend->hwcnt_disabled = false;
+ 				}
+ 
+@@ -1018,6 +1401,12 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
+ 				/* Wait for being disabled */
+ 				;
+ 			} else if (!backend->shaders_desired) {
++				if (backend->pm_current_policy &&
++				    backend->pm_current_policy->handle_event)
++					backend->pm_current_policy->handle_event(
++						kbdev,
++						KBASE_PM_POLICY_EVENT_IDLE);
++
+ 				if (kbdev->pm.backend.protected_transition_override ||
+ #ifdef CONFIG_MALI_ARBITER_SUPPORT
+ 						kbase_pm_is_suspending(kbdev) ||
+@@ -1078,9 +1467,21 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
+ 			}
+ 
+ 			if (backend->shaders_desired) {
++				if (backend->pm_current_policy &&
++				    backend->pm_current_policy->handle_event)
++					backend->pm_current_policy->handle_event(
++						kbdev,
++						KBASE_PM_POLICY_EVENT_TIMER_HIT);
++
+ 				stt->remaining_ticks = 0;
+ 				backend->shaders_state = KBASE_SHADERS_ON_CORESTACK_ON_RECHECK;
+ 			} else if (stt->remaining_ticks == 0) {
++				if (backend->pm_current_policy &&
++				    backend->pm_current_policy->handle_event)
++					backend->pm_current_policy->handle_event(
++						kbdev,
++						KBASE_PM_POLICY_EVENT_TIMER_MISS);
++
+ 				backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
+ #ifdef CONFIG_MALI_ARBITER_SUPPORT
+ 			} else if (kbase_pm_is_suspending(kbdev) ||
+@@ -1167,8 +1568,18 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
+ 				backend->pm_shaders_core_mask = 0;
+ 				backend->hwcnt_desired = true;
+ 				if (backend->hwcnt_disabled) {
++#if MALI_USE_CSF
++					unsigned long flags;
++
++					kbase_csf_scheduler_spin_lock(kbdev,
++								      &flags);
++#endif
+ 					kbase_hwcnt_context_enable(
+ 						kbdev->hwcnt_gpu_ctx);
++#if MALI_USE_CSF
++					kbase_csf_scheduler_spin_unlock(kbdev,
++									flags);
++#endif
+ 					backend->hwcnt_disabled = false;
+ 				}
+ 				backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF;
+@@ -1195,8 +1606,9 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
+ 
+ 	} while (backend->shaders_state != prev_state);
+ 
+-	return err;
++	return 0;
+ }
++#endif
+ 
+ static bool kbase_pm_is_in_desired_state_nolock(struct kbase_device *kbdev)
+ {
+@@ -1211,12 +1623,21 @@ static bool kbase_pm_is_in_desired_state_nolock(struct kbase_device *kbdev)
+ 			kbdev->pm.backend.l2_state != KBASE_L2_OFF)
+ 		in_desired_state = false;
+ 
++#if !MALI_USE_CSF
+ 	if (kbdev->pm.backend.shaders_desired &&
+ 			kbdev->pm.backend.shaders_state != KBASE_SHADERS_ON_CORESTACK_ON)
+ 		in_desired_state = false;
+ 	else if (!kbdev->pm.backend.shaders_desired &&
+ 			kbdev->pm.backend.shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF)
+ 		in_desired_state = false;
++#else
++	if (kbase_pm_is_mcu_desired(kbdev) &&
++	    kbdev->pm.backend.mcu_state != KBASE_MCU_ON)
++		in_desired_state = false;
++	else if (!kbase_pm_is_mcu_desired(kbdev) &&
++		 kbdev->pm.backend.mcu_state != KBASE_MCU_OFF)
++		in_desired_state = false;
++#endif
+ 
+ 	return in_desired_state;
+ }
+@@ -1280,17 +1701,22 @@ static void kbase_pm_trace_power_state(struct kbase_device *kbdev)
+ 
+ void kbase_pm_update_state(struct kbase_device *kbdev)
+ {
++#if !MALI_USE_CSF
+ 	enum kbase_shader_core_state prev_shaders_state =
+ 			kbdev->pm.backend.shaders_state;
++#else
++	enum kbase_mcu_state prev_mcu_state = kbdev->pm.backend.mcu_state;
++#endif
+ 
+ 	lockdep_assert_held(&kbdev->hwaccess_lock);
+ 
+-	if (!kbdev->pm.backend.gpu_powered)
+-		return; /* Do nothing if the GPU is off */
++	if (!kbdev->pm.backend.gpu_ready)
++		return; /* Do nothing if the GPU is not ready */
+ 
+ 	if (kbase_pm_l2_update_state(kbdev))
+ 		return;
+ 
++#if !MALI_USE_CSF
+ 	if (kbase_pm_shaders_update_state(kbdev))
+ 		return;
+ 
+@@ -1304,9 +1730,20 @@ void kbase_pm_update_state(struct kbase_device *kbdev)
+ 		if (kbase_pm_l2_update_state(kbdev))
+ 			return;
+ 		}
++#else
++	if (kbase_pm_mcu_update_state(kbdev))
++		return;
++
++	if (prev_mcu_state != KBASE_MCU_OFF &&
++	    kbdev->pm.backend.mcu_state == KBASE_MCU_OFF) {
++		if (kbase_pm_l2_update_state(kbdev))
++			return;
++	}
++#endif
+ 
+ 	if (kbase_pm_is_in_desired_state_nolock(kbdev)) {
+-		KBASE_KTRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL, kbdev->pm.backend.shaders_avail);
++		KBASE_KTRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL,
++				 kbdev->pm.backend.shaders_avail);
+ 
+ 		kbase_pm_trace_power_state(kbdev);
+ 
+@@ -1363,7 +1800,8 @@ int kbase_pm_state_machine_init(struct kbase_device *kbdev)
+ 	hrtimer_init(&stt->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ 	stt->timer.function = shader_tick_timer_callback;
+ 	stt->configured_interval = HR_TIMER_DELAY_NSEC(DEFAULT_PM_GPU_POWEROFF_TICK_NS);
+-	stt->configured_ticks = DEFAULT_PM_POWEROFF_TICK_SHADER;
++	stt->default_ticks = DEFAULT_PM_POWEROFF_TICK_SHADER;
++	stt->configured_ticks = stt->default_ticks;
+ 
+ 	return 0;
+ }
+@@ -1382,7 +1820,19 @@ void kbase_pm_reset_start_locked(struct kbase_device *kbdev)
+ 
+ 	backend->in_reset = true;
+ 	backend->l2_state = KBASE_L2_RESET_WAIT;
++#if !MALI_USE_CSF
+ 	backend->shaders_state = KBASE_SHADERS_RESET_WAIT;
++#else
++	/* MCU state machine is exercised only after the initial load/boot
++	 * of the firmware.
++	 */
++	if (likely(kbdev->csf.firmware_inited)) {
++		backend->mcu_state = KBASE_MCU_RESET_WAIT;
++		kbdev->csf.firmware_reload_needed = true;
++	} else {
++		WARN_ON(backend->mcu_state != KBASE_MCU_OFF);
++	}
++#endif
+ 
+ 	/* We're in a reset, so hwcnt will have been synchronously disabled by
+ 	 * this function's caller as part of the reset process. We therefore
+@@ -1422,15 +1872,28 @@ void kbase_pm_reset_complete(struct kbase_device *kbdev)
+ 
+ /* Timeout for kbase_pm_wait_for_desired_state when wait_event_killable has
+  * aborted due to a fatal signal. If the time spent waiting has exceeded this
+- * threshold then there is most likely a hardware issue. */
+-#define PM_TIMEOUT (5*HZ) /* 5s */
++ * threshold then there is most likely a hardware issue.
++ */
++#define PM_TIMEOUT_MS (5000) /* 5s */
+ 
+ static void kbase_pm_timed_out(struct kbase_device *kbdev)
+ {
++	unsigned long flags;
++
+ 	dev_err(kbdev->dev, "Power transition timed out unexpectedly\n");
++#if !MALI_USE_CSF
++	CSTD_UNUSED(flags);
+ 	dev_err(kbdev->dev, "Desired state :\n");
+ 	dev_err(kbdev->dev, "\tShader=%016llx\n",
+ 			kbdev->pm.backend.shaders_desired ? kbdev->pm.backend.shaders_avail : 0);
++#else
++	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
++	dev_err(kbdev->dev, "\tMCU desired = %d\n",
++		kbase_pm_is_mcu_desired(kbdev));
++	dev_err(kbdev->dev, "\tMCU sw state = %d\n",
++		kbdev->pm.backend.mcu_state);
++	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
++#endif
+ 	dev_err(kbdev->dev, "Current state :\n");
+ 	dev_err(kbdev->dev, "\tShader=%08x%08x\n",
+ 			kbase_reg_read(kbdev,
+@@ -1447,6 +1910,10 @@ static void kbase_pm_timed_out(struct kbase_device *kbdev)
+ 				GPU_CONTROL_REG(L2_READY_HI)),
+ 			kbase_reg_read(kbdev,
+ 				GPU_CONTROL_REG(L2_READY_LO)));
++#if MALI_USE_CSF
++	dev_err(kbdev->dev, "\tMCU status = %d\n",
++		kbase_reg_read(kbdev, GPU_CONTROL_REG(MCU_STATUS)));
++#endif
+ 	dev_err(kbdev->dev, "Cores transitioning :\n");
+ 	dev_err(kbdev->dev, "\tShader=%08x%08x\n",
+ 			kbase_reg_read(kbdev, GPU_CONTROL_REG(
+@@ -1465,49 +1932,87 @@ static void kbase_pm_timed_out(struct kbase_device *kbdev)
+ 					L2_PWRTRANS_LO)));
+ 
+ 	dev_err(kbdev->dev, "Sending reset to GPU - all running jobs will be lost\n");
+-	if (kbase_prepare_to_reset_gpu(kbdev))
++	if (kbase_prepare_to_reset_gpu(kbdev,
++				       RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
+ 		kbase_reset_gpu(kbdev);
+ }
+ 
+-void kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev)
++int kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev)
+ {
+ 	unsigned long flags;
+ 	unsigned long timeout;
+-	int err;
++	long remaining;
++	int err = 0;
+ 
+ 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ 	kbase_pm_update_state(kbdev);
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ 
+-	timeout = jiffies + PM_TIMEOUT;
++#if MALI_USE_CSF
++	timeout = kbase_csf_timeout_in_jiffies(PM_TIMEOUT_MS);
++#else
++	timeout = msecs_to_jiffies(PM_TIMEOUT_MS);
++#endif
+ 
+ 	/* Wait for cores */
+-	err = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait,
+-			kbase_pm_is_in_desired_state_with_l2_powered(kbdev));
++#if KERNEL_VERSION(4, 13, 1) <= LINUX_VERSION_CODE
++	remaining = wait_event_killable_timeout(
++#else
++	remaining = wait_event_timeout(
++#endif
++		kbdev->pm.backend.gpu_in_desired_state_wait,
++		kbase_pm_is_in_desired_state_with_l2_powered(kbdev), timeout);
+ 
+-	if (err < 0 && time_after(jiffies, timeout))
++	if (!remaining) {
+ 		kbase_pm_timed_out(kbdev);
++		err = -ETIMEDOUT;
++	} else if (remaining < 0) {
++		dev_info(
++			kbdev->dev,
++			"Wait for desired PM state with L2 powered got interrupted");
++		err = (int)remaining;
++	}
++
++	return err;
+ }
+ 
+-void kbase_pm_wait_for_desired_state(struct kbase_device *kbdev)
++int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev)
+ {
+ 	unsigned long flags;
+-	unsigned long timeout;
+-	int err;
++	long remaining;
++#if MALI_USE_CSF
++	long timeout = kbase_csf_timeout_in_jiffies(PM_TIMEOUT_MS);
++#else
++	long timeout = msecs_to_jiffies(PM_TIMEOUT_MS);
++#endif
++	int err = 0;
+ 
+ 	/* Let the state machine latch the most recent desired state. */
+ 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ 	kbase_pm_update_state(kbdev);
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ 
+-	timeout = jiffies + PM_TIMEOUT;
+-
+ 	/* Wait for cores */
+-	err = wait_event_killable(kbdev->pm.backend.gpu_in_desired_state_wait,
+-			kbase_pm_is_in_desired_state(kbdev));
++#if KERNEL_VERSION(4, 13, 1) <= LINUX_VERSION_CODE
++	remaining = wait_event_killable_timeout(
++		kbdev->pm.backend.gpu_in_desired_state_wait,
++		kbase_pm_is_in_desired_state(kbdev), timeout);
++#else
++	remaining = wait_event_timeout(
++		kbdev->pm.backend.gpu_in_desired_state_wait,
++		kbase_pm_is_in_desired_state(kbdev), timeout);
++#endif
+ 
+-	if (err < 0 && time_after(jiffies, timeout))
++	if (!remaining) {
+ 		kbase_pm_timed_out(kbdev);
++		err = -ETIMEDOUT;
++	} else if (remaining < 0) {
++		dev_info(kbdev->dev,
++			 "Wait for desired PM state got interrupted");
++		err = (int)remaining;
++	}
++
++	return err;
+ }
+ KBASE_EXPORT_TEST_API(kbase_pm_wait_for_desired_state);
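The remaining-value handling above follows the wait_event_killable_timeout() contract: 0 means the condition was still false when the timeout elapsed, a negative value (-ERESTARTSYS) means a fatal signal interrupted the wait, and a positive value means success with that many jiffies to spare. A condensed sketch of the same pattern; the wrapper and its parameters are invented for illustration:

/* Condensed form of the timeout/interrupt handling used above; the
 * wait queue, flag and timeout are placeholders supplied by the caller.
 */
static int example_wait_for_flag(wait_queue_head_t *wq, bool *flag,
				 long timeout_jiffies)
{
	long remaining = wait_event_killable_timeout(*wq, READ_ONCE(*flag),
						     timeout_jiffies);

	if (!remaining)
		return -ETIMEDOUT;	/* condition never became true */
	if (remaining < 0)
		return (int)remaining;	/* fatal signal: -ERESTARTSYS */
	return 0;			/* condition met within the timeout */
}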
+ 
+@@ -1515,7 +2020,7 @@ void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
+ {
+ 	unsigned long flags;
+ 
+-	KBASE_DEBUG_ASSERT(NULL != kbdev);
++	KBASE_DEBUG_ASSERT(kbdev != NULL);
+ 	/*
+ 	 * Clear all interrupts,
+ 	 * and unmask them all.
+@@ -1529,14 +2034,19 @@ void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
+ 	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF);
+ 
+ 	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF);
++#if MALI_USE_CSF
++	/* Enable only the Page fault bits part */
++	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFF);
++#else
+ 	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF);
++#endif
+ }
+ 
+ KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts);
+ 
+ void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev)
+ {
+-	KBASE_DEBUG_ASSERT(NULL != kbdev);
++	KBASE_DEBUG_ASSERT(kbdev != NULL);
+ 	/*
+ 	 * Mask all interrupts,
+ 	 * and clear them all.
+@@ -1563,6 +2073,23 @@ void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
+ 
+ KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts);
+ 
++#if MALI_USE_CSF
++static void update_user_reg_page_mapping(struct kbase_device *kbdev)
++{
++	lockdep_assert_held(&kbdev->pm.lock);
++
++	if (kbdev->csf.mali_file_inode) {
++		/* This zaps the PTE corresponding to the mapping of the User
++		 * register page for all the Kbase contexts.
++		 */
++		unmap_mapping_range(kbdev->csf.mali_file_inode->i_mapping,
++				    BASEP_MEM_CSF_USER_REG_PAGE_HANDLE,
++				    PAGE_SIZE, 1);
++	}
++}
++#endif
++
++
+ /*
+  * pmu layout:
+  * 0x0000: PMU TAG (RO) (0xCAFECAFE)
+@@ -1574,10 +2101,20 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
+ 	bool reset_required = is_resume;
+ 	unsigned long flags;
+ 
+-	KBASE_DEBUG_ASSERT(NULL != kbdev);
++	KBASE_DEBUG_ASSERT(kbdev != NULL);
++#if !MALI_USE_CSF
+ 	lockdep_assert_held(&kbdev->js_data.runpool_mutex);
++#endif /* !MALI_USE_CSF */
+ 	lockdep_assert_held(&kbdev->pm.lock);
+ 
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++	if (WARN_ON(kbase_pm_is_gpu_lost(kbdev))) {
++		dev_err(kbdev->dev,
++			"%s: Cannot power up while GPU lost", __func__);
++		return;
++	}
++#endif
++
+ 	if (kbdev->pm.backend.gpu_powered) {
+ 		/* Already turned on */
+ 		if (kbdev->poweroff_pending)
+@@ -1602,11 +2139,40 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
+ 	kbdev->pm.backend.gpu_powered = true;
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ 
++#if MALI_USE_CSF
++	/* GPU has been turned on, can switch to actual register page */
++	update_user_reg_page_mapping(kbdev);
++#endif
++
+ 	if (reset_required) {
+ 		/* GPU state was lost, reset GPU to ensure it is in a
+-		 * consistent state */
++		 * consistent state
++		 */
+ 		kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS);
+ 	}
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++	else {
++		if (kbdev->arb.arb_if) {
++			struct kbase_arbiter_vm_state *arb_vm_state =
++				kbdev->pm.arb_vm_state;
++
++			/* In the case that the GPU has just been granted by
++			 * the Arbiter, a reset will have already been done.
++			 * However, it is still necessary to initialize the GPU.
++			 */
++			if (arb_vm_state->vm_arb_starting)
++				kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS |
++						PM_NO_RESET);
++		}
++	}
++	/*
++	 * At this point the GPU has transitioned to ON, so there is a chance
++	 * that a repartitioning occurred. In that case the current config
++	 * should be read again.
++	 */
++	kbase_gpuprops_get_curr_config_props(kbdev,
++		&kbdev->gpu_props.curr_config);
++#endif /* CONFIG_MALI_ARBITER_SUPPORT */
+ 
+ 	mutex_lock(&kbdev->mmu_hw_mutex);
+ 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+@@ -1628,7 +2194,19 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
+ 
+ 	/* Turn on the L2 caches */
+ 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
++	kbdev->pm.backend.gpu_ready = true;
+ 	kbdev->pm.backend.l2_desired = true;
++#if MALI_USE_CSF
++	if (reset_required) {
++		/* GPU reset was done after the power on, so send the post
++		 * reset event instead. This is okay as GPU power off event
++		 * is same as pre GPU reset event.
++		 */
++		kbase_ipa_control_handle_gpu_reset_post(kbdev);
++	} else {
++		kbase_ipa_control_handle_gpu_power_on(kbdev);
++	}
++#endif
+ 	kbase_pm_update_state(kbdev);
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+@@ -1639,7 +2217,7 @@ bool kbase_pm_clock_off(struct kbase_device *kbdev)
+ {
+ 	unsigned long flags;
+ 
+-	KBASE_DEBUG_ASSERT(NULL != kbdev);
++	KBASE_DEBUG_ASSERT(kbdev != NULL);
+ 	lockdep_assert_held(&kbdev->pm.lock);
+ 
+ 	/* ASSERT that the cores should now be unavailable. No lock needed. */
+@@ -1663,16 +2241,38 @@ bool kbase_pm_clock_off(struct kbase_device *kbdev)
+ 
+ 	if (atomic_read(&kbdev->faults_pending)) {
+ 		/* Page/bus faults are still being processed. The GPU can not
+-		 * be powered off until they have completed */
++		 * be powered off until they have completed
++		 */
+ 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ 		return false;
+ 	}
+ 
+ 	kbase_pm_cache_snoop_disable(kbdev);
++#if MALI_USE_CSF
++	kbase_ipa_control_handle_gpu_power_off(kbdev);
++#endif
++
++	kbdev->pm.backend.gpu_ready = false;
+ 
+ 	/* The GPU power may be turned off from this point */
+ 	kbdev->pm.backend.gpu_powered = false;
++
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++	if (kbase_pm_is_gpu_lost(kbdev)) {
++		/* Ensure we unblock any threads that are stuck waiting
++		 * for the GPU
++		 */
++		kbase_gpu_cache_clean_wait_complete(kbdev);
++	}
++#endif
++
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
++
++#if MALI_USE_CSF
++	/* GPU is about to be turned off, switch to dummy page */
++	update_user_reg_page_mapping(kbdev);
++#endif
++
+ #ifdef CONFIG_MALI_ARBITER_SUPPORT
+ 	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_IDLE_EVENT);
+ #endif /* CONFIG_MALI_ARBITER_SUPPORT */
+@@ -1720,19 +2320,23 @@ static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
+ 	struct kbasep_reset_timeout_data *rtdata =
+ 		container_of(timer, struct kbasep_reset_timeout_data, timer);
+ 
+-	rtdata->timed_out = 1;
++	rtdata->timed_out = true;
+ 
+ 	/* Set the wait queue to wake up kbase_pm_init_hw even though the reset
+-	 * hasn't completed */
++	 * hasn't completed
++	 */
+ 	kbase_pm_reset_done(rtdata->kbdev);
+ 
+ 	return HRTIMER_NORESTART;
+ }
+ 
+-static int kbase_set_jm_quirks(struct kbase_device *kbdev, const u32 prod_id)
++static int kbase_set_gpu_quirks(struct kbase_device *kbdev, const u32 prod_id)
+ {
+-	u32 hw_quirks_jm = kbase_reg_read(kbdev,
+-				GPU_CONTROL_REG(JM_CONFIG));
++#if MALI_USE_CSF
++	kbdev->hw_quirks_gpu =
++		kbase_reg_read(kbdev, GPU_CONTROL_REG(CSF_CONFIG));
++#else
++	u32 hw_quirks_gpu = kbase_reg_read(kbdev, GPU_CONTROL_REG(JM_CONFIG));
+ 
+ 	if (GPU_ID2_MODEL_MATCH_VALUE(prod_id) == GPU_ID2_PRODUCT_TMIX) {
+ 		/* Only for tMIx */
+@@ -1746,38 +2350,38 @@ static int kbase_set_jm_quirks(struct kbase_device *kbdev, const u32 prod_id)
+ 		 */
+ 		if (coherency_features ==
+ 				COHERENCY_FEATURE_BIT(COHERENCY_ACE)) {
+-			hw_quirks_jm |= (COHERENCY_ACE_LITE |
+-					COHERENCY_ACE) <<
+-					JM_FORCE_COHERENCY_FEATURES_SHIFT;
++			hw_quirks_gpu |= (COHERENCY_ACE_LITE | COHERENCY_ACE)
++					 << JM_FORCE_COHERENCY_FEATURES_SHIFT;
+ 		}
+ 	}
+ 
+-	if (kbase_is_gpu_lost(kbdev))
++	if (kbase_is_gpu_removed(kbdev))
+ 		return -EIO;
+ 
+-	kbdev->hw_quirks_jm = hw_quirks_jm;
++	kbdev->hw_quirks_gpu = hw_quirks_gpu;
+ 
++#endif /* !MALI_USE_CSF */
+ 	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_IDVS_GROUP_SIZE)) {
+ 		int default_idvs_group_size = 0xF;
+-		u32 tmp;
++		u32 group_size = 0;
+ 
+-		if (of_property_read_u32(kbdev->dev->of_node,
+-					"idvs-group-size", &tmp))
+-			tmp = default_idvs_group_size;
++		if (of_property_read_u32(kbdev->dev->of_node, "idvs-group-size",
++					 &group_size))
++			group_size = default_idvs_group_size;
+ 
+-		if (tmp > IDVS_GROUP_MAX_SIZE) {
++		if (group_size > IDVS_GROUP_MAX_SIZE) {
+ 			dev_err(kbdev->dev,
+ 				"idvs-group-size of %d is too large. Maximum value is %d",
+-				tmp, IDVS_GROUP_MAX_SIZE);
+-			tmp = default_idvs_group_size;
++				group_size, IDVS_GROUP_MAX_SIZE);
++			group_size = default_idvs_group_size;
+ 		}
+ 
+-		kbdev->hw_quirks_jm |= tmp << IDVS_GROUP_SIZE_SHIFT;
++		kbdev->hw_quirks_gpu |= group_size << IDVS_GROUP_SIZE_SHIFT;
+ 	}
+ 
+ #define MANUAL_POWER_CONTROL ((u32)(1 << 8))
+ 	if (corestack_driver_control)
+-		kbdev->hw_quirks_jm |= MANUAL_POWER_CONTROL;
++		kbdev->hw_quirks_gpu |= MANUAL_POWER_CONTROL;
+ 
+ 	return 0;
+ }
+@@ -1787,7 +2391,7 @@ static int kbase_set_sc_quirks(struct kbase_device *kbdev, const u32 prod_id)
+ 	u32 hw_quirks_sc = kbase_reg_read(kbdev,
+ 					GPU_CONTROL_REG(SHADER_CONFIG));
+ 
+-	if (kbase_is_gpu_lost(kbdev))
++	if (kbase_is_gpu_removed(kbdev))
+ 		return -EIO;
+ 
+ 	if (prod_id < 0x750 || prod_id == 0x6956) /* T60x, T62x, T72x */
+@@ -1811,7 +2415,7 @@ static int kbase_set_tiler_quirks(struct kbase_device *kbdev)
+ 	u32 hw_quirks_tiler = kbase_reg_read(kbdev,
+ 					GPU_CONTROL_REG(TILER_CONFIG));
+ 
+-	if (kbase_is_gpu_lost(kbdev))
++	if (kbase_is_gpu_removed(kbdev))
+ 		return -EIO;
+ 
+ 	/* Set tiler clock gate override if required */
+@@ -1831,18 +2435,17 @@ static int kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
+ 				GPU_ID_VERSION_PRODUCT_ID_SHIFT;
+ 	int error = 0;
+ 
+-	kbdev->hw_quirks_jm = 0;
++	kbdev->hw_quirks_gpu = 0;
+ 	kbdev->hw_quirks_sc = 0;
+ 	kbdev->hw_quirks_tiler = 0;
+ 	kbdev->hw_quirks_mmu = 0;
+ 
+-	if (!of_property_read_u32(np, "quirks_jm",
+-				&kbdev->hw_quirks_jm)) {
++	if (!of_property_read_u32(np, "quirks_gpu", &kbdev->hw_quirks_gpu)) {
+ 		dev_info(kbdev->dev,
+-			"Found quirks_jm = [0x%x] in Devicetree\n",
+-			kbdev->hw_quirks_jm);
++			 "Found quirks_gpu = [0x%x] in Devicetree\n",
++			 kbdev->hw_quirks_gpu);
+ 	} else {
+-		error = kbase_set_jm_quirks(kbdev, prod_id);
++		error = kbase_set_gpu_quirks(kbdev, prod_id);
+ 		if (error)
+ 			return error;
+ 	}
+@@ -1891,15 +2494,20 @@ static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
+ 
+ 	kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG),
+ 			kbdev->hw_quirks_mmu);
++#if MALI_USE_CSF
++	kbase_reg_write(kbdev, GPU_CONTROL_REG(CSF_CONFIG),
++			kbdev->hw_quirks_gpu);
++#else
+ 	kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG),
+-			kbdev->hw_quirks_jm);
++			kbdev->hw_quirks_gpu);
++#endif
+ }
+ 
+ void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
+ {
+ 	if ((kbdev->current_gpu_coherency_mode == COHERENCY_ACE) &&
+ 		!kbdev->cci_snoop_enabled) {
+-#ifdef CONFIG_ARM64
++#if IS_ENABLED(CONFIG_ARM64)
+ 		if (kbdev->snoop_enable_smc != 0)
+ 			kbase_invoke_smc_fid(kbdev->snoop_enable_smc, 0, 0, 0);
+ #endif /* CONFIG_ARM64 */
+@@ -1911,7 +2519,7 @@ void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
+ void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
+ {
+ 	if (kbdev->cci_snoop_enabled) {
+-#ifdef CONFIG_ARM64
++#if IS_ENABLED(CONFIG_ARM64)
+ 		if (kbdev->snoop_disable_smc != 0) {
+ 			mali_cci_flush_l2(kbdev);
+ 			kbase_invoke_smc_fid(kbdev->snoop_disable_smc, 0, 0, 0);
+@@ -1922,6 +2530,7 @@ void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
+ 	}
+ }
+ 
++#if !MALI_USE_CSF
+ static void reenable_protected_mode_hwcnt(struct kbase_device *kbdev)
+ {
+ 	unsigned long irq_flags;
+@@ -1934,6 +2543,7 @@ static void reenable_protected_mode_hwcnt(struct kbase_device *kbdev)
+ 	}
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+ }
++#endif
+ 
+ static int kbase_pm_do_reset(struct kbase_device *kbdev)
+ {
+@@ -1960,7 +2570,7 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
+ 
+ 	/* Initialize a structure for tracking the status of the reset */
+ 	rtdata.kbdev = kbdev;
+-	rtdata.timed_out = 0;
++	rtdata.timed_out = false;
+ 
+ 	/* Create a timer to use as a timeout on the reset */
+ 	hrtimer_init_on_stack(&rtdata.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+@@ -1972,7 +2582,7 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
+ 	/* Wait for the RESET_COMPLETED interrupt to be raised */
+ 	kbase_pm_wait_for_reset(kbdev);
+ 
+-	if (rtdata.timed_out == 0) {
++	if (!rtdata.timed_out) {
+ 		/* GPU has been reset */
+ 		hrtimer_cancel(&rtdata.timer);
+ 		destroy_hrtimer_on_stack(&rtdata.timer);
+@@ -1980,46 +2590,60 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
+ 	}
+ 
+ 	/* No interrupt has been received - check if the RAWSTAT register says
+-	 * the reset has completed */
++	 * the reset has completed
++	 */
+ 	if ((kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)) &
+-							RESET_COMPLETED)
+-		|| kbase_is_gpu_lost(kbdev)) {
++							RESET_COMPLETED)) {
+ 		/* The interrupt is set in the RAWSTAT; this suggests that the
+-		 * interrupts are not getting to the CPU */
++		 * interrupts are not getting to the CPU
++		 */
+ 		dev_err(kbdev->dev, "Reset interrupt didn't reach CPU. Check interrupt assignments.\n");
+ 		/* If interrupts aren't working we can't continue. */
+ 		destroy_hrtimer_on_stack(&rtdata.timer);
+ 		return -EINVAL;
+ 	}
+ 
++	if (kbase_is_gpu_removed(kbdev)) {
++		dev_dbg(kbdev->dev, "GPU has been removed, reset no longer needed.\n");
++		destroy_hrtimer_on_stack(&rtdata.timer);
++		return -EINVAL;
++	}
++
+ 	/* The GPU doesn't seem to be responding to the reset so try a hard
+-	 * reset */
+-	dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n",
+-								RESET_TIMEOUT);
+-	KBASE_KTRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, 0);
+-	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+-						GPU_COMMAND_HARD_RESET);
++	 * reset, but only when NOT in arbitration mode.
++	 */
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++	if (!kbdev->arb.arb_if) {
++#endif /* CONFIG_MALI_ARBITER_SUPPORT */
++		dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n",
++					RESET_TIMEOUT);
++		KBASE_KTRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, 0);
++		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
++					GPU_COMMAND_HARD_RESET);
+ 
+-	/* Restart the timer to wait for the hard reset to complete */
+-	rtdata.timed_out = 0;
++		/* Restart the timer to wait for the hard reset to complete */
++		rtdata.timed_out = false;
+ 
+-	hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
+-							HRTIMER_MODE_REL);
++		hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
++					HRTIMER_MODE_REL);
+ 
+-	/* Wait for the RESET_COMPLETED interrupt to be raised */
+-	kbase_pm_wait_for_reset(kbdev);
++		/* Wait for the RESET_COMPLETED interrupt to be raised */
++		kbase_pm_wait_for_reset(kbdev);
+ 
+-	if (rtdata.timed_out == 0) {
+-		/* GPU has been reset */
+-		hrtimer_cancel(&rtdata.timer);
+-		destroy_hrtimer_on_stack(&rtdata.timer);
+-		return 0;
+-	}
++		if (!rtdata.timed_out) {
++			/* GPU has been reset */
++			hrtimer_cancel(&rtdata.timer);
++			destroy_hrtimer_on_stack(&rtdata.timer);
++			return 0;
++		}
+ 
+-	destroy_hrtimer_on_stack(&rtdata.timer);
++		destroy_hrtimer_on_stack(&rtdata.timer);
+ 
+-	dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n",
+-								RESET_TIMEOUT);
++		dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n",
++					RESET_TIMEOUT);
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++	}
++#endif /* CONFIG_MALI_ARBITER_SUPPORT */
+ 
+ 	return -EINVAL;
+ }
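
kbase_pm_do_reset() escalates: a soft reset guarded by an hrtimer timeout, then a hard reset only when no arbiter owns the GPU, since a hard reset would pull the hardware out from under the other domains. A self-contained sketch of that escalation logic, with stubbed reset primitives in place of the register writes and timer plumbing:

#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	bool soft_reset_ok;
	bool hard_reset_ok;
	bool arbiter_attached; /* stands in for kbdev->arb.arb_if */
};

/* Each helper returns true if the reset completed before the timeout. */
static bool try_soft_reset(struct fake_dev *d) { return d->soft_reset_ok; }
static bool try_hard_reset(struct fake_dev *d) { return d->hard_reset_ok; }

static int do_reset(struct fake_dev *d)
{
	if (try_soft_reset(d))
		return 0;

	/* Escalate only when no arbiter owns the GPU: a hard reset
	 * would yank the hardware from under other OS domains.
	 */
	if (!d->arbiter_attached) {
		fprintf(stderr, "soft reset timed out, trying hard reset\n");
		if (try_hard_reset(d))
			return 0;
		fprintf(stderr, "hard reset timed out\n");
	}
	return -1;
}

int main(void)
{
	struct fake_dev d = { .soft_reset_ok = false, .hard_reset_ok = true,
			      .arbiter_attached = false };
	return do_reset(&d) ? 1 : 0;
}
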
+@@ -2041,9 +2665,9 @@ int kbase_pm_protected_mode_disable(struct kbase_device *const kbdev)
+ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
+ {
+ 	unsigned long irq_flags;
+-	int err;
++	int err = 0;
+ 
+-	KBASE_DEBUG_ASSERT(NULL != kbdev);
++	KBASE_DEBUG_ASSERT(kbdev != NULL);
+ 	lockdep_assert_held(&kbdev->pm.lock);
+ 
+ 	/* Ensure the clock is on before attempting to access the hardware */
+@@ -2055,7 +2679,8 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
+ 	}
+ 
+ 	/* Ensure interrupts are off to begin with, this also clears any
+-	 * outstanding interrupts */
++	 * outstanding interrupts
++	 */
+ 	kbase_pm_disable_interrupts(kbdev);
+ 	/* Ensure cache snoops are disabled before reset. */
+ 	kbase_pm_cache_snoop_disable(kbdev);
+@@ -2069,10 +2694,24 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+ 
+ 	/* Soft reset the GPU */
+-	err = kbdev->protected_ops->protected_mode_disable(
+-			kbdev->protected_dev);
++#ifdef CONFIG_MALI_ARBITER_SUPPORT
++	if (!(flags & PM_NO_RESET))
++#endif /* CONFIG_MALI_ARBITER_SUPPORT */
++		err = kbdev->protected_ops->protected_mode_disable(
++				kbdev->protected_dev);
+ 
+ 	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
++#if MALI_USE_CSF
++	if (kbdev->protected_mode) {
++		unsigned long flags;
++
++		kbase_ipa_control_protm_exited(kbdev);
++
++		kbase_csf_scheduler_spin_lock(kbdev, &flags);
++		kbase_hwcnt_backend_csf_protm_exited(&kbdev->hwcnt_gpu_iface);
++		kbase_csf_scheduler_spin_unlock(kbdev, flags);
++	}
++#endif
+ 	kbdev->protected_mode = false;
+ 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+ 
+@@ -2093,7 +2732,8 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
+ 			GPU_STATUS_PROTECTED_MODE_ACTIVE);
+ 
+ 	/* If cycle counter was in use re-enable it, enable_irqs will only be
+-	 * false when called from kbase_pm_powerup */
++	 * false when called from kbase_pm_powerup
++	 */
+ 	if (kbdev->pm.backend.gpu_cycle_counter_requests &&
+ 						(flags & PM_ENABLE_IRQS)) {
+ 		kbase_pm_enable_interrupts(kbdev);
+@@ -2116,12 +2756,14 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
+ 		kbase_pm_enable_interrupts(kbdev);
+ 
+ exit:
++#if !MALI_USE_CSF
+ 	if (!kbdev->pm.backend.protected_entry_transition_override) {
+ 		/* Re-enable GPU hardware counters if we're resetting from
+ 		 * protected mode.
+ 		 */
+ 		reenable_protected_mode_hwcnt(kbdev);
+ 	}
++#endif
+ 
+ 	return err;
+ }
+@@ -2148,12 +2790,21 @@ kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev)
+ 
+ 	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+ 									flags);
+-
+ 	++kbdev->pm.backend.gpu_cycle_counter_requests;
+ 
+-	if (1 == kbdev->pm.backend.gpu_cycle_counter_requests)
++	if (kbdev->pm.backend.gpu_cycle_counter_requests == 1)
+ 		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ 					GPU_COMMAND_CYCLE_COUNT_START);
++	else {
++		/* The counter may have been stopped, for example across
++		 * a GPU reset, in which case it needs to be kicked again.
++		 */
++		if (!(kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS)) &
++		      GPU_STATUS_CYCLE_COUNT_ACTIVE)) {
++			kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
++					GPU_COMMAND_CYCLE_COUNT_START);
++		}
++	}
+ 
+ 	spin_unlock_irqrestore(
+ 			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
+@@ -2169,6 +2820,8 @@ void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev)
+ 	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
+ 								INT_MAX);
+ 
++	kbase_pm_wait_for_l2_powered(kbdev);
++
+ 	kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
+ }
+ 
+@@ -2203,7 +2856,7 @@ void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev)
+ 
+ 	--kbdev->pm.backend.gpu_cycle_counter_requests;
+ 
+-	if (0 == kbdev->pm.backend.gpu_cycle_counter_requests)
++	if (kbdev->pm.backend.gpu_cycle_counter_requests == 0)
+ 		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
+ 					GPU_COMMAND_CYCLE_COUNT_STOP);
+ 
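
The cycle counter request/release pair above is a refcount gating shared hardware: start the counter on the 0 to 1 transition, stop it on 1 to 0, and re-kick it if a GPU reset stopped it while requests were still outstanding. A compact standalone model of that bookkeeping (the real code holds gpu_cycle_counter_requests_lock around these transitions):

#include <stdio.h>

static unsigned int cc_requests; /* protected by a spinlock in real code */
static int cc_running;

static void hw_start(void) { cc_running = 1; puts("CYCLE_COUNT_START"); }
static void hw_stop(void)  { cc_running = 0; puts("CYCLE_COUNT_STOP"); }

static void request_cycle_counter(void)
{
	if (++cc_requests == 1)
		hw_start();        /* first user starts the counter */
	else if (!cc_running)
		hw_start();        /* e.g. a GPU reset stopped it */
}

static void release_cycle_counter(void)
{
	if (--cc_requests == 0)
		hw_stop();         /* last user stops the counter */
}

int main(void)
{
	request_cycle_counter();
	request_cycle_counter();
	release_cycle_counter();
	release_cycle_counter();
	return 0;
}
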
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
+index 95f10e0..70d009e 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,12 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+-
+ /*
+  * Power management API definitions used internally by GPU backend
+  */
+@@ -31,7 +28,7 @@
+ 
+ #include <mali_kbase_hwaccess_pm.h>
+ 
+-#include "mali_kbase_pm_ca.h"
++#include "backend/gpu/mali_kbase_pm_ca.h"
+ #include "mali_kbase_pm_policy.h"
+ 
+ 
+@@ -205,6 +202,30 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags);
+  */
+ void kbase_pm_reset_done(struct kbase_device *kbdev);
+ 
++#if MALI_USE_CSF
++/**
++ * kbase_pm_wait_for_desired_state - Wait for the desired power state to be
++ *                                   reached
++ *
++ * Wait for the L2 and MCU state machines to reach the states corresponding
++ * to the values of 'kbase_pm_is_l2_desired' and 'kbase_pm_is_mcu_desired'.
++ *
++ * The usual use-case for this is to ensure that all parts of the GPU have
++ * been powered up after performing a GPU Reset.
++ *
++ * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
++ * because this function will take that lock itself.
++ *
++ * NOTE: This may not wait until the correct state is reached if there is a
++ * power off in progress and kbase_pm_context_active() was called instead of
++ * kbase_csf_scheduler_pm_active().
++ *
++ * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Return: 0 on success, error code on error
++ */
++int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
++#else
+ /**
+  * kbase_pm_wait_for_desired_state - Wait for the desired power state to be
+  *                                   reached
+@@ -224,15 +245,17 @@ void kbase_pm_reset_done(struct kbase_device *kbdev);
+  * kbase_pm_wait_for_poweroff_complete()
+  *
+  * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Return: 0 on success, error code on error
+  */
+-void kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
++int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
++#endif
+ 
+ /**
+  * kbase_pm_wait_for_l2_powered - Wait for the L2 cache to be powered on
+  *
+- * Wait for the L2 to be powered on, and for the L2 and shader state machines to
+- * stabilise by reaching the states corresponding to the values of 'l2_desired'
+- * and 'shaders_desired'.
++ * Wait for the L2 to be powered on, and for the L2 and the state machines of
++ * its dependent stack components to stabilise.
+  *
+  * kbdev->pm.active_count must be non-zero when calling this function.
+  *
+@@ -240,8 +263,10 @@ void kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
+  * because this function will take that lock itself.
+  *
+  * @kbdev: The kbase device structure for the device (must be a valid pointer)
++ *
++ * Return: 0 on success, error code on error
+  */
+-void kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev);
++int kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev);
+ 
+ /**
+  * kbase_pm_update_dynamic_cores_onoff - Update the L2 and shader power state
+@@ -467,7 +492,8 @@ void kbase_pm_register_access_enable(struct kbase_device *kbdev);
+ void kbase_pm_register_access_disable(struct kbase_device *kbdev);
+ 
+ /* NOTE: kbase_pm_is_suspending is in mali_kbase.h, because it is an inline
+- * function */
++ * function
++ */
+ 
+ /**
+  * kbase_pm_metrics_is_active - Check if the power management metrics
+@@ -511,8 +537,22 @@ void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
+ 
+ #ifdef CONFIG_MALI_MIDGARD_DVFS
+ 
++#if MALI_USE_CSF
++/**
++ * kbase_platform_dvfs_event - Report utilisation to DVFS code for CSF GPU
++ *
++ * Function provided by platform specific code when DVFS is enabled to allow
++ * the power management metrics system to report utilisation.
++ *
++ * @kbdev:         The kbase device structure for the device (must be a
++ *                 valid pointer)
++ * @utilisation:   The current calculated utilisation by the metrics system.
++ * Return:         Returns 0 on failure and non-zero on success.
++ */
++int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation);
++#else
+ /**
+- * kbase_platform_dvfs_event - Report utilisation to DVFS code
++ * kbase_platform_dvfs_event - Report utilisation to DVFS code for JM GPU
+  *
+  * Function provided by platform specific code when DVFS is enabled to allow
+  * the power management metrics system to report utilisation.
+@@ -525,11 +565,12 @@ void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
+  *                 group.
+  * Return:         Returns 0 on failure and non zero on success.
+  */
+-
+ int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
+-	u32 util_gl_share, u32 util_cl_share[2]);
++			      u32 util_gl_share, u32 util_cl_share[2]);
+ #endif
+ 
++#endif /* CONFIG_MALI_MIDGARD_DVFS */
++
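
kbase_platform_dvfs_event() is the platform's hook: it receives the calculated utilisation and returns non-zero on success, as documented above. A toy standalone governor showing the shape of such a hook — the thresholds and frequency table here are invented for illustration only:

#include <stdio.h>

/* Step the clock up above 90% utilisation, down below 30%. */
static const unsigned int freqs_mhz[] = { 200, 400, 600, 800 };
static unsigned int level = 1;

static int dvfs_event(unsigned int utilisation)
{
	if (utilisation > 90 && level < 3)
		level++;
	else if (utilisation < 30 && level > 0)
		level--;
	printf("util=%u%% -> %u MHz\n", utilisation, freqs_mhz[level]);
	return 1; /* non-zero on success, per the documented contract */
}

int main(void)
{
	dvfs_event(95);
	dvfs_event(95);
	dvfs_event(10);
	return 0;
}
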
+ void kbase_pm_power_changed(struct kbase_device *kbdev);
+ 
+ /**
+@@ -683,6 +724,72 @@ extern bool corestack_driver_control;
+  */
+ bool kbase_pm_is_l2_desired(struct kbase_device *kbdev);
+ 
++#if MALI_USE_CSF
++/**
++ * kbase_pm_is_mcu_desired - Check whether MCU is desired
++ *
++ * @kbdev: Device pointer
++ *
++ * This shall be called to check whether the MCU needs to be enabled.
++ *
++ * Return: true if MCU needs to be enabled.
++ */
++bool kbase_pm_is_mcu_desired(struct kbase_device *kbdev);
++
++/**
++ * kbase_pm_idle_groups_sched_suspendable - Check whether the scheduler can be
++ *                                        suspended to a low-power state when
++ *                                        all the CSGs are idle
++ *
++ * @kbdev: Device pointer
++ *
++ * Return: true if allowed to enter the suspended state.
++ */
++static inline
++bool kbase_pm_idle_groups_sched_suspendable(struct kbase_device *kbdev)
++{
++	lockdep_assert_held(&kbdev->hwaccess_lock);
++
++	return !(kbdev->pm.backend.csf_pm_sched_flags &
++		 CSF_DYNAMIC_PM_SCHED_IGNORE_IDLE);
++}
++
++/**
++ * kbase_pm_no_runnables_sched_suspendable - Check whether the scheduler can be
++ *                                        suspended to low power state when
++ *                                        suspended to a low-power state when
++ *
++ * @kbdev: Device pointer
++ *
++ * Return: true if allowed to enter the suspended state.
++ */
++static inline
++bool kbase_pm_no_runnables_sched_suspendable(struct kbase_device *kbdev)
++{
++	lockdep_assert_held(&kbdev->hwaccess_lock);
++
++	return !(kbdev->pm.backend.csf_pm_sched_flags &
++		 CSF_DYNAMIC_PM_SCHED_NO_SUSPEND);
++}
++
++/**
++ * kbase_pm_no_mcu_core_pwroff - Check whether the PM is required to keep the
++ *                               MCU core powered in accordance with the active
++ *                               power management policy
++ *
++ * @kbdev: Device pointer
++ *
++ * Return: true if the MCU is to remain powered.
++ */
++static inline bool kbase_pm_no_mcu_core_pwroff(struct kbase_device *kbdev)
++{
++	lockdep_assert_held(&kbdev->hwaccess_lock);
++
++	return kbdev->pm.backend.csf_pm_sched_flags &
++		CSF_DYNAMIC_PM_CORE_KEEP_ON;
++}
++#endif
++
+ /**
+  * kbase_pm_lock - Lock all necessary mutexes to perform PM actions
+  *
+@@ -692,7 +799,9 @@ bool kbase_pm_is_l2_desired(struct kbase_device *kbdev);
+  */
+ static inline void kbase_pm_lock(struct kbase_device *kbdev)
+ {
++#if !MALI_USE_CSF
+ 	mutex_lock(&kbdev->js_data.runpool_mutex);
++#endif /* !MALI_USE_CSF */
+ 	mutex_lock(&kbdev->pm.lock);
+ }
+ 
+@@ -704,7 +813,9 @@ static inline void kbase_pm_lock(struct kbase_device *kbdev)
+ static inline void kbase_pm_unlock(struct kbase_device *kbdev)
+ {
+ 	mutex_unlock(&kbdev->pm.lock);
++#if !MALI_USE_CSF
+ 	mutex_unlock(&kbdev->js_data.runpool_mutex);
++#endif /* !MALI_USE_CSF */
+ }
+ 
+ #endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */
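
The three CSF helpers above all reduce to testing policy bits in csf_pm_sched_flags under hwaccess_lock. A standalone sketch of the same flag-test pattern — the flag names and bit positions below are illustrative, not the driver's actual definitions:

#include <stdbool.h>
#include <stdio.h>

#define SCHED_IGNORE_IDLE (1u << 0)
#define SCHED_NO_SUSPEND  (1u << 1)
#define CORE_KEEP_ON      (1u << 2)

struct pm_backend {
	unsigned int csf_pm_sched_flags; /* set when a policy is chosen */
};

/* Scheduler may suspend when all groups are idle. */
static bool idle_groups_suspendable(const struct pm_backend *pm)
{
	return !(pm->csf_pm_sched_flags & SCHED_IGNORE_IDLE);
}

/* Scheduler may suspend when nothing is runnable. */
static bool no_runnables_suspendable(const struct pm_backend *pm)
{
	return !(pm->csf_pm_sched_flags & SCHED_NO_SUSPEND);
}

/* Policy demands the MCU core stay powered. */
static bool no_mcu_core_pwroff(const struct pm_backend *pm)
{
	return pm->csf_pm_sched_flags & CORE_KEEP_ON;
}

int main(void)
{
	struct pm_backend pm = { .csf_pm_sched_flags = CORE_KEEP_ON };

	printf("idle suspend: %d, no-runnables suspend: %d, keep MCU: %d\n",
	       idle_groups_suspendable(&pm), no_runnables_suspendable(&pm),
	       no_mcu_core_pwroff(&pm));
	return 0;
}
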
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_l2_states.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_l2_states.h
+index 12cb051..ef72f60 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_l2_states.h
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_l2_states.h
+@@ -1,11 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ /*
+  *
+- * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2018-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,8 +17,6 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+ /*
+@@ -25,6 +24,19 @@
+  * The function-like macro KBASEP_L2_STATE() must be defined before including
+  * this header file. This header file can be included multiple times in the
+  * same compilation unit with different definitions of KBASEP_L2_STATE().
++ *
++ * @OFF:              The L2 cache and tiler are off
++ * @PEND_ON:          The L2 cache and tiler are powering on
++ * @RESTORE_CLOCKS:   The GPU clock is restored. Conditionally used.
++ * @ON_HWCNT_ENABLE:  The L2 cache and tiler are on, and hwcnt is being enabled
++ * @ON:               The L2 cache and tiler are on, and hwcnt is enabled
++ * @ON_HWCNT_DISABLE: The L2 cache and tiler are on, and hwcnt is being disabled
++ * @SLOW_DOWN_CLOCKS: The GPU clock is set to appropriate or lowest clock.
++ *                    Conditionally used.
++ * @POWER_DOWN:       The L2 cache and tiler are about to be powered off
++ * @PEND_OFF:         The L2 cache and tiler are powering off
++ * @RESET_WAIT:       The GPU is resetting, L2 cache and tiler power state are
++ *                    unknown
+  */
+ KBASEP_L2_STATE(OFF)
+ KBASEP_L2_STATE(PEND_ON)
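
These state headers rely on the X-macro idiom: KBASEP_L2_STATE() (and KBASEP_MCU_STATE() below) is redefined at each include site, so a single list generates both the enum and a matching name table. A self-contained illustration of the technique — the list here is inlined and shortened, whereas the driver keeps it in a header included multiple times:

#include <stdio.h>

/* Stand-in for the shared state list. */
#define L2_STATE_LIST \
	X(OFF)        \
	X(PEND_ON)    \
	X(ON)         \
	X(PEND_OFF)

/* First expansion: generate the enum. */
#define X(name) L2_STATE_##name,
enum l2_state { L2_STATE_LIST L2_STATE_COUNT };
#undef X

/* Second expansion: generate the matching name table. */
#define X(name) #name,
static const char *const l2_state_names[] = { L2_STATE_LIST };
#undef X

int main(void)
{
	int s;

	for (s = 0; s < L2_STATE_COUNT; s++)
		printf("%d -> %s\n", s, l2_state_names[s]);
	return 0;
}
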
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_mcu_states.h b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_mcu_states.h
+new file mode 100644
+index 0000000..4e99928
+--- /dev/null
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_mcu_states.h
+@@ -0,0 +1,63 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++/*
++ *
++ * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
++ *
++ * This program is free software and is provided to you under the terms of the
++ * GNU General Public License version 2 as published by the Free Software
++ * Foundation, and any use by you of this program is subject to the terms
++ * of such GNU license.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, you can access it online at
++ * http://www.gnu.org/licenses/gpl-2.0.html.
++ *
++ */
++
++/*
++ * Backend-specific Power Manager MCU state definitions.
++ * The function-like macro KBASEP_MCU_STATE() must be defined before including
++ * this header file. This header file can be included multiple times in the
++ * same compilation unit with different definitions of KBASEP_MCU_STATE().
++ *
++ * @OFF:                      The MCU is powered off.
++ * @PEND_ON_RELOAD:           The warm boot of MCU or cold boot of MCU (with
++ *                            firmware reloading) is in progress.
++ * @ON_GLB_REINIT_PEND:       The MCU is enabled and Global configuration
++ *                            requests have been sent to the firmware.
++ * @ON_HWCNT_ENABLE:          The Global requests have completed and MCU is now
++ *                            ready for use and hwcnt is being enabled.
++ * @ON:                       The MCU is active and hwcnt has been enabled.
++ * @ON_CORE_ATTR_UPDATE_PEND: The MCU is active and mask of enabled shader cores
++ *                            is being updated.
++ * @ON_HWCNT_DISABLE:         The MCU is on and hwcnt is being disabled.
++ * @ON_HALT:                  The MCU is on and hwcnt has been disabled, MCU
++ *                            halt would be triggered.
++ * @ON_PEND_HALT:             MCU halt in progress, confirmation pending.
++ * @POWER_DOWN:               The MCU has halted and is about to be disabled.
++ * @PEND_OFF:                 The MCU is being disabled, pending power off.
++ * @RESET_WAIT:               The GPU is resetting, MCU state is unknown.
++ */
++KBASEP_MCU_STATE(OFF)
++KBASEP_MCU_STATE(PEND_ON_RELOAD)
++KBASEP_MCU_STATE(ON_GLB_REINIT_PEND)
++KBASEP_MCU_STATE(ON_HWCNT_ENABLE)
++KBASEP_MCU_STATE(ON)
++KBASEP_MCU_STATE(ON_CORE_ATTR_UPDATE_PEND)
++KBASEP_MCU_STATE(ON_HWCNT_DISABLE)
++KBASEP_MCU_STATE(ON_HALT)
++KBASEP_MCU_STATE(ON_PEND_HALT)
++KBASEP_MCU_STATE(POWER_DOWN)
++KBASEP_MCU_STATE(PEND_OFF)
++KBASEP_MCU_STATE(RESET_WAIT)
++/* Additional MCU states with HOST_CONTROL_SHADERS */
++KBASEP_MCU_STATE(HCTL_SHADERS_PEND_ON)
++KBASEP_MCU_STATE(HCTL_CORES_NOTIFY_PEND)
++KBASEP_MCU_STATE(HCTL_MCU_ON_RECHECK)
++KBASEP_MCU_STATE(HCTL_SHADERS_READY_OFF)
++KBASEP_MCU_STATE(HCTL_SHADERS_PEND_OFF)
+diff --git a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c
+index de3babe..69e8dd3 100644
+--- a/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c
++++ b/dvalin/kernel/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c
+@@ -1,11 +1,12 @@
++// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ /*
+  *
+- * (C) COPYRIGHT 2011-2020 ARM Limited. All rights reserved.
++ * (C) COPYRIGHT 2011-2021 ARM Limited. All rights reserved.
+  *
+  * This program is free software and is provided to you under the terms of the
+  * GNU General Public License version 2 as published by the Free Software
+  * Foundation, and any use by you of this program is subject to the terms
+- * of such GNU licence.
++ * of such GNU license.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+@@ -16,12 +17,8 @@
+  * along with this program; if not, you can access it online at
+  * http://www.gnu.org/licenses/gpl-2.0.html.
+  *
+- * SPDX-License-Identifier: GPL-2.0
+- *
+  */
+ 
+-
+-
+ /*
+  * Metrics for power management
+  */
+@@ -29,22 +26,28 @@
+ #include <mali_kbase.h>
+ #include <mali_kbase_pm.h>
+ #include <backend/gpu/mali_kbase_pm_internal.h>
++
++#if MALI_USE_CSF
++#include "backend/gpu/mali_kbase_clk_rate_trace_mgr.h"
++#include <csf/ipa_control/mali_kbase_csf_ipa_control.h>
++#else
+ #include <backend/gpu/mali_kbase_jm_rb.h>
++#endif /* !MALI_USE_CSF */
++
+ #include <backend/gpu/mali_kbase_pm_defs.h>
+ #include <mali_linux_trace.h>
+ 
+-/* When VSync is being hit aim for utilisation between 70-90% */
+-#define KBASE_PM_VSYNC_MIN_UTILISATION          70
+-#define KBASE_PM_VSYNC_MAX_UTILISATION          90
+-/* Otherwise aim for 10-40% */
+-#define KBASE_PM_NO_VSYNC_MIN_UTILISATION       10
+-#define KBASE_PM_NO_VSYNC_MAX_UTILISATION       40
+-
+ /* Shift used for kbasep_pm_metrics_data.time_busy/idle - units of (1 << 8) ns
+  * This gives a maximum period between samples of 2^(32+8)/100 ns = slightly
+- * under 11s. Exceeding this will cause overflow */
++ * under 11s. Exceeding this will cause overflow
++ */
+ #define KBASE_PM_TIME_SHIFT			8
+ 
++#if MALI_USE_CSF
++/* To get the GPU_ACTIVE value in nanosecond units */
++#define GPU_ACTIVE_SCALING_FACTOR ((u64)1E9)
++#endif
++
+ #ifdef CONFIG_MALI_MIDGARD_DVFS
+ static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
+ {
+@@ -71,11 +74,45 @@ static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
+ 
+ int kbasep_pm_metrics_init(struct kbase_device *kbdev)
+ {
+-	KBASE_DEBUG_ASSERT(kbdev != NULL);
++#if MALI_USE_CSF
++	struct kbase_ipa_control_perf_counter perf_counter;
++	int err;
+ 
++	/* One counter group */
++	const size_t NUM_PERF_COUNTERS = 1;
++
++	KBASE_DEBUG_ASSERT(kbdev != NULL);
+ 	kbdev->pm.backend.metrics.kbdev = kbdev;
++	kbdev->pm.backend.metrics.time_period_start = ktime_get();
++	kbdev->pm.backend.metrics.values.time_busy = 0;
++	kbdev->pm.backend.metrics.values.time_idle = 0;
++	kbdev->pm.backend.metrics.values.time_in_protm = 0;
++
++	perf_counter.scaling_factor = GPU_ACTIVE_SCALING_FACTOR;
+ 
++	/* Normalize values by GPU frequency */
++	perf_counter.gpu_norm = true;
++
++	/* We need the GPU_ACTIVE counter, which is in the CSHW group */
++	perf_counter.type = KBASE_IPA_CORE_TYPE_CSHW;
++
++	/* We need the GPU_ACTIVE counter */
++	perf_counter.idx = GPU_ACTIVE_CNT_IDX;
++
++	err = kbase_ipa_control_register(
++		kbdev, &perf_counter, NUM_PERF_COUNTERS,
++		&kbdev->pm.backend.metrics.ipa_control_client);
++	if (err) {
++		dev_err(kbdev->dev,
++			"Failed to register IPA with kbase_ipa_control: err=%d",
++			err);
++		return -1;
++	}
++#else
++	KBASE_DEBUG_ASSERT(kbdev != NULL);
++	kbdev->pm.backend.metrics.kbdev = kbdev;
+ 	kbdev->pm.backend.metrics.time_period_start = ktime_get();
++
+ 	kbdev->pm.backend.metrics.gpu_active = false;
+ 	kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
+ 	kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
+@@ -89,16 +126,25 @@ int kbasep_pm_metrics_init(struct kbase_device *kbdev)
+ 	kbdev->pm.backend.metrics.values.busy_cl[1] = 0;
+ 	kbdev->pm.backend.metrics.values.busy_gl = 0;
+ 
++#endif
+ 	spin_lock_init(&kbdev->pm.backend.metrics.lock);
+ 
+ #ifdef CONFIG_MALI_MIDGARD_DVFS
+ 	hrtimer_init(&kbdev->pm.backend.metrics.timer, CLOCK_MONOTONIC,
+ 							HRTIMER_MODE_REL);
+ 	kbdev->pm.backend.metrics.timer.function = dvfs_callback;
+-
++	kbdev->pm.backend.metrics.initialized = true;
+ 	kbase_pm_metrics_start(kbdev);
+ #endif /* CONFIG_MALI_MIDGARD_DVFS */
+ 
++#if MALI_USE_CSF
++	/* The sanity check on the GPU_ACTIVE performance counter
++	 * is skipped for Juno platforms that have timing problems.
++	 */
++	kbdev->pm.backend.metrics.skip_gpu_active_sanity_check =
++		of_machine_is_compatible("arm,juno");
++#endif
++
+ 	return 0;
+ }
+ KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init);
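
The CSF branch of kbasep_pm_metrics_init() describes the single GPU_ACTIVE counter it needs, registers it with ipa_control, and keeps the client handle for later queries and for unregistering in kbasep_pm_metrics_term(). A standalone sketch of that register/unregister shape — the descriptor fields, stub functions, and index values are assumptions, not the driver's exact API:

#include <stdio.h>

struct perf_counter_desc {
	unsigned long long scaling_factor; /* 1e9: report in nanoseconds */
	int gpu_norm;                      /* normalize by GPU frequency */
	int type;                          /* counter block, e.g. CSHW */
	int idx;                           /* counter slot, e.g. GPU_ACTIVE */
};

static int ipa_register(const struct perf_counter_desc *desc, int num,
			void **client)
{
	static int handle; /* dummy client object */

	(void)desc;
	(void)num;
	*client = &handle;
	return 0;
}

static void ipa_unregister(void *client)
{
	(void)client;
}

static int metrics_init(void **client)
{
	struct perf_counter_desc desc = {
		.scaling_factor = 1000000000ULL,
		.gpu_norm = 1,
		.type = 0, /* CSHW group: encoding assumed */
		.idx = 6,  /* GPU_ACTIVE index: value assumed */
	};
	int err = ipa_register(&desc, 1, client);

	if (err)
		fprintf(stderr, "ipa_control register failed: %d\n", err);
	return err;
}

static void metrics_term(void *client)
{
	ipa_unregister(client);
}

int main(void)
{
	void *client = NULL;

	if (metrics_init(&client))
		return 1;
	metrics_term(client);
	return 0;
}
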
+@@ -115,7 +161,13 @@ void kbasep_pm_metrics_term(struct kbase_device *kbdev)
+ 	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+ 
+ 	hrtimer_cancel(&kbdev->pm.backend.metrics.timer);
++	kbdev->pm.backend.metrics.initialized = false;
+ #endif /* CONFIG_MALI_MIDGARD_DVFS */
++
++#if MALI_USE_CSF
++	kbase_ipa_control_unregister(
++		kbdev, kbdev->pm.backend.metrics.ipa_control_client);
++#endif
+ }
+ 
+ KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term);
+@@ -123,8 +175,117 @@ KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term);
+ /* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
+  * function
+  */
++#if MALI_USE_CSF
++#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
++static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev)
++{
++	int err;
++	u64 gpu_active_counter;
++	u64 protected_time;
++	ktime_t now;
++
++	lockdep_assert_held(&kbdev->pm.backend.metrics.lock);
++
++	/* Query IPA_CONTROL for the latest GPU-active and protected-time
++	 * info.
++	 */
++	err = kbase_ipa_control_query(
++		kbdev, kbdev->pm.backend.metrics.ipa_control_client,
++		&gpu_active_counter, 1, &protected_time);
++
++	/* Read the timestamp after reading the GPU_ACTIVE counter value.
++	 * This ensures the time gap between the 2 reads is consistent for
++	 * a meaningful comparison between the increment of GPU_ACTIVE and
++	 * elapsed time. The lock taken inside kbase_ipa_control_query()
++	 * function can cause lot of variation.
++	 */
++	now = ktime_get();
++
++	if (err) {
++		dev_err(kbdev->dev,
++			"Failed to query the increment of GPU_ACTIVE counter: err=%d",
++			err);
++	} else {
++		u64 diff_ns;
++		s64 diff_ns_signed;
++		u32 ns_time;
++		ktime_t diff = ktime_sub(
++			now, kbdev->pm.backend.metrics.time_period_start);
++
++		diff_ns_signed = ktime_to_ns(diff);
++
++		if (diff_ns_signed < 0)
++			return;
++
++		diff_ns = (u64)diff_ns_signed;
++
++#if !IS_ENABLED(CONFIG_MALI_NO_MALI)
++		/* The GPU_ACTIVE counter shouldn't clock-up more time than has
++		 * actually elapsed - but still some margin needs to be given
++		 * when doing the comparison. There could be some drift between
++		 * the CPU and GPU clock.
++		 *
++		 * Can do the check only in a real driver build, as an arbitrary
++		 * value for GPU_ACTIVE can be fed into the dummy model in no_mali
++		 * configuration which may not correspond to the real elapsed
++		 * time.
++		 */
++		if (!kbdev->pm.backend.metrics.skip_gpu_active_sanity_check) {
++			/* Use a margin of roughly 1.5% (diff_ns / 64) of
++			 * the time difference.
++			 */
++			u64 margin_ns = diff_ns >> 6;
++			if (gpu_active_counter > (diff_ns + margin_ns)) {
++				dev_info(
++					kbdev->dev,
++					"GPU activity takes longer than time interval: %llu ns > %llu ns",
++					(unsigned long long)gpu_active_counter,
++					(unsigned long long)diff_ns);
++			}
++		}
++#endif
++		/* Calculate time difference in units of 256ns */
++		ns_time = (u32)(diff_ns >> KBASE_PM_TIME_SHIFT);
++
++		/* Add protected_time to gpu_active_counter so that time in
++		 * protected mode is included in the apparent GPU active time,
++		 * then convert it from units of 1ns to units of 256ns, to
++		 * match what JM GPUs use. The assumption is made here that the
++		 * GPU is 100% busy while in protected mode, so we should add
++		 * this since the GPU can't (and thus won't) update these
++		 * counters while it's actually in protected mode.
++		 *
++		 * Perform the add after dividing each value down, to reduce
++		 * the chances of overflows.
++		 */
++		protected_time >>= KBASE_PM_TIME_SHIFT;
++		gpu_active_counter >>= KBASE_PM_TIME_SHIFT;
++		gpu_active_counter += protected_time;
++
++		/* Ensure the following equations don't go wrong if ns_time is
++		 * slightly larger than gpu_active_counter somehow
++		 */
++		gpu_active_counter = MIN(gpu_active_counter, ns_time);
++
++		kbdev->pm.backend.metrics.values.time_busy +=
++			gpu_active_counter;
++
++		kbdev->pm.backend.metrics.values.time_idle +=
++			ns_time - gpu_active_counter;
++
++		/* Also make time in protected mode available explicitly,
++		 * so users of this data have this info, too.
++		 */
++		kbdev->pm.backend.metrics.values.time_in_protm +=
++			protected_time;
++	}
++
++	kbdev->pm.backend.metrics.time_period_start = now;
++}
++#endif /* defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS) */
++#else
+ static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
+-								ktime_t now)
++					       ktime_t now)
+ {
+ 	ktime_t diff;
+ 
+@@ -149,12 +310,13 @@ static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
+ 		if (kbdev->pm.backend.metrics.active_gl_ctx[2])
+ 			kbdev->pm.backend.metrics.values.busy_gl += ns_time;
+ 	} else {
+-		kbdev->pm.backend.metrics.values.time_idle += (u32) (ktime_to_ns(diff)
+-							>> KBASE_PM_TIME_SHIFT);
++		kbdev->pm.backend.metrics.values.time_idle +=
++			(u32)(ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT);
+ 	}
+ 
+ 	kbdev->pm.backend.metrics.time_period_start = now;
+ }
++#endif  /* MALI_USE_CSF */
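
Both variants of kbase_pm_get_dvfs_utilisation_calc() accumulate busy/idle time in units of (1 << KBASE_PM_TIME_SHIFT) ns, i.e. 256 ns ticks, and the CSF path clamps the counter value so that busy time never exceeds elapsed time. A compact standalone model of that bookkeeping:

#include <stdint.h>
#include <stdio.h>

#define PM_TIME_SHIFT 8 /* accumulate in 256 ns units, as in the driver */

struct metrics {
	uint32_t time_busy; /* 256 ns ticks */
	uint32_t time_idle; /* 256 ns ticks */
};

/* Fold one sample period into the running totals.
 * busy_ns comes from the GPU_ACTIVE counter, elapsed_ns from ktime.
 */
static void accumulate(struct metrics *m, uint64_t busy_ns,
		       uint64_t elapsed_ns)
{
	uint64_t busy = busy_ns >> PM_TIME_SHIFT;
	uint64_t elapsed = elapsed_ns >> PM_TIME_SHIFT;

	/* Clock drift can make the counter exceed wall time: clamp. */
	if (busy > elapsed)
		busy = elapsed;

	m->time_busy += (uint32_t)busy;
	m->time_idle += (uint32_t)(elapsed - busy);
}

int main(void)
{
	struct metrics m = { 0, 0 };

	accumulate(&m, 700000, 1000000); /* 0.7 ms busy out of 1 ms */
	printf("busy=%u idle=%u utilisation=%u%%\n", m.time_busy, m.time_idle,
	       (unsigned)(100ull * m.time_busy / (m.time_busy + m.time_idle)));
	return 0;
}
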
+ 
+ #if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
+ void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
+@@ -165,14 +327,23 @@ void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
++#if MALI_USE_CSF
++	kbase_pm_get_dvfs_utilisation_calc(kbdev);
++#else
+ 	kbase_pm_get_dvfs_utilisation_calc(kbdev, ktime_get());