// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/firmware.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/clk/qcom.h>
#include "adreno.h"
#include "adreno_a6xx.h"
#include "adreno_llc.h"
#include "adreno_pm4types.h"
#include "adreno_trace.h"
#include "kgsl_trace.h"
/* IFPC & Preemption static powerup restore list */
static u32 a6xx_pwrup_reglist[] = {
A6XX_VSC_ADDR_MODE_CNTL,
A6XX_GRAS_ADDR_MODE_CNTL,
A6XX_RB_ADDR_MODE_CNTL,
A6XX_PC_ADDR_MODE_CNTL,
A6XX_HLSQ_ADDR_MODE_CNTL,
A6XX_VFD_ADDR_MODE_CNTL,
A6XX_VPC_ADDR_MODE_CNTL,
A6XX_UCHE_ADDR_MODE_CNTL,
A6XX_SP_ADDR_MODE_CNTL,
A6XX_TPL1_ADDR_MODE_CNTL,
A6XX_UCHE_WRITE_RANGE_MAX_LO,
A6XX_UCHE_WRITE_RANGE_MAX_HI,
A6XX_UCHE_TRAP_BASE_LO,
A6XX_UCHE_TRAP_BASE_HI,
A6XX_UCHE_WRITE_THRU_BASE_LO,
A6XX_UCHE_WRITE_THRU_BASE_HI,
A6XX_UCHE_GMEM_RANGE_MIN_LO,
A6XX_UCHE_GMEM_RANGE_MIN_HI,
A6XX_UCHE_GMEM_RANGE_MAX_LO,
A6XX_UCHE_GMEM_RANGE_MAX_HI,
A6XX_UCHE_FILTER_CNTL,
A6XX_UCHE_CACHE_WAYS,
A6XX_UCHE_MODE_CNTL,
A6XX_RB_NC_MODE_CNTL,
A6XX_TPL1_NC_MODE_CNTL,
A6XX_SP_NC_MODE_CNTL,
A6XX_PC_DBG_ECO_CNTL,
A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
};
/* IFPC only static powerup restore list */
static u32 a6xx_ifpc_pwrup_reglist[] = {
A6XX_CP_CHICKEN_DBG,
A6XX_CP_DBG_ECO_CNTL,
A6XX_CP_PROTECT_CNTL,
A6XX_CP_PROTECT_REG,
A6XX_CP_PROTECT_REG+1,
A6XX_CP_PROTECT_REG+2,
A6XX_CP_PROTECT_REG+3,
A6XX_CP_PROTECT_REG+4,
A6XX_CP_PROTECT_REG+5,
A6XX_CP_PROTECT_REG+6,
A6XX_CP_PROTECT_REG+7,
A6XX_CP_PROTECT_REG+8,
A6XX_CP_PROTECT_REG+9,
A6XX_CP_PROTECT_REG+10,
A6XX_CP_PROTECT_REG+11,
A6XX_CP_PROTECT_REG+12,
A6XX_CP_PROTECT_REG+13,
A6XX_CP_PROTECT_REG+14,
A6XX_CP_PROTECT_REG+15,
A6XX_CP_PROTECT_REG+16,
A6XX_CP_PROTECT_REG+17,
A6XX_CP_PROTECT_REG+18,
A6XX_CP_PROTECT_REG+19,
A6XX_CP_PROTECT_REG+20,
A6XX_CP_PROTECT_REG+21,
A6XX_CP_PROTECT_REG+22,
A6XX_CP_PROTECT_REG+23,
A6XX_CP_PROTECT_REG+24,
A6XX_CP_PROTECT_REG+25,
A6XX_CP_PROTECT_REG+26,
A6XX_CP_PROTECT_REG+27,
A6XX_CP_PROTECT_REG+28,
A6XX_CP_PROTECT_REG+29,
A6XX_CP_PROTECT_REG+30,
A6XX_CP_PROTECT_REG+31,
A6XX_CP_AHB_CNTL,
};
/* a620 and a650 need to program A6XX_CP_PROTECT_REG + 47 for the infinite span */
static u32 a650_pwrup_reglist[] = {
A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
A6XX_CP_PROTECT_REG + 47,
};
/* Applicable to a640 and a680 */
static u32 a640_pwrup_reglist[] = {
A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
};
/* Applicable to a630 */
static u32 a630_pwrup_reglist[] = {
A6XX_RBBM_VBIF_CLIENT_QOS_CNTL,
};
/* Applicable to a615 family */
static u32 a615_pwrup_reglist[] = {
A6XX_RBBM_VBIF_CLIENT_QOS_CNTL,
A6XX_UCHE_GBIF_GX_CONFIG,
};
/* Applicable to a612 */
static u32 a612_pwrup_reglist[] = {
A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
A6XX_RBBM_PERFCTR_CNTL,
};
static void _update_always_on_regs(struct adreno_device *adreno_dev)
{
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
unsigned int *const regs = gpudev->reg_offsets->offsets;
regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO] =
A6XX_CP_ALWAYS_ON_COUNTER_LO;
regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI] =
A6XX_CP_ALWAYS_ON_COUNTER_HI;
}
static void a6xx_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
adreno_dev->highest_bank_bit = 13;
of_property_read_u32(device->pdev->dev.of_node,
"qcom,highest-bank-bit", &adreno_dev->highest_bank_bit);
if (WARN(adreno_dev->highest_bank_bit < 13 ||
adreno_dev->highest_bank_bit > 16,
"The highest-bank-bit property is invalid\n"))
adreno_dev->highest_bank_bit =
clamp_t(unsigned int, adreno_dev->highest_bank_bit,
13, 16);
/* The LPDDR4 highest bank bit is different and needs to be overridden */
if (adreno_is_a650(adreno_dev) && of_fdt_get_ddrtype() == 0x7)
adreno_dev->highest_bank_bit = 15;
else if (adreno_is_a610(adreno_dev) && of_fdt_get_ddrtype() == 0x5) {
/*
* LPDDR3 has multiple possible highest bank bit values
* depending on the DDR density. Query this value from the
* FDT; if the FDT query fails, fall back to the value in the
* GPU DT node.
*/
int hbb = of_fdt_get_ddrhbb(0, 0);
if (hbb > 0)
adreno_dev->highest_bank_bit = hbb;
}
a6xx_crashdump_init(adreno_dev);
/*
* If the GMU is not enabled, rewrite the offset for the always on
* counters to point to the CP always on instead of GMU always on
*/
if (!gmu_core_isenabled(device))
_update_always_on_regs(adreno_dev);
kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
"powerup_register_list");
}
static void a6xx_protect_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
const struct a6xx_protected_regs *regs = a6xx_core->protected_regs;
int i;
/*
* Enable access protection to privileged registers, fault on an access
* protect violation and select the last span to protect from the start
* address all the way to the end of the register address space
*/
kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL,
(1 << 0) | (1 << 1) | (1 << 3));
/* Program each register defined by the core definition */
for (i = 0; regs[i].reg; i++) {
u32 count;
/*
* This is the offset of the end register as counted from the
* start, i.e. # of registers in the range - 1
*/
count = regs[i].end - regs[i].start;
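/*
 * CP_PROTECT_REG layout as written below: bits [17:0] hold the
 * starting register offset, bits [30:18] hold the span length
 * minus one, and bit 31 marks the range as no-access.
 */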
kgsl_regwrite(device, regs[i].reg,
(regs[i].start & 0x3ffff) | ((count & 0x1fff) << 18) |
(regs[i].noaccess << 31));
}
}
static void a6xx_enable_64bit(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
kgsl_regwrite(device, A6XX_CP_ADDR_MODE_CNTL, 0x1);
kgsl_regwrite(device, A6XX_VSC_ADDR_MODE_CNTL, 0x1);
kgsl_regwrite(device, A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
kgsl_regwrite(device, A6XX_RB_ADDR_MODE_CNTL, 0x1);
kgsl_regwrite(device, A6XX_PC_ADDR_MODE_CNTL, 0x1);
kgsl_regwrite(device, A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
kgsl_regwrite(device, A6XX_VFD_ADDR_MODE_CNTL, 0x1);
kgsl_regwrite(device, A6XX_VPC_ADDR_MODE_CNTL, 0x1);
kgsl_regwrite(device, A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
kgsl_regwrite(device, A6XX_SP_ADDR_MODE_CNTL, 0x1);
kgsl_regwrite(device, A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}
static inline unsigned int
__get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev)
{
if (adreno_is_a630(adreno_dev))
return 0x8AA8AA02;
else if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev))
return 0xAAA8AA82;
else if (adreno_is_a702(adreno_dev))
return 0xAAAAAA82;
else
return 0x8AA8AA82;
}
static inline unsigned int
__get_gmu_ao_cgc_mode_cntl(struct adreno_device *adreno_dev)
{
if (adreno_is_a612(adreno_dev))
return 0x00000022;
else if (adreno_is_a615_family(adreno_dev))
return 0x00000222;
else
return 0x00020202;
}
static inline unsigned int
__get_gmu_ao_cgc_delay_cntl(struct adreno_device *adreno_dev)
{
if (adreno_is_a612(adreno_dev))
return 0x00000011;
else if (adreno_is_a615_family(adreno_dev))
return 0x00000111;
else
return 0x00010111;
}
static inline unsigned int
__get_gmu_ao_cgc_hyst_cntl(struct adreno_device *adreno_dev)
{
if (adreno_is_a612(adreno_dev))
return 0x00000055;
else if (adreno_is_a615_family(adreno_dev))
return 0x00000555;
else
return 0x00005555;
}
static unsigned int __get_gmu_wfi_config(struct adreno_device *adreno_dev)
{
if (adreno_is_a620(adreno_dev) || adreno_is_a640(adreno_dev) ||
adreno_is_a650(adreno_dev))
return 0x00000002;
return 0x00000000;
}
static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
unsigned int value;
int i;
if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
on = false;
if (gmu_core_isenabled(device)) {
gmu_core_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
on ? __get_gmu_ao_cgc_mode_cntl(adreno_dev) : 0);
gmu_core_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
on ? __get_gmu_ao_cgc_delay_cntl(adreno_dev) : 0);
gmu_core_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
on ? __get_gmu_ao_cgc_hyst_cntl(adreno_dev) : 0);
gmu_core_regwrite(device, A6XX_GMU_CX_GMU_WFI_CONFIG,
on ? __get_gmu_wfi_config(adreno_dev) : 0);
}
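/* Nothing to do if HWCG is already in the requested state */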
kgsl_regread(device, A6XX_RBBM_CLOCK_CNTL, &value);
if (value == __get_rbbm_clock_cntl_on(adreno_dev) && on)
return;
if (value == 0 && !on)
return;
/*
* Disable the SP clock before programming HWCG registers.
* A612, A610 and A702 do not have the GX power domain, so
* skip the GMU_GX register write for those targets.
*/
if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev) &&
!adreno_is_a610(adreno_dev) && !(adreno_is_a702(adreno_dev)))
gmu_core_regrmw(device,
A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
for (i = 0; i < a6xx_core->hwcg_count; i++)
kgsl_regwrite(device, a6xx_core->hwcg[i].offset,
on ? a6xx_core->hwcg[i].value : 0);
/*
* Enable the SP clock after programming HWCG registers.
* A612, A610 and A702 do not have the GX power domain, so
* skip the GMU_GX register write for those targets.
*/
if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev) &&
!adreno_is_a610(adreno_dev) && !adreno_is_a702(adreno_dev))
gmu_core_regrmw(device,
A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
/* enable top level HWCG */
kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL,
on ? __get_rbbm_clock_cntl_on(adreno_dev) : 0);
}
struct a6xx_reglist_list {
u32 *regs;
u32 count;
};
#define REGLIST(_a) \
(struct a6xx_reglist_list) { .regs = _a, .count = ARRAY_SIZE(_a), }
static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
{
struct a6xx_reglist_list reglist[3];
void *ptr = adreno_dev->pwrup_reglist.hostptr;
struct cpu_gpu_lock *lock = ptr;
int items = 0, i, j;
u32 *dest = ptr + sizeof(*lock);
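/*
 * Layout of the powerup reglist buffer: the cpu_gpu_lock header is
 * followed by a flat array of dwords, written below as
 * (register offset, current value) pairs for each list entry.
 */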
/* Static IFPC-only registers */
reglist[items++] = REGLIST(a6xx_ifpc_pwrup_reglist);
/* Static IFPC + preemption registers */
reglist[items++] = REGLIST(a6xx_pwrup_reglist);
/* Add target specific registers */
if (adreno_is_a612(adreno_dev))
reglist[items++] = REGLIST(a612_pwrup_reglist);
else if (adreno_is_a615_family(adreno_dev))
reglist[items++] = REGLIST(a615_pwrup_reglist);
else if (adreno_is_a630(adreno_dev))
reglist[items++] = REGLIST(a630_pwrup_reglist);
else if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev))
reglist[items++] = REGLIST(a640_pwrup_reglist);
else if (adreno_is_a650(adreno_dev) || adreno_is_a620(adreno_dev))
reglist[items++] = REGLIST(a650_pwrup_reglist);
/*
* For each entry in each of the lists, write the offset and the current
* register value into the GPU buffer
*/
for (i = 0; i < items; i++) {
u32 *r = reglist[i].regs;
for (j = 0; j < reglist[i].count; j++) {
*dest++ = r[j];
kgsl_regread(KGSL_DEVICE(adreno_dev), r[j], dest++);
}
lock->list_length += reglist[i].count * 2;
}
/*
* The overall register list is composed of
* 1. Static IFPC-only registers
* 2. Static IFPC + preemption registers
* 3. Dynamic IFPC + preemption registers (ex: perfcounter selects)
*
* The CP views the second and third entries as one dynamic list
* starting from list_offset. list_length should be the total dwords in
* all the lists and list_offset should be specified as the size in
* dwords of the first entry in the list.
*/
lock->list_offset = reglist[0].count * 2;
}
/*
* a6xx_start() - Device start
* @adreno_dev: Pointer to adreno device
*
* a6xx device start
*/
static void a6xx_start(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
unsigned int bit, lower_bit, mal, mode, upper_bit;
unsigned int uavflagprd_inv;
unsigned int amsbc = 0;
unsigned int rgb565_predicator = 0;
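/* Tracks whether the powerup reglist has already been patched (done once) */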
static bool patch_reglist;
/* runtime adjust callbacks based on feature sets */
if (!gmu_core_isenabled(device))
/* Legacy idle management if the GMU is disabled */
ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;
/* enable hardware clockgating */
a6xx_hwcg_set(adreno_dev, true);
/* Set up VBIF registers from the GPU core definition */
adreno_reglist_write(adreno_dev, a6xx_core->vbif,
a6xx_core->vbif_count);
if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW))
kgsl_regwrite(device, A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9);
/* Make all blocks contribute to the GPU BUSY perf counter */
kgsl_regwrite(device, A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
/*
* Set UCHE_WRITE_THRU_BASE to the UCHE_TRAP_BASE effectively
* disabling L2 bypass
*/
kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
/*
* Some A6xx targets no longer use a programmed UCHE GMEM base
* address, so only write the registers if this address is
* non-zero.
*/
if (adreno_dev->uche_gmem_base) {
kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_LO,
adreno_dev->uche_gmem_base);
kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_LO,
adreno_dev->uche_gmem_base +
adreno_dev->gpucore->gmem_size - 1);
kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);
}
kgsl_regwrite(device, A6XX_UCHE_FILTER_CNTL, 0x804);
kgsl_regwrite(device, A6XX_UCHE_CACHE_WAYS, 0x4);
if (adreno_is_a640_family(adreno_dev) ||
adreno_is_a650_family(adreno_dev)) {
kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
} else if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev) ||
adreno_is_a702(adreno_dev)) {
kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
} else {
kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x010000C0);
kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
}
if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev)) {
/* For A612 and A610 the mem pool size is reduced to 48 */
kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 48);
kgsl_regwrite(device, A6XX_CP_MEM_POOL_DBG_ADDR, 47);
} else if (adreno_is_a702(adreno_dev)) {
kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 64);
kgsl_regwrite(device, A6XX_CP_MEM_POOL_DBG_ADDR, 63);
} else {
kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 128);
}
/* Set the primFifo threshold values */
kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL,
a6xx_core->prim_fifo_threshold);
/* Set the AHB default slave response to "ERROR" */
kgsl_regwrite(device, A6XX_CP_AHB_CNTL, 0x1);
/* Turn on performance counters */
kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);
/* Turn on the IFPC counter (countable 4 on XOCLK4) */
if (gmu_core_isenabled(device))
gmu_core_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1,
0xff, 0x4);
/* Turn on GX_MEM retention */
if (gmu_core_isenabled(device) && adreno_is_a612(adreno_dev)) {
kgsl_regwrite(device, A6XX_RBBM_BLOCK_GX_RETENTION_CNTL, 0x7FB);
/* For CP IPC interrupt */
kgsl_regwrite(device, A6XX_RBBM_INT_2_MASK, 0x00000010);
}
if (of_property_read_u32(device->pdev->dev.of_node,
"qcom,min-access-length", &mal))
mal = 32;
if (of_property_read_u32(device->pdev->dev.of_node,
"qcom,ubwc-mode", &mode))
mode = 0;
switch (mode) {
case KGSL_UBWC_1_0:
mode = 1;
break;
case KGSL_UBWC_2_0:
mode = 0;
break;
case KGSL_UBWC_3_0:
mode = 0;
amsbc = 1; /* Only valid for A640 and A680 */
break;
case KGSL_UBWC_4_0:
mode = 0;
rgb565_predicator = 1;
amsbc = 1;
break;
default:
break;
}
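/*
 * The highest bank bit is programmed as an offset from bit 13 and is
 * split across two register fields: the low two bits (lower_bit) and
 * a third bit (upper_bit) that goes into a separate field.
 */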
bit = adreno_dev->highest_bank_bit ?
adreno_dev->highest_bank_bit - 13 : 0;
lower_bit = bit & 0x3;
upper_bit = (bit >> 0x2) & 1;
mal = (mal == 64) ? 1 : 0;
uavflagprd_inv = (adreno_is_a650_family(adreno_dev)) ? 2 : 0;
kgsl_regwrite(device, A6XX_RB_NC_MODE_CNTL, (rgb565_predicator << 11)|
(upper_bit << 10) | (amsbc << 4) | (mal << 3) |
(lower_bit << 1) | mode);
kgsl_regwrite(device, A6XX_TPL1_NC_MODE_CNTL, (upper_bit << 4) |
(mal << 3) | (lower_bit << 1) | mode);
kgsl_regwrite(device, A6XX_SP_NC_MODE_CNTL, (upper_bit << 10) |
(mal << 3) | (uavflagprd_inv << 4) |
(lower_bit << 1) | mode);
kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (mal << 23) |
(lower_bit << 21));
kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
(1 << 30) | a6xx_core->hang_detect_cycles);
kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);
/* Set weights for bicubic filtering */
if (adreno_is_a650_family(adreno_dev)) {
kgsl_regwrite(device, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
kgsl_regwrite(device, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
0x3FE05FF4);
kgsl_regwrite(device, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
0x3FA0EBEE);
kgsl_regwrite(device, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
0x3F5193ED);
kgsl_regwrite(device, A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
0x3F0243F0);
}
/* Set TWOPASSUSEWFI in A6XX_PC_DBG_ECO_CNTL if requested */
if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
/* Set the bit vccCacheSkipDis=1 to get rid of TSEskip logic */
if (a6xx_core->disable_tseskip)
kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 9));
/*
* Set the HLSQCluster3ContextDis bit for A702 since HLSQ doesn't
* have a context buffer for the third context
*/
if (adreno_is_a702(adreno_dev))
kgsl_regwrite(device, A6XX_CP_CHICKEN_DBG, (1 << 24));
/* Enable the GMEM save/restore feature for preemption */
if (adreno_is_preemption_enabled(adreno_dev))
kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
0x1);
/*
* Enable GMU power counter 0 to count GPU busy. This is applicable to
* all a6xx targets
*/
kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
kgsl_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
a6xx_protect_init(adreno_dev);
if (!patch_reglist && (adreno_dev->pwrup_reglist.gpuaddr != 0)) {
a6xx_patch_pwrup_reglist(adreno_dev);
patch_reglist = true;
}
a6xx_preemption_start(adreno_dev);
/*
* We start LM here because we want all of the following to be up:
* 1. GX HS
* 2. SPTPRAC
* 3. HFI
* At this point, all of these are guaranteed to be up.
*/
gmu_core_dev_enable_lm(device);
}
/*
* a6xx_zap_load() - Load zap shader
* @adreno_dev: Pointer to adreno device
*/
static int a6xx_zap_load(struct adreno_device *adreno_dev)
{
const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
void *zap;
int ret = 0;
/* Load the zap shader firmware through PIL if it's available */
if (a6xx_core->zap_name && !adreno_dev->zap_loaded) {
zap = subsystem_get(a6xx_core->zap_name);
/* Return error if the zap shader cannot be loaded */
if (IS_ERR_OR_NULL(zap)) {
ret = (zap == NULL) ? -ENODEV : PTR_ERR(zap);
zap = NULL;
} else
adreno_dev->zap_loaded = 1;
}
return ret;
}
/*
* The CP_INIT_MAX_CONTEXT bit tells whether multiple hardware contexts
* can be used at once or if they should be serialized
*/
#define CP_INIT_MAX_CONTEXT BIT(0)
/* Enables register protection mode */
#define CP_INIT_ERROR_DETECTION_CONTROL BIT(1)
/* Header dump information */
#define CP_INIT_HEADER_DUMP BIT(2) /* Reserved */
/* Default Reset states enabled for PFP and ME */
#define CP_INIT_DEFAULT_RESET_STATE BIT(3)
/* Drawcall filter range */
#define CP_INIT_DRAWCALL_FILTER_RANGE BIT(4)
/* Ucode workaround masks */
#define CP_INIT_UCODE_WORKAROUND_MASK BIT(5)
/*
* Operation mode mask
*
* This ordinal provides the option to disable the
* save/restore of performance counters across preemption.
*/
#define CP_INIT_OPERATION_MODE_MASK BIT(6)
/* Register initialization list */
#define CP_INIT_REGISTER_INIT_LIST BIT(7)
/* Register initialization list with spinlock */
#define CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK BIT(8)
#define CP_INIT_MASK (CP_INIT_MAX_CONTEXT | \
CP_INIT_ERROR_DETECTION_CONTROL | \
CP_INIT_HEADER_DUMP | \
CP_INIT_DEFAULT_RESET_STATE | \
CP_INIT_UCODE_WORKAROUND_MASK | \
CP_INIT_OPERATION_MODE_MASK | \
CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK)
static void _set_ordinals(struct adreno_device *adreno_dev,
unsigned int *cmds, unsigned int count)
{
unsigned int *start = cmds;
/* Enabled ordinal mask */
*cmds++ = CP_INIT_MASK;
if (CP_INIT_MASK & CP_INIT_MAX_CONTEXT)
*cmds++ = 0x00000003;
if (CP_INIT_MASK & CP_INIT_ERROR_DETECTION_CONTROL)
*cmds++ = 0x20000000;
if (CP_INIT_MASK & CP_INIT_HEADER_DUMP) {
/* Header dump address */
*cmds++ = 0x00000000;
/* Header dump enable and dump size */
*cmds++ = 0x00000000;
}
if (CP_INIT_MASK & CP_INIT_UCODE_WORKAROUND_MASK)
*cmds++ = 0x00000000;
if (CP_INIT_MASK & CP_INIT_OPERATION_MODE_MASK)
*cmds++ = 0x00000002;
if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK) {
uint64_t gpuaddr = adreno_dev->pwrup_reglist.gpuaddr;
*cmds++ = lower_32_bits(gpuaddr);
*cmds++ = upper_32_bits(gpuaddr);
*cmds++ = 0;
}
/* Pad rest of the cmds with 0's */
while ((unsigned int)(cmds - start) < count)
*cmds++ = 0x0;
}
/*
* a6xx_send_cp_init() - Initialize ringbuffer
* @adreno_dev: Pointer to adreno device
* @rb: Pointer to the ringbuffer of device
*
* Submit commands for ME initialization.
*/
static int a6xx_send_cp_init(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
unsigned int *cmds;
int ret;
cmds = adreno_ringbuffer_allocspace(rb, 12);
if (IS_ERR(cmds))
return PTR_ERR(cmds);
*cmds++ = cp_type7_packet(CP_ME_INIT, 11);
_set_ordinals(adreno_dev, cmds, 11);
ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
if (ret) {
adreno_spin_idle_debug(adreno_dev,
"CP initialization failed to idle\n");
if (!adreno_is_a3xx(adreno_dev))
kgsl_sharedmem_writel(device, &device->scratch,
SCRATCH_RPTR_OFFSET(rb->id), 0);
rb->wptr = 0;
rb->_wptr = 0;
}
return ret;
}
/*
* Follow the ME_INIT sequence with a preemption yield to allow the GPU to move
* to a different ringbuffer, if desired
*/
static int _preemption_init(struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb, unsigned int *cmds,
struct kgsl_context *context)
{
unsigned int *cmds_orig = cmds;
/* Turn CP protection OFF on legacy targets */
if (!ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
cmds += cp_protected_mode(adreno_dev, cmds, 0);
*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 6);
*cmds++ = 1;
cmds += cp_gpuaddr(adreno_dev, cmds,
rb->preemption_desc.gpuaddr);
*cmds++ = 2;
cmds += cp_gpuaddr(adreno_dev, cmds,
rb->secure_preemption_desc.gpuaddr);
/* Turn CP protection back ON */
if (!ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
cmds += cp_protected_mode(adreno_dev, cmds, 1);
*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
*cmds++ = 0;
/* generate interrupt on preemption completion */
*cmds++ = 0;
return cmds - cmds_orig;
}
static int a6xx_post_start(struct adreno_device *adreno_dev)
{
int ret;
unsigned int *cmds, *start;
struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
if (!adreno_is_preemption_enabled(adreno_dev))
return 0;
cmds = adreno_ringbuffer_allocspace(rb, 42);
if (IS_ERR(cmds)) {
dev_err(device->dev,
"error allocating preemption init cmds\n");
return PTR_ERR(cmds);
}
start = cmds;
cmds += _preemption_init(adreno_dev, rb, cmds, NULL);
rb->_wptr = rb->_wptr - (42 - (cmds - start));
ret = adreno_ringbuffer_submit_spin_nosync(rb, NULL, 2000);
if (ret)
adreno_spin_idle_debug(adreno_dev,
"hw preemption initialization failed to idle\n");
return ret;
}
/*
* Some targets support marking certain transactions as always privileged which
* allows us to mark more memory as privileged without having to explicitly set
* the APRIV bit. For those targets, choose the following transactions to be
* privileged by default:
* CDWRITE [6:6] - Crashdumper writes
* CDREAD [5:5] - Crashdumper reads
* RBRPWB [3:3] - RPTR shadow writes
* RBPRIVLEVEL [2:2] - Memory accesses from PM4 packets in the ringbuffer
* RBFETCH [1:1] - Ringbuffer reads
*/
#define A6XX_APRIV_DEFAULT \
((1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1))
/*
* a6xx_rb_start() - Start the ringbuffer
* @adreno_dev: Pointer to adreno device
*/
static int a6xx_rb_start(struct adreno_device *adreno_dev)
{
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
struct kgsl_device *device = &adreno_dev->dev;
struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
uint64_t addr;
int ret;
addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);
adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);
/*
* The size of the ringbuffer in the hardware is the log2
* representation of the size in quadwords (sizedwords / 2).
*/
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
gpudev->cp_rb_cntl);
adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
kgsl_regwrite(device, A6XX_CP_APRIV_CNTL, A6XX_APRIV_DEFAULT);
/* Program the ucode base for CP */
kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_LO,
lower_32_bits(fw->memdesc.gpuaddr));
kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_HI,
upper_32_bits(fw->memdesc.gpuaddr));
/* Clear the SQE_HALT to start the CP engine */
kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);
ret = a6xx_send_cp_init(adreno_dev, rb);
if (ret)
return ret;
ret = a6xx_zap_load(adreno_dev);
if (ret)
return ret;
/* GPU comes up in secured mode, make it unsecured by default */
ret = adreno_set_unsecured_mode(adreno_dev, rb);
if (ret)
return ret;
return a6xx_post_start(adreno_dev);
}
/*
* a6xx_sptprac_enable() - Power on SPTPRAC
* @adreno_dev: Pointer to Adreno device
*/
static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
{
return a6xx_gmu_sptprac_enable(adreno_dev);
}
/*
* a6xx_sptprac_disable() - Power off SPTPRAC
* @adreno_dev: Pointer to Adreno device
*/
static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
{
a6xx_gmu_sptprac_disable(adreno_dev);
}
/*
* a6xx_sptprac_is_on() - Check if SPTP is on using the power status register
* @adreno_dev: Pointer to adreno device
*
* This check should only be performed if the keepalive bit is set or it
* can be guaranteed that the power state of the GPU will remain unchanged.
*/
bool a6xx_sptprac_is_on(struct adreno_device *adreno_dev)
{
if (!adreno_has_sptprac_gdsc(adreno_dev))
return true;
return a6xx_gmu_sptprac_is_on(adreno_dev);
}
unsigned int a6xx_set_marker(
unsigned int *cmds, enum adreno_cp_marker_type type)
{
unsigned int cmd = 0;
*cmds++ = cp_type7_packet(CP_SET_MARKER, 1);
/*
* Indicate the beginning and end of the IB1 list with a SET_MARKER.
* Among other things, this will implicitly enable and disable
* preemption respectively. IFPC can also be disabled and enabled
* with a SET_MARKER. Bit 8 tells the CP the marker is for IFPC.
*/
switch (type) {
case IFPC_DISABLE:
cmd = 0x101;
break;
case IFPC_ENABLE:
cmd = 0x100;
break;
case IB1LIST_START:
cmd = 0xD;
break;
case IB1LIST_END:
cmd = 0xE;
break;
}
*cmds++ = cmd;
return 2;
}
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
struct adreno_firmware *firmware)
{
const struct firmware *fw = NULL;
int ret;
ret = request_firmware(&fw, fwfile, device->dev);
if (ret) {
dev_err(device->dev, "request_firmware(%s) failed: %d\n",
fwfile, ret);
return ret;
}
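/*
 * The leading dword of the firmware image carries version info (read
 * below via adreno_get_ucode_version()), so skip it when copying the
 * microcode into the GPU buffer.
 */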
ret = kgsl_allocate_global(device, &firmware->memdesc, fw->size - 4,
KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_UCODE,
"ucode");
if (!ret) {
memcpy(firmware->memdesc.hostptr, &fw->data[4], fw->size - 4);
firmware->size = (fw->size - 4) / sizeof(uint32_t);
firmware->version = adreno_get_ucode_version((u32 *)fw->data);
}
release_firmware(fw);
return ret;
}
/*
* a6xx_gpu_keepalive() - GMU reg write to request GPU stays on
* @adreno_dev: Pointer to the adreno device that has the GMU
* @state: State to set: true is ON, false is OFF
*/
static inline void a6xx_gpu_keepalive(struct adreno_device *adreno_dev,
bool state)
{
if (!gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
return;
adreno_write_gmureg(adreno_dev,
ADRENO_REG_GMU_PWR_COL_KEEPALIVE, state);
}
/* Bitmask for GPU idle status check */
#define GPUBUSYIGNAHB BIT(23)
static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
{
unsigned int reg;
gmu_core_regread(KGSL_DEVICE(adreno_dev),
A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg);
if (reg & GPUBUSYIGNAHB)
return false;
return true;
}
/*
* a6xx_microcode_read() - Read microcode
* @adreno_dev: Pointer to adreno device
*/
static int a6xx_microcode_read(struct adreno_device *adreno_dev)
{
int ret;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
if (sqe_fw->memdesc.hostptr == NULL) {
ret = _load_firmware(device, a6xx_core->sqefw_name, sqe_fw);
if (ret)
return ret;
}
return 0;
}
static int a6xx_soft_reset(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
unsigned int reg;
if (gmu_core_isenabled(device))
return 0;
adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 1);
/*
* Do a dummy read to get a brief read cycle delay for the
* reset to take effect
*/
adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);
/* Clear GBIF client halt and CX arbiter halt */
adreno_deassert_gbif_halt(adreno_dev);
a6xx_sptprac_enable(adreno_dev);
return 0;
}
static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
int64_t adj = -1;
u32 a, b, c;
struct adreno_busy_data *busy = &adreno_dev->busy_data;
if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
return 0;
/* The counters are selected in a6xx_gmu_enable_lm() */
a = counter_delta(device, A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
&busy->throttle_cycles[0]);
b = counter_delta(device, A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
&busy->throttle_cycles[1]);
c = counter_delta(device, A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
&busy->throttle_cycles[2]);
/*
* The adjustment is the number of cycles lost to throttling, which
* is calculated as a weighted average of the cycles throttled
* at different levels. The adjustment is negative because in A6XX,
* the busy count includes the throttled cycles. Therefore, we want
* to remove them to prevent appearing to be busier than
* we actually are.
*/
if (adreno_is_a620(adreno_dev) || adreno_is_a650(adreno_dev))
/*
* With the newer generations, CRC throttling from SIDs of 0x14
* and above cannot be observed in the power counters. Since 90%
* throttling uses SID 0x16, the adjustment calculation needs
* correction. The throttling is in increments of 4.2%, and the
* 91.7% counter does a count weighted by the SID value used,
* which is taken into account in the final formula.
*/
adj *= div_s64((a * 42) + (b * 500) +
(div_s64((int64_t)c - a - b * 12, 22) * 917), 1000);
else
adj *= ((a * 5) + (b * 50) + (c * 90)) / 100;
trace_kgsl_clock_throttling(0, b, c, a, adj);
return adj;
}
/**
* a6xx_reset() - Helper function to reset the GPU
* @device: Pointer to the KGSL device structure for the GPU
* @fault: Type of fault. Needed to skip soft reset for MMU fault
*
* Try to reset the GPU to recover from a fault. First, try to do a low latency
* soft reset. If the soft reset fails for some reason, then bring out the big
* guns and toggle the footswitch.
*/
static int a6xx_reset(struct kgsl_device *device, int fault)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int ret;
/* Use the regular reset sequence when there is no GMU */
if (!gmu_core_isenabled(device))
return adreno_reset(device, fault);
/* Transition from ACTIVE to RESET state */
kgsl_pwrctrl_change_state(device, KGSL_STATE_RESET);
/* Since the device is officially off now, clear the start bit */
clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
ret = adreno_start(device, 0);
if (ret)
return ret;
kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
/*
* If active_cnt is zero, there is no need to keep the GPU active. So,
* we should transition to SLUMBER.
*/
if (!atomic_read(&device->active_cnt))
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
return 0;
}
static void a6xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
unsigned int status1, status2;
kgsl_regread(device, A6XX_CP_INTERRUPT_STATUS, &status1);
if (status1 & BIT(A6XX_CP_OPCODE_ERROR)) {
unsigned int opcode;
kgsl_regwrite(device, A6XX_CP_SQE_STAT_ADDR, 1);
kgsl_regread(device, A6XX_CP_SQE_STAT_DATA, &opcode);
dev_crit_ratelimited(device->dev,
"CP opcode error interrupt | opcode=0x%8.8x\n", opcode);
}
if (status1 & BIT(A6XX_CP_UCODE_ERROR))
dev_crit_ratelimited(device->dev, "CP ucode error interrupt\n");
if (status1 & BIT(A6XX_CP_HW_FAULT_ERROR)) {
kgsl_regread(device, A6XX_CP_HW_FAULT, &status2);
dev_crit_ratelimited(device->dev,
"CP | Ringbuffer HW fault | status=%x\n", status2);
}
if (status1 & BIT(A6XX_CP_REGISTER_PROTECTION_ERROR)) {
kgsl_regread(device, A6XX_CP_PROTECT_STATUS, &status2);
dev_crit_ratelimited(device->dev,
"CP | Protected mode error | %s | addr=%x | status=%x\n",
status2 & (1 << 20) ? "READ" : "WRITE",
status2 & 0x3FFFF, status2);
}
if (status1 & BIT(A6XX_CP_AHB_ERROR))
dev_crit_ratelimited(device->dev,
"CP AHB error interrupt\n");
if (status1 & BIT(A6XX_CP_VSD_PARITY_ERROR))
dev_crit_ratelimited(device->dev,
"CP VSD decoder parity error\n");
if (status1 & BIT(A6XX_CP_ILLEGAL_INSTR_ERROR))
dev_crit_ratelimited(device->dev,
"CP Illegal instruction error\n");
}
static void a6xx_err_callback(struct adreno_device *adreno_dev, int bit)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
switch (bit) {
case A6XX_INT_CP_AHB_ERROR:
dev_crit_ratelimited(device->dev, "CP: AHB bus error\n");
break;
case A6XX_INT_ATB_ASYNCFIFO_OVERFLOW:
dev_crit_ratelimited(device->dev,
"RBBM: ATB ASYNC overflow\n");
break;
case A6XX_INT_RBBM_ATB_BUS_OVERFLOW:
dev_crit_ratelimited(device->dev,
"RBBM: ATB bus overflow\n");
break;
case A6XX_INT_UCHE_OOB_ACCESS:
dev_crit_ratelimited(device->dev,
"UCHE: Out of bounds access\n");
break;
case A6XX_INT_UCHE_TRAP_INTR:
dev_crit_ratelimited(device->dev, "UCHE: Trap interrupt\n");
break;
case A6XX_INT_TSB_WRITE_ERROR:
dev_crit_ratelimited(device->dev, "TSB: Write error interrupt\n");
break;
default:
dev_crit_ratelimited(device->dev, "Unknown interrupt %d\n",
bit);
}
}
/*
* a6xx_llc_configure_gpu_scid() - Program the sub-cache ID for all GPU blocks
* @adreno_dev: The adreno device pointer
*/
static void a6xx_llc_configure_gpu_scid(struct adreno_device *adreno_dev)
{
uint32_t gpu_scid;
uint32_t gpu_cntl1_val = 0;
int i;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct kgsl_mmu *mmu = &device->mmu;
gpu_scid = adreno_llc_get_scid(adreno_dev->gpu_llc_slice);
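/* Replicate the single SCID value into every per-block SCID field */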
for (i = 0; i < A6XX_LLC_NUM_GPU_SCIDS; i++)
gpu_cntl1_val = (gpu_cntl1_val << A6XX_GPU_LLC_SCID_NUM_BITS)
| gpu_scid;
if (mmu->subtype == KGSL_IOMMU_SMMU_V500)
kgsl_regrmw(device, A6XX_GBIF_SCACHE_CNTL1,
A6XX_GPU_LLC_SCID_MASK, gpu_cntl1_val);
else
adreno_cx_misc_regrmw(adreno_dev,
A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
A6XX_GPU_LLC_SCID_MASK, gpu_cntl1_val);
}
/*
* a6xx_llc_configure_gpuhtw_scid() - Program the SCID for GPU pagetables
* @adreno_dev: The adreno device pointer
*/
static void a6xx_llc_configure_gpuhtw_scid(struct adreno_device *adreno_dev)
{
uint32_t gpuhtw_scid;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct kgsl_mmu *mmu = &device->mmu;
/*
* On SMMU-v500, the GPUHTW SCID is configured via a NoC override in
* the XBL image.
*/
if (mmu->subtype == KGSL_IOMMU_SMMU_V500)
return;
gpuhtw_scid = adreno_llc_get_scid(adreno_dev->gpuhtw_llc_slice);
adreno_cx_misc_regrmw(adreno_dev,
A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
A6XX_GPUHTW_LLC_SCID_MASK,
gpuhtw_scid << A6XX_GPUHTW_LLC_SCID_SHIFT);
}
/*
* a6xx_llc_enable_overrides() - Override the page attributes
* @adreno_dev: The adreno device pointer
*/
static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct kgsl_mmu *mmu = &device->mmu;
/*
* Attribute overrides through GBIF are not supported with MMU-500.
* Attributes are used as configured through SMMU pagetable entries.
*/
if (mmu->subtype == KGSL_IOMMU_SMMU_V500)
return;
/*
* 0x3: readnoallocoverrideen=0
* read-no-alloc=0 - Allocate lines on read miss
* writenoallocoverrideen=1
* write-no-alloc=1 - Do not allocate lines on write miss
*/
adreno_cx_misc_regwrite(adreno_dev,
A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0, 0x3);
}
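/* Rows are indexed by the UCHE_CLIENT_PF client id, columns by (mid - 1) */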
static const char *uche_client[7][3] = {
{"SP | VSC | VPC | HLSQ | PC | LRZ", "TP", "VFD"},
{"VSC | VPC | HLSQ | PC | LRZ", "TP | VFD", "SP"},
{"SP | VPC | HLSQ | PC | LRZ", "TP | VFD", "VSC"},
{"SP | VSC | HLSQ | PC | LRZ", "TP | VFD", "VPC"},
{"SP | VSC | VPC | PC | LRZ", "TP | VFD", "HLSQ"},
{"SP | VSC | VPC | HLSQ | LRZ", "TP | VFD", "PC"},
{"SP | VSC | VPC | HLSQ | PC", "TP | VFD", "LRZ"},
};
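/* Magic value that UCHE_CLIENT_PF reads back as while the GPU is in IFPC */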
#define SCOOBYDOO 0x5c00bd00
static const char *a6xx_fault_block_uche(struct kgsl_device *device,
unsigned int mid)
{
unsigned int uche_client_id = 0;
static char str[40];
/*
* The SMMU driver takes a vote on the CX gdsc before calling the kgsl
* pagefault handler. If there is contention for the device mutex in this
* path and the dispatcher fault handler is holding this lock, trying to
* turn off the CX gdsc will fail during the reset. So to avoid blocking
* here, try to lock the device mutex and return if it fails.
*/
if (!mutex_trylock(&device->mutex))
return "UCHE";
if (!kgsl_state_is_awake(device)) {
mutex_unlock(&device->mutex);
return "UCHE";
}
kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
mutex_unlock(&device->mutex);
/* Ignore the value if the GPU is in IFPC */
if (uche_client_id == SCOOBYDOO)
return "UCHE";
uche_client_id &= A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK;
snprintf(str, sizeof(str), "UCHE: %s",
uche_client[uche_client_id][mid - 1]);
return str;
}
static const char *a6xx_iommu_fault_block(struct kgsl_device *device,
unsigned int fsynr1)
{
unsigned int mid = fsynr1 & 0xff;
switch (mid) {
case 0:
return "CP";
case 1:
case 2:
case 3:
return a6xx_fault_block_uche(device, mid);
case 4:
return "CCU";
case 6:
return "CDP Prefetch";
case 7:
return "GPMU";
}
return "Unknown";
}
static void a6xx_cp_callback(struct adreno_device *adreno_dev, int bit)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
if (adreno_is_preemption_enabled(adreno_dev))
a6xx_preemption_trigger(adreno_dev);
adreno_dispatcher_schedule(device);
}
#define A6XX_INT_MASK \
((1 << A6XX_INT_CP_AHB_ERROR) | \
(1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) | \
(1 << A6XX_INT_RBBM_GPC_ERROR) | \
(1 << A6XX_INT_CP_SW) | \
(1 << A6XX_INT_CP_HW_ERROR) | \
(1 << A6XX_INT_CP_IB2) | \
(1 << A6XX_INT_CP_IB1) | \
(1 << A6XX_INT_CP_RB) | \
(1 << A6XX_INT_CP_CACHE_FLUSH_TS) | \
(1 << A6XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
(1 << A6XX_INT_RBBM_HANG_DETECT) | \
(1 << A6XX_INT_UCHE_OOB_ACCESS) | \
(1 << A6XX_INT_UCHE_TRAP_INTR) | \
(1 << A6XX_INT_TSB_WRITE_ERROR))
static struct adreno_irq_funcs a6xx_irq_funcs[32] = {
ADRENO_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */
ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 1 - RBBM_AHB_ERROR */
ADRENO_IRQ_CALLBACK(NULL), /* 2 - UNUSED */
ADRENO_IRQ_CALLBACK(NULL), /* 3 - UNUSED */
ADRENO_IRQ_CALLBACK(NULL), /* 4 - UNUSED */
ADRENO_IRQ_CALLBACK(NULL), /* 5 - UNUSED */
/* 6 - RBBM_ATB_ASYNC_OVERFLOW */
ADRENO_IRQ_CALLBACK(a6xx_err_callback),
ADRENO_IRQ_CALLBACK(NULL), /* 7 - GPC_ERR */
ADRENO_IRQ_CALLBACK(a6xx_preemption_callback),/* 8 - CP_SW */
ADRENO_IRQ_CALLBACK(a6xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */
ADRENO_IRQ_CALLBACK(NULL), /* 10 - CP_CCU_FLUSH_DEPTH_TS */
ADRENO_IRQ_CALLBACK(NULL), /* 11 - CP_CCU_FLUSH_COLOR_TS */
ADRENO_IRQ_CALLBACK(NULL), /* 12 - CP_CCU_RESOLVE_TS */
ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 13 - CP_IB2_INT */
ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 14 - CP_IB1_INT */
ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 15 - CP_RB_INT */
ADRENO_IRQ_CALLBACK(NULL), /* 16 - UNUSED */
ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
ADRENO_IRQ_CALLBACK(NULL), /* 18 - CP_WT_DONE_TS */
ADRENO_IRQ_CALLBACK(NULL), /* 19 - UNUSED */
ADRENO_IRQ_CALLBACK(a6xx_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
ADRENO_IRQ_CALLBACK(NULL), /* 21 - UNUSED */
ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 22 - RBBM_ATB_BUS_OVERFLOW */
/* 23 - MISC_HANG_DETECT */
ADRENO_IRQ_CALLBACK(adreno_hang_int_callback),
ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 24 - UCHE_OOB_ACCESS */
ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 25 - UCHE_TRAP_INTR */
ADRENO_IRQ_CALLBACK(NULL), /* 26 - DEBBUS_INTR_0 */
ADRENO_IRQ_CALLBACK(NULL), /* 27 - DEBBUS_INTR_1 */
ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 28 - TSBWRITEERROR */
ADRENO_IRQ_CALLBACK(NULL), /* 29 - UNUSED */
ADRENO_IRQ_CALLBACK(NULL), /* 30 - ISDB_CPU_IRQ */
ADRENO_IRQ_CALLBACK(NULL), /* 31 - ISDB_UNDER_DEBUG */
};
static struct adreno_irq a6xx_irq = {
.funcs = a6xx_irq_funcs,
.mask = A6XX_INT_MASK,
};
static struct adreno_coresight_register a6xx_coresight_regs[] = {
{ A6XX_DBGC_CFG_DBGBUS_SEL_A },
{ A6XX_DBGC_CFG_DBGBUS_SEL_B },
{ A6XX_DBGC_CFG_DBGBUS_SEL_C },
{ A6XX_DBGC_CFG_DBGBUS_SEL_D },
{ A6XX_DBGC_CFG_DBGBUS_CNTLT },
{ A6XX_DBGC_CFG_DBGBUS_CNTLM },
{ A6XX_DBGC_CFG_DBGBUS_OPL },
{ A6XX_DBGC_CFG_DBGBUS_OPE },
{ A6XX_DBGC_CFG_DBGBUS_IVTL_0 },
{ A6XX_DBGC_CFG_DBGBUS_IVTL_1 },
{ A6XX_DBGC_CFG_DBGBUS_IVTL_2 },
{ A6XX_DBGC_CFG_DBGBUS_IVTL_3 },
{ A6XX_DBGC_CFG_DBGBUS_MASKL_0 },
{ A6XX_DBGC_CFG_DBGBUS_MASKL_1 },
{ A6XX_DBGC_CFG_DBGBUS_MASKL_2 },
{ A6XX_DBGC_CFG_DBGBUS_MASKL_3 },
{ A6XX_DBGC_CFG_DBGBUS_BYTEL_0 },
{ A6XX_DBGC_CFG_DBGBUS_BYTEL_1 },
{ A6XX_DBGC_CFG_DBGBUS_IVTE_0 },
{ A6XX_DBGC_CFG_DBGBUS_IVTE_1 },
{ A6XX_DBGC_CFG_DBGBUS_IVTE_2 },
{ A6XX_DBGC_CFG_DBGBUS_IVTE_3 },
{ A6XX_DBGC_CFG_DBGBUS_MASKE_0 },
{ A6XX_DBGC_CFG_DBGBUS_MASKE_1 },
{ A6XX_DBGC_CFG_DBGBUS_MASKE_2 },
{ A6XX_DBGC_CFG_DBGBUS_MASKE_3 },
{ A6XX_DBGC_CFG_DBGBUS_NIBBLEE },
{ A6XX_DBGC_CFG_DBGBUS_PTRC0 },
{ A6XX_DBGC_CFG_DBGBUS_PTRC1 },
{ A6XX_DBGC_CFG_DBGBUS_LOADREG },
{ A6XX_DBGC_CFG_DBGBUS_IDX },
{ A6XX_DBGC_CFG_DBGBUS_CLRC },
{ A6XX_DBGC_CFG_DBGBUS_LOADIVT },
{ A6XX_DBGC_VBIF_DBG_CNTL },
{ A6XX_DBGC_DBG_LO_HI_GPIO },
{ A6XX_DBGC_EXT_TRACE_BUS_CNTL },
{ A6XX_DBGC_READ_AHB_THROUGH_DBG },
{ A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 },
{ A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 },
{ A6XX_DBGC_EVT_CFG },
{ A6XX_DBGC_EVT_INTF_SEL_0 },
{ A6XX_DBGC_EVT_INTF_SEL_1 },
{ A6XX_DBGC_PERF_ATB_CFG },
{ A6XX_DBGC_PERF_ATB_COUNTER_SEL_0 },
{ A6XX_DBGC_PERF_ATB_COUNTER_SEL_1 },
{ A6XX_DBGC_PERF_ATB_COUNTER_SEL_2 },
{ A6XX_DBGC_PERF_ATB_COUNTER_SEL_3 },
{ A6XX_DBGC_PERF_ATB_TRIG_INTF_SEL_0 },
{ A6XX_DBGC_PERF_ATB_TRIG_INTF_SEL_1 },
{ A6XX_DBGC_PERF_ATB_DRAIN_CMD },
{ A6XX_DBGC_ECO_CNTL },
{ A6XX_DBGC_AHB_DBG_CNTL },
};
static struct adreno_coresight_register a6xx_coresight_regs_cx[] = {
{ A6XX_CX_DBGC_CFG_DBGBUS_SEL_A },
{ A6XX_CX_DBGC_CFG_DBGBUS_SEL_B },
{ A6XX_CX_DBGC_CFG_DBGBUS_SEL_C },
{ A6XX_CX_DBGC_CFG_DBGBUS_SEL_D },
{ A6XX_CX_DBGC_CFG_DBGBUS_CNTLT },
{ A6XX_CX_DBGC_CFG_DBGBUS_CNTLM },
{ A6XX_CX_DBGC_CFG_DBGBUS_OPL },
{ A6XX_CX_DBGC_CFG_DBGBUS_OPE },
{ A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 },
{ A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 },
{ A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 },
{ A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 },
{ A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 },
{ A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 },
{ A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 },
{ A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 },
{ A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 },
{ A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 },
{ A6XX_CX_DBGC_CFG_DBGBUS_IVTE_0 },
{ A6XX_CX_DBGC_CFG_DBGBUS_IVTE_1 },
{ A6XX_CX_DBGC_CFG_DBGBUS_IVTE_2 },
{ A6XX_CX_DBGC_CFG_DBGBUS_IVTE_3 },
{ A6XX_CX_DBGC_CFG_DBGBUS_MASKE_0 },
{ A6XX_CX_DBGC_CFG_DBGBUS_MASKE_1 },
{ A6XX_CX_DBGC_CFG_DBGBUS_MASKE_2 },
{ A6XX_CX_DBGC_CFG_DBGBUS_MASKE_3 },
{ A6XX_CX_DBGC_CFG_DBGBUS_NIBBLEE },
{ A6XX_CX_DBGC_CFG_DBGBUS_PTRC0 },
{ A6XX_CX_DBGC_CFG_DBGBUS_PTRC1 },
{ A6XX_CX_DBGC_CFG_DBGBUS_LOADREG },
{ A6XX_CX_DBGC_CFG_DBGBUS_IDX },
{ A6XX_CX_DBGC_CFG_DBGBUS_CLRC },
{ A6XX_CX_DBGC_CFG_DBGBUS_LOADIVT },
{ A6XX_CX_DBGC_VBIF_DBG_CNTL },
{ A6XX_CX_DBGC_DBG_LO_HI_GPIO },
{ A6XX_CX_DBGC_EXT_TRACE_BUS_CNTL },
{ A6XX_CX_DBGC_READ_AHB_THROUGH_DBG },
{ A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 },
{ A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 },
{ A6XX_CX_DBGC_EVT_CFG },
{ A6XX_CX_DBGC_EVT_INTF_SEL_0 },
{ A6XX_CX_DBGC_EVT_INTF_SEL_1 },
{ A6XX_CX_DBGC_PERF_ATB_CFG },
{ A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_0 },
{ A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_1 },
{ A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_2 },
{ A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_3 },
{ A6XX_CX_DBGC_PERF_ATB_TRIG_INTF_SEL_0 },
{ A6XX_CX_DBGC_PERF_ATB_TRIG_INTF_SEL_1 },
{ A6XX_CX_DBGC_PERF_ATB_DRAIN_CMD },
{ A6XX_CX_DBGC_ECO_CNTL },
{ A6XX_CX_DBGC_AHB_DBG_CNTL },
};
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_a, &a6xx_coresight_regs[0]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_b, &a6xx_coresight_regs[1]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_c, &a6xx_coresight_regs[2]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_d, &a6xx_coresight_regs[3]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_cntlt, &a6xx_coresight_regs[4]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_cntlm, &a6xx_coresight_regs[5]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_opl, &a6xx_coresight_regs[6]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ope, &a6xx_coresight_regs[7]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_0, &a6xx_coresight_regs[8]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_1, &a6xx_coresight_regs[9]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_2, &a6xx_coresight_regs[10]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_3, &a6xx_coresight_regs[11]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_0, &a6xx_coresight_regs[12]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_1, &a6xx_coresight_regs[13]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_2, &a6xx_coresight_regs[14]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_3, &a6xx_coresight_regs[15]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_bytel_0, &a6xx_coresight_regs[16]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_bytel_1, &a6xx_coresight_regs[17]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_0, &a6xx_coresight_regs[18]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_1, &a6xx_coresight_regs[19]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_2, &a6xx_coresight_regs[20]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_3, &a6xx_coresight_regs[21]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_0, &a6xx_coresight_regs[22]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_1, &a6xx_coresight_regs[23]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_2, &a6xx_coresight_regs[24]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_3, &a6xx_coresight_regs[25]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_nibblee, &a6xx_coresight_regs[26]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ptrc0, &a6xx_coresight_regs[27]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ptrc1, &a6xx_coresight_regs[28]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_loadreg, &a6xx_coresight_regs[29]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_idx, &a6xx_coresight_regs[30]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_clrc, &a6xx_coresight_regs[31]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_loadivt, &a6xx_coresight_regs[32]);
static ADRENO_CORESIGHT_ATTR(vbif_dbg_cntl, &a6xx_coresight_regs[33]);
static ADRENO_CORESIGHT_ATTR(dbg_lo_hi_gpio, &a6xx_coresight_regs[34]);
static ADRENO_CORESIGHT_ATTR(ext_trace_bus_cntl, &a6xx_coresight_regs[35]);
static ADRENO_CORESIGHT_ATTR(read_ahb_through_dbg, &a6xx_coresight_regs[36]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_buf1, &a6xx_coresight_regs[37]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_buf2, &a6xx_coresight_regs[38]);
static ADRENO_CORESIGHT_ATTR(evt_cfg, &a6xx_coresight_regs[39]);
static ADRENO_CORESIGHT_ATTR(evt_intf_sel_0, &a6xx_coresight_regs[40]);
static ADRENO_CORESIGHT_ATTR(evt_intf_sel_1, &a6xx_coresight_regs[41]);
static ADRENO_CORESIGHT_ATTR(perf_atb_cfg, &a6xx_coresight_regs[42]);
static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_0, &a6xx_coresight_regs[43]);
static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_1, &a6xx_coresight_regs[44]);
static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_2, &a6xx_coresight_regs[45]);
static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_3, &a6xx_coresight_regs[46]);
static ADRENO_CORESIGHT_ATTR(perf_atb_trig_intf_sel_0,
&a6xx_coresight_regs[47]);
static ADRENO_CORESIGHT_ATTR(perf_atb_trig_intf_sel_1,
&a6xx_coresight_regs[48]);
static ADRENO_CORESIGHT_ATTR(perf_atb_drain_cmd, &a6xx_coresight_regs[49]);
static ADRENO_CORESIGHT_ATTR(eco_cntl, &a6xx_coresight_regs[50]);
static ADRENO_CORESIGHT_ATTR(ahb_dbg_cntl, &a6xx_coresight_regs[51]);
/* CX debug registers */
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_a,
&a6xx_coresight_regs_cx[0]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_b,
&a6xx_coresight_regs_cx[1]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_c,
&a6xx_coresight_regs_cx[2]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_d,
&a6xx_coresight_regs_cx[3]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_cntlt,
&a6xx_coresight_regs_cx[4]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_cntlm,
&a6xx_coresight_regs_cx[5]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_opl,
&a6xx_coresight_regs_cx[6]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ope,
&a6xx_coresight_regs_cx[7]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_0,
&a6xx_coresight_regs_cx[8]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_1,
&a6xx_coresight_regs_cx[9]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_2,
&a6xx_coresight_regs_cx[10]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_3,
&a6xx_coresight_regs_cx[11]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_0,
&a6xx_coresight_regs_cx[12]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_1,
&a6xx_coresight_regs_cx[13]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_2,
&a6xx_coresight_regs_cx[14]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_3,
&a6xx_coresight_regs_cx[15]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_bytel_0,
&a6xx_coresight_regs_cx[16]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_bytel_1,
&a6xx_coresight_regs_cx[17]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_0,
&a6xx_coresight_regs_cx[18]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_1,
&a6xx_coresight_regs_cx[19]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_2,
&a6xx_coresight_regs_cx[20]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_3,
&a6xx_coresight_regs_cx[21]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_0,
&a6xx_coresight_regs_cx[22]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_1,
&a6xx_coresight_regs_cx[23]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_2,
&a6xx_coresight_regs_cx[24]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_3,
&a6xx_coresight_regs_cx[25]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_nibblee,
&a6xx_coresight_regs_cx[26]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ptrc0,
&a6xx_coresight_regs_cx[27]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ptrc1,
&a6xx_coresight_regs_cx[28]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_loadreg,
&a6xx_coresight_regs_cx[29]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_idx,
&a6xx_coresight_regs_cx[30]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_clrc,
&a6xx_coresight_regs_cx[31]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_loadivt,
&a6xx_coresight_regs_cx[32]);
static ADRENO_CORESIGHT_ATTR(cx_vbif_dbg_cntl,
&a6xx_coresight_regs_cx[33]);
static ADRENO_CORESIGHT_ATTR(cx_dbg_lo_hi_gpio,
&a6xx_coresight_regs_cx[34]);
static ADRENO_CORESIGHT_ATTR(cx_ext_trace_bus_cntl,
&a6xx_coresight_regs_cx[35]);
static ADRENO_CORESIGHT_ATTR(cx_read_ahb_through_dbg,
&a6xx_coresight_regs_cx[36]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_trace_buf1,
&a6xx_coresight_regs_cx[37]);
static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_trace_buf2,
&a6xx_coresight_regs_cx[38]);
static ADRENO_CORESIGHT_ATTR(cx_evt_cfg,
&a6xx_coresight_regs_cx[39]);
static ADRENO_CORESIGHT_ATTR(cx_evt_intf_sel_0,
&a6xx_coresight_regs_cx[40]);
static ADRENO_CORESIGHT_ATTR(cx_evt_intf_sel_1,
&a6xx_coresight_regs_cx[41]);
static ADRENO_CORESIGHT_ATTR(cx_perf_atb_cfg,
&a6xx_coresight_regs_cx[42]);
static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_0,
&a6xx_coresight_regs_cx[43]);
static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_1,
&a6xx_coresight_regs_cx[44]);
static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_2,
&a6xx_coresight_regs_cx[45]);
static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_3,
&a6xx_coresight_regs_cx[46]);
static ADRENO_CORESIGHT_ATTR(cx_perf_atb_trig_intf_sel_0,
&a6xx_coresight_regs_cx[47]);
static ADRENO_CORESIGHT_ATTR(cx_perf_atb_trig_intf_sel_1,
&a6xx_coresight_regs_cx[48]);
static ADRENO_CORESIGHT_ATTR(cx_perf_atb_drain_cmd,
&a6xx_coresight_regs_cx[49]);
static ADRENO_CORESIGHT_ATTR(cx_eco_cntl,
&a6xx_coresight_regs_cx[50]);
static ADRENO_CORESIGHT_ATTR(cx_ahb_dbg_cntl,
&a6xx_coresight_regs_cx[51]);
static struct attribute *a6xx_coresight_attrs[] = {
&coresight_attr_cfg_dbgbus_sel_a.attr.attr,
&coresight_attr_cfg_dbgbus_sel_b.attr.attr,
&coresight_attr_cfg_dbgbus_sel_c.attr.attr,
&coresight_attr_cfg_dbgbus_sel_d.attr.attr,
&coresight_attr_cfg_dbgbus_cntlt.attr.attr,
&coresight_attr_cfg_dbgbus_cntlm.attr.attr,
&coresight_attr_cfg_dbgbus_opl.attr.attr,
&coresight_attr_cfg_dbgbus_ope.attr.attr,
&coresight_attr_cfg_dbgbus_ivtl_0.attr.attr,
&coresight_attr_cfg_dbgbus_ivtl_1.attr.attr,
&coresight_attr_cfg_dbgbus_ivtl_2.attr.attr,
&coresight_attr_cfg_dbgbus_ivtl_3.attr.attr,
&coresight_attr_cfg_dbgbus_maskl_0.attr.attr,
&coresight_attr_cfg_dbgbus_maskl_1.attr.attr,
&coresight_attr_cfg_dbgbus_maskl_2.attr.attr,
&coresight_attr_cfg_dbgbus_maskl_3.attr.attr,
&coresight_attr_cfg_dbgbus_bytel_0.attr.attr,
&coresight_attr_cfg_dbgbus_bytel_1.attr.attr,
&coresight_attr_cfg_dbgbus_ivte_0.attr.attr,
&coresight_attr_cfg_dbgbus_ivte_1.attr.attr,
&coresight_attr_cfg_dbgbus_ivte_2.attr.attr,
&coresight_attr_cfg_dbgbus_ivte_3.attr.attr,
&coresight_attr_cfg_dbgbus_maske_0.attr.attr,
&coresight_attr_cfg_dbgbus_maske_1.attr.attr,
&coresight_attr_cfg_dbgbus_maske_2.attr.attr,
&coresight_attr_cfg_dbgbus_maske_3.attr.attr,
&coresight_attr_cfg_dbgbus_nibblee.attr.attr,
&coresight_attr_cfg_dbgbus_ptrc0.attr.attr,
&coresight_attr_cfg_dbgbus_ptrc1.attr.attr,
&coresight_attr_cfg_dbgbus_loadreg.attr.attr,
&coresight_attr_cfg_dbgbus_idx.attr.attr,
&coresight_attr_cfg_dbgbus_clrc.attr.attr,
&coresight_attr_cfg_dbgbus_loadivt.attr.attr,
&coresight_attr_vbif_dbg_cntl.attr.attr,
&coresight_attr_dbg_lo_hi_gpio.attr.attr,
&coresight_attr_ext_trace_bus_cntl.attr.attr,
&coresight_attr_read_ahb_through_dbg.attr.attr,
&coresight_attr_cfg_dbgbus_trace_buf1.attr.attr,
&coresight_attr_cfg_dbgbus_trace_buf2.attr.attr,
&coresight_attr_evt_cfg.attr.attr,
&coresight_attr_evt_intf_sel_0.attr.attr,
&coresight_attr_evt_intf_sel_1.attr.attr,
&coresight_attr_perf_atb_cfg.attr.attr,
&coresight_attr_perf_atb_counter_sel_0.attr.attr,
&coresight_attr_perf_atb_counter_sel_1.attr.attr,
&coresight_attr_perf_atb_counter_sel_2.attr.attr,
&coresight_attr_perf_atb_counter_sel_3.attr.attr,
&coresight_attr_perf_atb_trig_intf_sel_0.attr.attr,
&coresight_attr_perf_atb_trig_intf_sel_1.attr.attr,
&coresight_attr_perf_atb_drain_cmd.attr.attr,
&coresight_attr_eco_cntl.attr.attr,
&coresight_attr_ahb_dbg_cntl.attr.attr,
NULL,
};
/* CX */
static struct attribute *a6xx_coresight_attrs_cx[] = {
&coresight_attr_cx_cfg_dbgbus_sel_a.attr.attr,
&coresight_attr_cx_cfg_dbgbus_sel_b.attr.attr,
&coresight_attr_cx_cfg_dbgbus_sel_c.attr.attr,
&coresight_attr_cx_cfg_dbgbus_sel_d.attr.attr,
&coresight_attr_cx_cfg_dbgbus_cntlt.attr.attr,
&coresight_attr_cx_cfg_dbgbus_cntlm.attr.attr,
&coresight_attr_cx_cfg_dbgbus_opl.attr.attr,
&coresight_attr_cx_cfg_dbgbus_ope.attr.attr,
&coresight_attr_cx_cfg_dbgbus_ivtl_0.attr.attr,
&coresight_attr_cx_cfg_dbgbus_ivtl_1.attr.attr,
&coresight_attr_cx_cfg_dbgbus_ivtl_2.attr.attr,
&coresight_attr_cx_cfg_dbgbus_ivtl_3.attr.attr,
&coresight_attr_cx_cfg_dbgbus_maskl_0.attr.attr,
&coresight_attr_cx_cfg_dbgbus_maskl_1.attr.attr,
&coresight_attr_cx_cfg_dbgbus_maskl_2.attr.attr,
&coresight_attr_cx_cfg_dbgbus_maskl_3.attr.attr,
&coresight_attr_cx_cfg_dbgbus_bytel_0.attr.attr,
&coresight_attr_cx_cfg_dbgbus_bytel_1.attr.attr,
&coresight_attr_cx_cfg_dbgbus_ivte_0.attr.attr,
&coresight_attr_cx_cfg_dbgbus_ivte_1.attr.attr,
&coresight_attr_cx_cfg_dbgbus_ivte_2.attr.attr,
&coresight_attr_cx_cfg_dbgbus_ivte_3.attr.attr,
&coresight_attr_cx_cfg_dbgbus_maske_0.attr.attr,
&coresight_attr_cx_cfg_dbgbus_maske_1.attr.attr,
&coresight_attr_cx_cfg_dbgbus_maske_2.attr.attr,
&coresight_attr_cx_cfg_dbgbus_maske_3.attr.attr,
&coresight_attr_cx_cfg_dbgbus_nibblee.attr.attr,
&coresight_attr_cx_cfg_dbgbus_ptrc0.attr.attr,
&coresight_attr_cx_cfg_dbgbus_ptrc1.attr.attr,
&coresight_attr_cx_cfg_dbgbus_loadreg.attr.attr,
&coresight_attr_cx_cfg_dbgbus_idx.attr.attr,
&coresight_attr_cx_cfg_dbgbus_clrc.attr.attr,
&coresight_attr_cx_cfg_dbgbus_loadivt.attr.attr,
&coresight_attr_cx_vbif_dbg_cntl.attr.attr,
&coresight_attr_cx_dbg_lo_hi_gpio.attr.attr,
&coresight_attr_cx_ext_trace_bus_cntl.attr.attr,
&coresight_attr_cx_read_ahb_through_dbg.attr.attr,
&coresight_attr_cx_cfg_dbgbus_trace_buf1.attr.attr,
&coresight_attr_cx_cfg_dbgbus_trace_buf2.attr.attr,
&coresight_attr_cx_evt_cfg.attr.attr,
&coresight_attr_cx_evt_intf_sel_0.attr.attr,
&coresight_attr_cx_evt_intf_sel_1.attr.attr,
&coresight_attr_cx_perf_atb_cfg.attr.attr,
&coresight_attr_cx_perf_atb_counter_sel_0.attr.attr,
&coresight_attr_cx_perf_atb_counter_sel_1.attr.attr,
&coresight_attr_cx_perf_atb_counter_sel_2.attr.attr,
&coresight_attr_cx_perf_atb_counter_sel_3.attr.attr,
&coresight_attr_cx_perf_atb_trig_intf_sel_0.attr.attr,
&coresight_attr_cx_perf_atb_trig_intf_sel_1.attr.attr,
&coresight_attr_cx_perf_atb_drain_cmd.attr.attr,
&coresight_attr_cx_eco_cntl.attr.attr,
&coresight_attr_cx_ahb_dbg_cntl.attr.attr,
NULL,
};
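/* Wrap the GX and CX attribute lists into sysfs groups for coresight */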
static const struct attribute_group a6xx_coresight_group = {
.attrs = a6xx_coresight_attrs,
};
static const struct attribute_group *a6xx_coresight_groups[] = {
&a6xx_coresight_group,
NULL,
};
static const struct attribute_group a6xx_coresight_group_cx = {
.attrs = a6xx_coresight_attrs_cx,
};
static const struct attribute_group *a6xx_coresight_groups_cx[] = {
&a6xx_coresight_group_cx,
NULL,
};
static struct adreno_coresight a6xx_coresight = {
.registers = a6xx_coresight_regs,
.count = ARRAY_SIZE(a6xx_coresight_regs),
.groups = a6xx_coresight_groups,
};
static struct adreno_coresight a6xx_coresight_cx = {
.registers = a6xx_coresight_regs_cx,
.count = ARRAY_SIZE(a6xx_coresight_regs_cx),
.groups = a6xx_coresight_groups_cx,
};
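/*
* Perfcounter descriptors, one array per hardware block. The
* positional initializers follow struct adreno_perfcount_register:
* active countable, kernel and user refcounts, LO/HI value register
* offsets, load bit for the RBBM_PERFCTR_LOAD_CMDn interface (-1 for
* counters that cannot be preloaded) and the select register. Field
* naming here is descriptive, per the definition in adreno.h.
*/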
static struct adreno_perfcount_register a6xx_perfcounters_cp[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO,
A6XX_RBBM_PERFCTR_CP_0_HI, 0, A6XX_CP_PERFCTR_CP_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_1_LO,
A6XX_RBBM_PERFCTR_CP_1_HI, 1, A6XX_CP_PERFCTR_CP_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_2_LO,
A6XX_RBBM_PERFCTR_CP_2_HI, 2, A6XX_CP_PERFCTR_CP_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_3_LO,
A6XX_RBBM_PERFCTR_CP_3_HI, 3, A6XX_CP_PERFCTR_CP_SEL_3 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_4_LO,
A6XX_RBBM_PERFCTR_CP_4_HI, 4, A6XX_CP_PERFCTR_CP_SEL_4 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_5_LO,
A6XX_RBBM_PERFCTR_CP_5_HI, 5, A6XX_CP_PERFCTR_CP_SEL_5 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_6_LO,
A6XX_RBBM_PERFCTR_CP_6_HI, 6, A6XX_CP_PERFCTR_CP_SEL_6 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_7_LO,
A6XX_RBBM_PERFCTR_CP_7_HI, 7, A6XX_CP_PERFCTR_CP_SEL_7 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_8_LO,
A6XX_RBBM_PERFCTR_CP_8_HI, 8, A6XX_CP_PERFCTR_CP_SEL_8 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_9_LO,
A6XX_RBBM_PERFCTR_CP_9_HI, 9, A6XX_CP_PERFCTR_CP_SEL_9 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_10_LO,
A6XX_RBBM_PERFCTR_CP_10_HI, 10, A6XX_CP_PERFCTR_CP_SEL_10 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_11_LO,
A6XX_RBBM_PERFCTR_CP_11_HI, 11, A6XX_CP_PERFCTR_CP_SEL_11 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_12_LO,
A6XX_RBBM_PERFCTR_CP_12_HI, 12, A6XX_CP_PERFCTR_CP_SEL_12 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_13_LO,
A6XX_RBBM_PERFCTR_CP_13_HI, 13, A6XX_CP_PERFCTR_CP_SEL_13 },
};
static struct adreno_perfcount_register a6xx_perfcounters_rbbm[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_0_LO,
A6XX_RBBM_PERFCTR_RBBM_0_HI, 14, A6XX_RBBM_PERFCTR_RBBM_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_1_LO,
A6XX_RBBM_PERFCTR_RBBM_1_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_2_LO,
A6XX_RBBM_PERFCTR_RBBM_2_HI, 16, A6XX_RBBM_PERFCTR_RBBM_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_3_LO,
A6XX_RBBM_PERFCTR_RBBM_3_HI, 17, A6XX_RBBM_PERFCTR_RBBM_SEL_3 },
};
static struct adreno_perfcount_register a6xx_perfcounters_pc[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_0_LO,
A6XX_RBBM_PERFCTR_PC_0_HI, 18, A6XX_PC_PERFCTR_PC_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_1_LO,
A6XX_RBBM_PERFCTR_PC_1_HI, 19, A6XX_PC_PERFCTR_PC_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_2_LO,
A6XX_RBBM_PERFCTR_PC_2_HI, 20, A6XX_PC_PERFCTR_PC_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_3_LO,
A6XX_RBBM_PERFCTR_PC_3_HI, 21, A6XX_PC_PERFCTR_PC_SEL_3 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_4_LO,
A6XX_RBBM_PERFCTR_PC_4_HI, 22, A6XX_PC_PERFCTR_PC_SEL_4 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_5_LO,
A6XX_RBBM_PERFCTR_PC_5_HI, 23, A6XX_PC_PERFCTR_PC_SEL_5 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_6_LO,
A6XX_RBBM_PERFCTR_PC_6_HI, 24, A6XX_PC_PERFCTR_PC_SEL_6 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_7_LO,
A6XX_RBBM_PERFCTR_PC_7_HI, 25, A6XX_PC_PERFCTR_PC_SEL_7 },
};
static struct adreno_perfcount_register a6xx_perfcounters_vfd[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_0_LO,
A6XX_RBBM_PERFCTR_VFD_0_HI, 26, A6XX_VFD_PERFCTR_VFD_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_1_LO,
A6XX_RBBM_PERFCTR_VFD_1_HI, 27, A6XX_VFD_PERFCTR_VFD_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_2_LO,
A6XX_RBBM_PERFCTR_VFD_2_HI, 28, A6XX_VFD_PERFCTR_VFD_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_3_LO,
A6XX_RBBM_PERFCTR_VFD_3_HI, 29, A6XX_VFD_PERFCTR_VFD_SEL_3 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_4_LO,
A6XX_RBBM_PERFCTR_VFD_4_HI, 30, A6XX_VFD_PERFCTR_VFD_SEL_4 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_5_LO,
A6XX_RBBM_PERFCTR_VFD_5_HI, 31, A6XX_VFD_PERFCTR_VFD_SEL_5 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_6_LO,
A6XX_RBBM_PERFCTR_VFD_6_HI, 32, A6XX_VFD_PERFCTR_VFD_SEL_6 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_7_LO,
A6XX_RBBM_PERFCTR_VFD_7_HI, 33, A6XX_VFD_PERFCTR_VFD_SEL_7 },
};
static struct adreno_perfcount_register a6xx_perfcounters_hlsq[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_0_LO,
A6XX_RBBM_PERFCTR_HLSQ_0_HI, 34, A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_1_LO,
A6XX_RBBM_PERFCTR_HLSQ_1_HI, 35, A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_2_LO,
A6XX_RBBM_PERFCTR_HLSQ_2_HI, 36, A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_3_LO,
A6XX_RBBM_PERFCTR_HLSQ_3_HI, 37, A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_4_LO,
A6XX_RBBM_PERFCTR_HLSQ_4_HI, 38, A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_5_LO,
A6XX_RBBM_PERFCTR_HLSQ_5_HI, 39, A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
};
static struct adreno_perfcount_register a6xx_perfcounters_vpc[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_0_LO,
A6XX_RBBM_PERFCTR_VPC_0_HI, 40, A6XX_VPC_PERFCTR_VPC_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_1_LO,
A6XX_RBBM_PERFCTR_VPC_1_HI, 41, A6XX_VPC_PERFCTR_VPC_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_2_LO,
A6XX_RBBM_PERFCTR_VPC_2_HI, 42, A6XX_VPC_PERFCTR_VPC_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_3_LO,
A6XX_RBBM_PERFCTR_VPC_3_HI, 43, A6XX_VPC_PERFCTR_VPC_SEL_3 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_4_LO,
A6XX_RBBM_PERFCTR_VPC_4_HI, 44, A6XX_VPC_PERFCTR_VPC_SEL_4 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_5_LO,
A6XX_RBBM_PERFCTR_VPC_5_HI, 45, A6XX_VPC_PERFCTR_VPC_SEL_5 },
};
static struct adreno_perfcount_register a6xx_perfcounters_ccu[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_0_LO,
A6XX_RBBM_PERFCTR_CCU_0_HI, 46, A6XX_RB_PERFCTR_CCU_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_1_LO,
A6XX_RBBM_PERFCTR_CCU_1_HI, 47, A6XX_RB_PERFCTR_CCU_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_2_LO,
A6XX_RBBM_PERFCTR_CCU_2_HI, 48, A6XX_RB_PERFCTR_CCU_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_3_LO,
A6XX_RBBM_PERFCTR_CCU_3_HI, 49, A6XX_RB_PERFCTR_CCU_SEL_3 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_4_LO,
A6XX_RBBM_PERFCTR_CCU_4_HI, 50, A6XX_RB_PERFCTR_CCU_SEL_4 },
};
static struct adreno_perfcount_register a6xx_perfcounters_tse[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_0_LO,
A6XX_RBBM_PERFCTR_TSE_0_HI, 51, A6XX_GRAS_PERFCTR_TSE_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_1_LO,
A6XX_RBBM_PERFCTR_TSE_1_HI, 52, A6XX_GRAS_PERFCTR_TSE_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_2_LO,
A6XX_RBBM_PERFCTR_TSE_2_HI, 53, A6XX_GRAS_PERFCTR_TSE_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_3_LO,
A6XX_RBBM_PERFCTR_TSE_3_HI, 54, A6XX_GRAS_PERFCTR_TSE_SEL_3 },
};
static struct adreno_perfcount_register a6xx_perfcounters_ras[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_0_LO,
A6XX_RBBM_PERFCTR_RAS_0_HI, 55, A6XX_GRAS_PERFCTR_RAS_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_1_LO,
A6XX_RBBM_PERFCTR_RAS_1_HI, 56, A6XX_GRAS_PERFCTR_RAS_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_2_LO,
A6XX_RBBM_PERFCTR_RAS_2_HI, 57, A6XX_GRAS_PERFCTR_RAS_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_3_LO,
A6XX_RBBM_PERFCTR_RAS_3_HI, 58, A6XX_GRAS_PERFCTR_RAS_SEL_3 },
};
static struct adreno_perfcount_register a6xx_perfcounters_uche[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_0_LO,
A6XX_RBBM_PERFCTR_UCHE_0_HI, 59, A6XX_UCHE_PERFCTR_UCHE_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_1_LO,
A6XX_RBBM_PERFCTR_UCHE_1_HI, 60, A6XX_UCHE_PERFCTR_UCHE_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_2_LO,
A6XX_RBBM_PERFCTR_UCHE_2_HI, 61, A6XX_UCHE_PERFCTR_UCHE_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_3_LO,
A6XX_RBBM_PERFCTR_UCHE_3_HI, 62, A6XX_UCHE_PERFCTR_UCHE_SEL_3 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_4_LO,
A6XX_RBBM_PERFCTR_UCHE_4_HI, 63, A6XX_UCHE_PERFCTR_UCHE_SEL_4 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_5_LO,
A6XX_RBBM_PERFCTR_UCHE_5_HI, 64, A6XX_UCHE_PERFCTR_UCHE_SEL_5 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_6_LO,
A6XX_RBBM_PERFCTR_UCHE_6_HI, 65, A6XX_UCHE_PERFCTR_UCHE_SEL_6 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_7_LO,
A6XX_RBBM_PERFCTR_UCHE_7_HI, 66, A6XX_UCHE_PERFCTR_UCHE_SEL_7 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_8_LO,
A6XX_RBBM_PERFCTR_UCHE_8_HI, 67, A6XX_UCHE_PERFCTR_UCHE_SEL_8 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_9_LO,
A6XX_RBBM_PERFCTR_UCHE_9_HI, 68, A6XX_UCHE_PERFCTR_UCHE_SEL_9 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_10_LO,
A6XX_RBBM_PERFCTR_UCHE_10_HI, 69,
A6XX_UCHE_PERFCTR_UCHE_SEL_10 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_11_LO,
A6XX_RBBM_PERFCTR_UCHE_11_HI, 70,
A6XX_UCHE_PERFCTR_UCHE_SEL_11 },
};
static struct adreno_perfcount_register a6xx_perfcounters_tp[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_0_LO,
A6XX_RBBM_PERFCTR_TP_0_HI, 71, A6XX_TPL1_PERFCTR_TP_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_1_LO,
A6XX_RBBM_PERFCTR_TP_1_HI, 72, A6XX_TPL1_PERFCTR_TP_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_2_LO,
A6XX_RBBM_PERFCTR_TP_2_HI, 73, A6XX_TPL1_PERFCTR_TP_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_3_LO,
A6XX_RBBM_PERFCTR_TP_3_HI, 74, A6XX_TPL1_PERFCTR_TP_SEL_3 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_4_LO,
A6XX_RBBM_PERFCTR_TP_4_HI, 75, A6XX_TPL1_PERFCTR_TP_SEL_4 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_5_LO,
A6XX_RBBM_PERFCTR_TP_5_HI, 76, A6XX_TPL1_PERFCTR_TP_SEL_5 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_6_LO,
A6XX_RBBM_PERFCTR_TP_6_HI, 77, A6XX_TPL1_PERFCTR_TP_SEL_6 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_7_LO,
A6XX_RBBM_PERFCTR_TP_7_HI, 78, A6XX_TPL1_PERFCTR_TP_SEL_7 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_8_LO,
A6XX_RBBM_PERFCTR_TP_8_HI, 79, A6XX_TPL1_PERFCTR_TP_SEL_8 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_9_LO,
A6XX_RBBM_PERFCTR_TP_9_HI, 80, A6XX_TPL1_PERFCTR_TP_SEL_9 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_10_LO,
A6XX_RBBM_PERFCTR_TP_10_HI, 81, A6XX_TPL1_PERFCTR_TP_SEL_10 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_11_LO,
A6XX_RBBM_PERFCTR_TP_11_HI, 82, A6XX_TPL1_PERFCTR_TP_SEL_11 },
};
static struct adreno_perfcount_register a6xx_perfcounters_sp[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_0_LO,
A6XX_RBBM_PERFCTR_SP_0_HI, 83, A6XX_SP_PERFCTR_SP_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_1_LO,
A6XX_RBBM_PERFCTR_SP_1_HI, 84, A6XX_SP_PERFCTR_SP_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_2_LO,
A6XX_RBBM_PERFCTR_SP_2_HI, 85, A6XX_SP_PERFCTR_SP_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_3_LO,
A6XX_RBBM_PERFCTR_SP_3_HI, 86, A6XX_SP_PERFCTR_SP_SEL_3 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_4_LO,
A6XX_RBBM_PERFCTR_SP_4_HI, 87, A6XX_SP_PERFCTR_SP_SEL_4 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_5_LO,
A6XX_RBBM_PERFCTR_SP_5_HI, 88, A6XX_SP_PERFCTR_SP_SEL_5 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_6_LO,
A6XX_RBBM_PERFCTR_SP_6_HI, 89, A6XX_SP_PERFCTR_SP_SEL_6 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_7_LO,
A6XX_RBBM_PERFCTR_SP_7_HI, 90, A6XX_SP_PERFCTR_SP_SEL_7 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_8_LO,
A6XX_RBBM_PERFCTR_SP_8_HI, 91, A6XX_SP_PERFCTR_SP_SEL_8 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_9_LO,
A6XX_RBBM_PERFCTR_SP_9_HI, 92, A6XX_SP_PERFCTR_SP_SEL_9 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_10_LO,
A6XX_RBBM_PERFCTR_SP_10_HI, 93, A6XX_SP_PERFCTR_SP_SEL_10 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_11_LO,
A6XX_RBBM_PERFCTR_SP_11_HI, 94, A6XX_SP_PERFCTR_SP_SEL_11 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_12_LO,
A6XX_RBBM_PERFCTR_SP_12_HI, 95, A6XX_SP_PERFCTR_SP_SEL_12 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_13_LO,
A6XX_RBBM_PERFCTR_SP_13_HI, 96, A6XX_SP_PERFCTR_SP_SEL_13 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_14_LO,
A6XX_RBBM_PERFCTR_SP_14_HI, 97, A6XX_SP_PERFCTR_SP_SEL_14 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_15_LO,
A6XX_RBBM_PERFCTR_SP_15_HI, 98, A6XX_SP_PERFCTR_SP_SEL_15 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_16_LO,
A6XX_RBBM_PERFCTR_SP_16_HI, 99, A6XX_SP_PERFCTR_SP_SEL_16 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_17_LO,
A6XX_RBBM_PERFCTR_SP_17_HI, 100, A6XX_SP_PERFCTR_SP_SEL_17 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_18_LO,
A6XX_RBBM_PERFCTR_SP_18_HI, 101, A6XX_SP_PERFCTR_SP_SEL_18 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_19_LO,
A6XX_RBBM_PERFCTR_SP_19_HI, 102, A6XX_SP_PERFCTR_SP_SEL_19 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_20_LO,
A6XX_RBBM_PERFCTR_SP_20_HI, 103, A6XX_SP_PERFCTR_SP_SEL_20 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_21_LO,
A6XX_RBBM_PERFCTR_SP_21_HI, 104, A6XX_SP_PERFCTR_SP_SEL_21 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_22_LO,
A6XX_RBBM_PERFCTR_SP_22_HI, 105, A6XX_SP_PERFCTR_SP_SEL_22 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_23_LO,
A6XX_RBBM_PERFCTR_SP_23_HI, 106, A6XX_SP_PERFCTR_SP_SEL_23 },
};
static struct adreno_perfcount_register a6xx_perfcounters_rb[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_0_LO,
A6XX_RBBM_PERFCTR_RB_0_HI, 107, A6XX_RB_PERFCTR_RB_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_1_LO,
A6XX_RBBM_PERFCTR_RB_1_HI, 108, A6XX_RB_PERFCTR_RB_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_2_LO,
A6XX_RBBM_PERFCTR_RB_2_HI, 109, A6XX_RB_PERFCTR_RB_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_3_LO,
A6XX_RBBM_PERFCTR_RB_3_HI, 110, A6XX_RB_PERFCTR_RB_SEL_3 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_4_LO,
A6XX_RBBM_PERFCTR_RB_4_HI, 111, A6XX_RB_PERFCTR_RB_SEL_4 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_5_LO,
A6XX_RBBM_PERFCTR_RB_5_HI, 112, A6XX_RB_PERFCTR_RB_SEL_5 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_6_LO,
A6XX_RBBM_PERFCTR_RB_6_HI, 113, A6XX_RB_PERFCTR_RB_SEL_6 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_7_LO,
A6XX_RBBM_PERFCTR_RB_7_HI, 114, A6XX_RB_PERFCTR_RB_SEL_7 },
};
static struct adreno_perfcount_register a6xx_perfcounters_vsc[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_0_LO,
A6XX_RBBM_PERFCTR_VSC_0_HI, 115, A6XX_VSC_PERFCTR_VSC_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_1_LO,
A6XX_RBBM_PERFCTR_VSC_1_HI, 116, A6XX_VSC_PERFCTR_VSC_SEL_1 },
};
static struct adreno_perfcount_register a6xx_perfcounters_lrz[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_0_LO,
A6XX_RBBM_PERFCTR_LRZ_0_HI, 117, A6XX_GRAS_PERFCTR_LRZ_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_1_LO,
A6XX_RBBM_PERFCTR_LRZ_1_HI, 118, A6XX_GRAS_PERFCTR_LRZ_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_2_LO,
A6XX_RBBM_PERFCTR_LRZ_2_HI, 119, A6XX_GRAS_PERFCTR_LRZ_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_3_LO,
A6XX_RBBM_PERFCTR_LRZ_3_HI, 120, A6XX_GRAS_PERFCTR_LRZ_SEL_3 },
};
static struct adreno_perfcount_register a6xx_perfcounters_cmp[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_0_LO,
A6XX_RBBM_PERFCTR_CMP_0_HI, 121, A6XX_RB_PERFCTR_CMP_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_1_LO,
A6XX_RBBM_PERFCTR_CMP_1_HI, 122, A6XX_RB_PERFCTR_CMP_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_2_LO,
A6XX_RBBM_PERFCTR_CMP_2_HI, 123, A6XX_RB_PERFCTR_CMP_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_3_LO,
A6XX_RBBM_PERFCTR_CMP_3_HI, 124, A6XX_RB_PERFCTR_CMP_SEL_3 },
};
static struct adreno_perfcount_register a6xx_perfcounters_vbif[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW0,
A6XX_VBIF_PERF_CNT_HIGH0, -1, A6XX_VBIF_PERF_CNT_SEL0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW1,
A6XX_VBIF_PERF_CNT_HIGH1, -1, A6XX_VBIF_PERF_CNT_SEL1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW2,
A6XX_VBIF_PERF_CNT_HIGH2, -1, A6XX_VBIF_PERF_CNT_SEL2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW3,
A6XX_VBIF_PERF_CNT_HIGH3, -1, A6XX_VBIF_PERF_CNT_SEL3 },
};
static struct adreno_perfcount_register a6xx_perfcounters_vbif_pwr[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW0,
A6XX_VBIF_PERF_PWR_CNT_HIGH0, -1, A6XX_VBIF_PERF_PWR_CNT_EN0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW1,
A6XX_VBIF_PERF_PWR_CNT_HIGH1, -1, A6XX_VBIF_PERF_PWR_CNT_EN1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW2,
A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
};
static struct adreno_perfcount_register a6xx_perfcounters_gbif[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW0,
A6XX_GBIF_PERF_CNT_HIGH0, -1, A6XX_GBIF_PERF_CNT_SEL },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW1,
A6XX_GBIF_PERF_CNT_HIGH1, -1, A6XX_GBIF_PERF_CNT_SEL },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW2,
A6XX_GBIF_PERF_CNT_HIGH2, -1, A6XX_GBIF_PERF_CNT_SEL },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW3,
A6XX_GBIF_PERF_CNT_HIGH3, -1, A6XX_GBIF_PERF_CNT_SEL },
};
static struct adreno_perfcount_register a6xx_perfcounters_gbif_pwr[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW0,
A6XX_GBIF_PWR_CNT_HIGH0, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW1,
A6XX_GBIF_PWR_CNT_HIGH1, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW2,
A6XX_GBIF_PWR_CNT_HIGH2, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
};
static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
};
static struct adreno_perfcount_register a6xx_perfcounters_glc[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_GLC_0_LO,
A6XX_RBBM_PERFCTR_GLC_0_HI, -1, A6XX_RB_PERFCTR_GLC_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_GLC_1_LO,
A6XX_RBBM_PERFCTR_GLC_1_HI, -1, A6XX_RB_PERFCTR_GLC_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_GLC_2_LO,
A6XX_RBBM_PERFCTR_GLC_2_HI, -1, A6XX_RB_PERFCTR_GLC_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_GLC_3_LO,
A6XX_RBBM_PERFCTR_GLC_3_HI, -1, A6XX_RB_PERFCTR_GLC_SEL_3 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_GLC_4_LO,
A6XX_RBBM_PERFCTR_GLC_4_HI, -1, A6XX_RB_PERFCTR_GLC_SEL_4 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_GLC_5_LO,
A6XX_RBBM_PERFCTR_GLC_5_HI, -1, A6XX_RB_PERFCTR_GLC_SEL_5 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_GLC_6_LO,
A6XX_RBBM_PERFCTR_GLC_6_HI, -1, A6XX_RB_PERFCTR_GLC_SEL_6 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_GLC_7_LO,
A6XX_RBBM_PERFCTR_GLC_7_HI, -1, A6XX_RB_PERFCTR_GLC_SEL_7 },
};
static struct adreno_perfcount_register a6xx_perfcounters_fche[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_FCHE_0_LO,
A6XX_RBBM_PERFCTR_FCHE_0_HI, -1, A6XX_RB_PERFCTR_FCHE_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_FCHE_1_LO,
A6XX_RBBM_PERFCTR_FCHE_1_HI, -1, A6XX_RB_PERFCTR_FCHE_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_FCHE_2_LO,
A6XX_RBBM_PERFCTR_FCHE_2_HI, -1, A6XX_RB_PERFCTR_FCHE_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_FCHE_3_LO,
A6XX_RBBM_PERFCTR_FCHE_3_HI, -1, A6XX_RB_PERFCTR_FCHE_SEL_3 },
};
static struct adreno_perfcount_register a6xx_perfcounters_mhub[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_MHUB_0_LO,
A6XX_RBBM_PERFCTR_MHUB_0_HI, -1, A6XX_RB_PERFCTR_MHUB_SEL_0 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_MHUB_1_LO,
A6XX_RBBM_PERFCTR_MHUB_1_HI, -1, A6XX_RB_PERFCTR_MHUB_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_MHUB_2_LO,
A6XX_RBBM_PERFCTR_MHUB_2_HI, -1, A6XX_RB_PERFCTR_MHUB_SEL_2 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_MHUB_3_LO,
A6XX_RBBM_PERFCTR_MHUB_3_HI, -1, A6XX_RB_PERFCTR_MHUB_SEL_3 },
};
/*
* The ADRENO_PERFCOUNTER_GROUP_RESTORE flag is enabled by default
* because most of the perfcounter groups need to be restored
* as part of preemption and IFPC. Perfcounter groups that are not
* restored across preemption and IFPC should be defined using the
* A6XX_PERFCOUNTER_GROUP_FLAGS macro.
*/
#define A6XX_PERFCOUNTER_GROUP(offset, name) \
ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, \
ADRENO_PERFCOUNTER_GROUP_RESTORE)
#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
#define A6XX_POWER_COUNTER_GROUP(offset, name) \
ADRENO_POWER_COUNTER_GROUP(a6xx, offset, name)
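/*
* For example, A6XX_PERFCOUNTER_GROUP(CP, cp) registers
* a6xx_perfcounters_cp under KGSL_PERFCOUNTER_GROUP_CP with the
* RESTORE flag set, while the explicit _FLAGS entries below opt
* groups such as RBBM and VBIF out of restore or mark them FIXED.
*/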
static struct adreno_perfcount_group a6xx_perfcounter_groups
[KGSL_PERFCOUNTER_GROUP_MAX] = {
A6XX_PERFCOUNTER_GROUP(CP, cp),
A6XX_PERFCOUNTER_GROUP_FLAGS(RBBM, rbbm, 0),
A6XX_PERFCOUNTER_GROUP(PC, pc),
A6XX_PERFCOUNTER_GROUP(VFD, vfd),
A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
A6XX_PERFCOUNTER_GROUP(VPC, vpc),
A6XX_PERFCOUNTER_GROUP(CCU, ccu),
A6XX_PERFCOUNTER_GROUP(CMP, cmp),
A6XX_PERFCOUNTER_GROUP(TSE, tse),
A6XX_PERFCOUNTER_GROUP(RAS, ras),
A6XX_PERFCOUNTER_GROUP(LRZ, lrz),
A6XX_PERFCOUNTER_GROUP(UCHE, uche),
A6XX_PERFCOUNTER_GROUP(TP, tp),
A6XX_PERFCOUNTER_GROUP(SP, sp),
A6XX_PERFCOUNTER_GROUP(RB, rb),
A6XX_PERFCOUNTER_GROUP(VSC, vsc),
A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, vbif, 0),
A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
ADRENO_PERFCOUNTER_GROUP_FIXED),
A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
ADRENO_PERFCOUNTER_GROUP_FIXED),
};
static struct adreno_perfcounters a6xx_perfcounters = {
a6xx_perfcounter_groups,
ARRAY_SIZE(a6xx_perfcounter_groups),
};
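/*
* Read the speed bin from the fuse described by the
* "qcom,gpu-speed-bin" DT property, a <fuse offset, mask, shift>
* triplet: the fuse word at the given offset is masked and shifted
* to produce adreno_dev->speed_bin.
*/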
static void a6xx_efuse_speed_bin(struct adreno_device *adreno_dev)
{
unsigned int val;
unsigned int speed_bin[3];
struct kgsl_device *device = &adreno_dev->dev;
if (of_property_read_u32_array(device->pdev->dev.of_node,
"qcom,gpu-speed-bin", speed_bin, 3))
return;
adreno_efuse_read_u32(adreno_dev, speed_bin[0], &val);
adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
}
static const struct {
int (*check)(struct adreno_device *adreno_dev);
void (*func)(struct adreno_device *adreno_dev);
} a6xx_efuse_funcs[] = {
{ adreno_is_a615_family, a6xx_efuse_speed_bin },
{ adreno_is_a612, a6xx_efuse_speed_bin },
};
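/*
* Map the efuses and run the handler for every table entry whose
* check matches the current target (currently the speed bin read
* for the a615 family and a612).
*/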
static void a6xx_check_features(struct adreno_device *adreno_dev)
{
unsigned int i;
if (adreno_efuse_map(adreno_dev))
return;
for (i = 0; i < ARRAY_SIZE(a6xx_efuse_funcs); i++) {
if (a6xx_efuse_funcs[i].check(adreno_dev))
a6xx_efuse_funcs[i].func(adreno_dev);
}
adreno_efuse_unmap(adreno_dev);
}
static void a6xx_platform_setup(struct adreno_device *adreno_dev)
{
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
if (adreno_has_gbif(adreno_dev)) {
a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].regs =
a6xx_perfcounters_gbif;
a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].reg_count
= ARRAY_SIZE(a6xx_perfcounters_gbif);
a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs =
a6xx_perfcounters_gbif_pwr;
a6xx_perfcounter_groups[
KGSL_PERFCOUNTER_GROUP_VBIF_PWR].reg_count
= ARRAY_SIZE(a6xx_perfcounters_gbif_pwr);
gpudev->gbif_client_halt_mask = A6XX_GBIF_CLIENT_HALT_MASK;
gpudev->gbif_arb_halt_mask = A6XX_GBIF_ARB_HALT_MASK;
gpudev->gbif_gx_halt_mask = A6XX_GBIF_GX_HALT_MASK;
} else {
gpudev->vbif_xin_halt_ctrl0_mask =
A6XX_VBIF_XIN_HALT_CTRL0_MASK;
}
if (adreno_is_a702(adreno_dev)) {
a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_GLC].regs =
a6xx_perfcounters_glc;
a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_GLC].reg_count
= ARRAY_SIZE(a6xx_perfcounters_glc);
a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_FCHE].regs =
a6xx_perfcounters_fche;
a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_FCHE].reg_count
= ARRAY_SIZE(a6xx_perfcounters_fche);
a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_MHUB].regs =
a6xx_perfcounters_mhub;
a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_MHUB].reg_count
= ARRAY_SIZE(a6xx_perfcounters_mhub);
}
/* Set the GPU busy counter for frequency scaling */
adreno_dev->perfctr_pwr_lo = A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L;
/* Set the counter for IFPC */
if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
adreno_dev->perfctr_ifpc_lo =
A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L;
if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
set_bit(ADRENO_SPTP_PC_CTRL, &adreno_dev->pwrctrl_flag);
if (!ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
gpudev->cp_rb_cntl |= (1 << 27);
/* Check efuse bits for various capabilities */
a6xx_check_features(adreno_dev);
}
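/*
* Emit CP_EVENT_WRITE packets for CCU_INVALIDATE_DEPTH (24) and
* CCU_INVALIDATE_COLOR (25) and return the number of dwords added
* to the command stream.
*/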
static unsigned int a6xx_ccu_invalidate(struct adreno_device *adreno_dev,
unsigned int *cmds)
{
/* CCU_INVALIDATE_DEPTH */
*cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
*cmds++ = 24;
/* CCU_INVALIDATE_COLOR */
*cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
*cmds++ = 25;
return 4;
}
/* Register offset defines for A6XX, in order of enum adreno_regs */
static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A6XX_CP_RB_BASE),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A6XX_CP_RB_BASE_HI),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
A6XX_CP_RB_RPTR_ADDR_LO),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
A6XX_CP_RB_RPTR_ADDR_HI),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A6XX_CP_RB_RPTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A6XX_CP_RB_WPTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A6XX_CP_RB_CNTL),
ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A6XX_CP_SQE_CNTL),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A6XX_CP_MISC_CNTL),
ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A6XX_CP_HW_FAULT),
ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A6XX_CP_IB1_BASE),
ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE_HI, A6XX_CP_IB1_BASE_HI),
ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A6XX_CP_IB1_REM_SIZE),
ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A6XX_CP_IB2_BASE),
ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE_HI, A6XX_CP_IB2_BASE_HI),
ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A6XX_CP_IB2_REM_SIZE),
ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_ADDR, A6XX_CP_ROQ_DBG_ADDR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A6XX_CP_ROQ_DBG_DATA),
ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT, A6XX_CP_CONTEXT_SWITCH_CNTL),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI),
ADRENO_REG_DEFINE(
ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO),
ADRENO_REG_DEFINE(
ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI),
ADRENO_REG_DEFINE(
ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO),
ADRENO_REG_DEFINE(
ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI),
ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_LEVEL_STATUS,
A6XX_CP_CONTEXT_SWITCH_LEVEL_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
A6XX_RBBM_PERFCTR_LOAD_CMD0),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
A6XX_RBBM_PERFCTR_LOAD_CMD1),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
A6XX_RBBM_PERFCTR_LOAD_CMD2),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
A6XX_RBBM_PERFCTR_LOAD_CMD3),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A6XX_RBBM_INT_0_MASK),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A6XX_RBBM_INT_0_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_CLOCK_CTL, A6XX_RBBM_CLOCK_CNTL),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
A6XX_RBBM_INT_CLEAR_CMD),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A6XX_RBBM_SW_RESET_CMD),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD,
A6XX_RBBM_BLOCK_SW_RESET_CMD),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
A6XX_RBBM_BLOCK_SW_RESET_CMD2),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
A6XX_RBBM_PERFCTR_LOAD_VALUE_LO),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION, A6XX_VBIF_VERSION),
ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
A6XX_VBIF_XIN_HALT_CTRL0),
ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
A6XX_VBIF_XIN_HALT_CTRL1),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_GPR0_CNTL, A6XX_RBBM_GPR0_CNTL),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
A6XX_RBBM_VBIF_GX_RESET_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_GBIF_HALT,
A6XX_RBBM_GBIF_HALT),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_GBIF_HALT_ACK,
A6XX_RBBM_GBIF_HALT_ACK),
ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT, A6XX_GBIF_HALT),
ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT_ACK, A6XX_GBIF_HALT_ACK),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
A6XX_GMU_ALWAYS_ON_COUNTER_L),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
A6XX_GMU_ALWAYS_ON_COUNTER_H),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
A6XX_GMU_AO_AHB_FENCE_CTRL),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_INTERRUPT_EN,
A6XX_GMU_AO_INTERRUPT_EN),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
A6XX_GMU_AO_HOST_INTERRUPT_CLR),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
A6XX_GMU_AO_HOST_INTERRUPT_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
A6XX_GMU_AO_HOST_INTERRUPT_MASK),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
A6XX_GMU_GMU_PWR_COL_KEEPALIVE),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_AHB_FENCE_STATUS,
A6XX_GMU_AHB_FENCE_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_CTRL_STATUS,
A6XX_GMU_HFI_CTRL_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_VERSION_INFO,
A6XX_GMU_HFI_VERSION_INFO),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_SFR_ADDR,
A6XX_GMU_HFI_SFR_ADDR),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_RPMH_POWER_STATE,
A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
A6XX_GMU_GMU2HOST_INTR_CLR),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
A6XX_GMU_GMU2HOST_INTR_INFO),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
A6XX_GMU_GMU2HOST_INTR_MASK),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_SET,
A6XX_GMU_HOST2GMU_INTR_SET),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
A6XX_GMU_HOST2GMU_INTR_CLR),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
A6XX_GMU_HOST2GMU_INTR_RAW_INFO),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_NMI_CONTROL_STATUS,
A6XX_GMU_NMI_CONTROL_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_CM3_CFG,
A6XX_GMU_CM3_CFG),
ADRENO_REG_DEFINE(ADRENO_REG_GMU_RBBM_INT_UNMASKED_STATUS,
A6XX_GMU_RBBM_INT_UNMASKED_STATUS),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
A6XX_RBBM_SECVID_TRUST_CNTL),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
A6XX_RBBM_SECVID_TSB_CNTL),
};
static const struct adreno_reg_offsets a6xx_reg_offsets = {
.offsets = a6xx_register_offsets,
.offset_0 = ADRENO_REG_REGISTER_MAX,
};
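/*
* The powerup reglist is shared between the CPU (KMD) and the CP SQE
* ucode, so updates are serialized with a Peterson-style two-party
* lock kept at the start of the same buffer: each side raises its
* flag (flag_kmd/flag_ucode), cedes the turn to the other side, and
* spins until the other side either drops its flag or hands the turn
* back. A minimal sketch of the shared layout, assuming the field
* order below (the authoritative definition lives in a shared
* header; only the four fields used in this file are known here):
*
* struct cpu_gpu_lock {
*     u32 flag_ucode;   - set while the SQE ucode holds the lock
*     u32 flag_kmd;     - set while the CPU holds the lock
*     u32 turn;         - 0: ucode's turn, 1: KMD's turn
*     u32 list_length;  - u32 words of <select, countable> pairs
* };
*
* Returns 0 on success, or -EBUSY if the ucode does not release the
* lock within 1 second.
*/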
static int cpu_gpu_lock(struct cpu_gpu_lock *lock)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
/* Indicate that the CPU wants the lock */
lock->flag_kmd = 1;
/* post the request */
wmb();
/* Hand the turn to the GPU ucode before waiting on it */
lock->turn = 0;
/* Finish all memory transactions before moving on */
mb();
/*
* Spin here while the GPU ucode holds the lock; lock->flag_ucode
* will be set to 0 after the GPU ucode releases the lock. The wait
* is bounded at 1 second, which should be enough for the GPU to
* release the lock.
*/
while (lock->flag_ucode == 1 && lock->turn == 0) {
cpu_relax();
/* Get the latest updates from GPU */
rmb();
if (time_after(jiffies, timeout))
break;
}
if (lock->flag_ucode == 1 && lock->turn == 0)
return -EBUSY;
return 0;
}
static void cpu_gpu_unlock(struct cpu_gpu_lock *lock)
{
/* Make sure all writes are done before releasing the lock */
wmb();
lock->flag_kmd = 0;
}
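/*
* Update a perfcounter select in the powerup reglist. The buffer
* starts with the cpu_gpu_lock structure and is followed by
* <select register, countable> pairs; lock->list_length counts the
* u32 words in the pair list. A sketch of the layout:
*
* +------------------------+
* | struct cpu_gpu_lock    |
* +------------------------+
* | select reg | countable |   pair 0
* | select reg | countable |   pair 1
* | ...        | ...       |
* +------------------------+
*
* If the select register is already listed, only its countable is
* updated in place; otherwise the new pair is appended and
* list_length grows by two. When update_reg is set, the select
* register is also written immediately.
*/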
static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
struct adreno_perfcount_register *reg, bool update_reg)
{
void *ptr = adreno_dev->pwrup_reglist.hostptr;
struct cpu_gpu_lock *lock = ptr;
u32 *data = ptr + sizeof(*lock);
int i, offset = 0;
if (cpu_gpu_lock(lock)) {
cpu_gpu_unlock(lock);
return -EBUSY;
}
/*
* If the perfcounter select register is already present in reglist
* update it, otherwise append the <select register, value> pair to
* the end of the list.
*/
for (i = 0; i < lock->list_length >> 1; i++) {
if (data[offset] == reg->select) {
data[offset + 1] = reg->countable;
goto update;
}
offset += 2;
}
/*
* For a612 targets A6XX_RBBM_PERFCTR_CNTL needs to be the last entry,
* so overwrite the existing A6XX_RBBM_PERFCTR_CNTL pair and add it
* back at the end. All other targets just append the new counter to
* the end.
*/
if (adreno_is_a612(adreno_dev)) {
data[offset - 2] = reg->select;
data[offset - 1] = reg->countable;
data[offset] = A6XX_RBBM_PERFCTR_CNTL;
data[offset + 1] = 1;
} else {
data[offset] = reg->select;
data[offset + 1] = reg->countable;
}
lock->list_length += 2;
update:
if (update_reg)
kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg->select,
reg->countable);
cpu_gpu_unlock(lock);
return 0;
}
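/*
* On a610 and a702 the GFX PSCBCs need explicit retention hints:
* retain core_clk memory and periphery while the clocks are on and
* drop retention on the way down; mem_iface_clk never retains.
*/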
static void a6xx_clk_set_options(struct adreno_device *adreno_dev,
const char *name, struct clk *clk, bool on)
{
if (!adreno_is_a610(adreno_dev) && !adreno_is_a702(adreno_dev))
return;
/* Handle clock settings for GFX PSCBCs */
if (on) {
if (!strcmp(name, "mem_iface_clk")) {
clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
} else if (!strcmp(name, "core_clk")) {
clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH);
clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
}
} else {
if (!strcmp(name, "core_clk")) {
clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
}
}
}
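/* a6xx GPU function table plugged into the adreno core */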
struct adreno_gpudev adreno_a6xx_gpudev = {
.reg_offsets = &a6xx_reg_offsets,
.start = a6xx_start,
.snapshot = a6xx_snapshot,
.irq = &a6xx_irq,
.irq_trace = trace_kgsl_a5xx_irq_status,
.num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
.cp_rb_cntl = A6XX_CP_RB_CNTL_DEFAULT,
.platform_setup = a6xx_platform_setup,
.init = a6xx_init,
.rb_start = a6xx_rb_start,
.regulator_enable = a6xx_sptprac_enable,
.regulator_disable = a6xx_sptprac_disable,
.perfcounters = &a6xx_perfcounters,
.read_throttling_counters = a6xx_read_throttling_counters,
.microcode_read = a6xx_microcode_read,
.enable_64bit = a6xx_enable_64bit,
.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
.llc_configure_gpuhtw_scid = a6xx_llc_configure_gpuhtw_scid,
.llc_enable_overrides = a6xx_llc_enable_overrides,
.gpu_keepalive = a6xx_gpu_keepalive,
.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
.iommu_fault_block = a6xx_iommu_fault_block,
.reset = a6xx_reset,
.soft_reset = a6xx_soft_reset,
.preemption_pre_ibsubmit = a6xx_preemption_pre_ibsubmit,
.preemption_post_ibsubmit = a6xx_preemption_post_ibsubmit,
.preemption_init = a6xx_preemption_init,
.preemption_close = a6xx_preemption_close,
.preemption_schedule = a6xx_preemption_schedule,
.set_marker = a6xx_set_marker,
.preemption_context_init = a6xx_preemption_context_init,
.preemption_context_destroy = a6xx_preemption_context_destroy,
.sptprac_is_on = a6xx_sptprac_is_on,
.ccu_invalidate = a6xx_ccu_invalidate,
.perfcounter_update = a6xx_perfcounter_update,
.coresight = {&a6xx_coresight, &a6xx_coresight_cx},
.clk_set_options = a6xx_clk_set_options,
};