/*
* Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
* Copyright © 2018 Google, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Rob Clark <robclark@freedesktop.org>
*/
#include <stdio.h>
#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "freedreno_draw.h"
#include "freedreno_log.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"
#include "fd6_blitter.h"
#include "fd6_gmem.h"
#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_format.h"
#include "fd6_resource.h"
#include "fd6_zsa.h"
#include "fd6_pack.h"
/**
* Emits the flags registers, suitable for RB_MRT_FLAG_BUFFER,
* RB_DEPTH_FLAG_BUFFER, SP_PS_2D_SRC_FLAGS, and RB_BLIT_FLAG_DST.
*/
void
fd6_emit_flag_reference(struct fd_ringbuffer *ring, struct fd_resource *rsc,
int level, int layer)
{
if (fd_resource_ubwc_enabled(rsc, level)) {
OUT_RELOC(ring, rsc->bo, fd_resource_ubwc_offset(rsc, level, layer), 0, 0);
OUT_RING(ring,
A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(fdl_ubwc_pitch(&rsc->layout, level)) |
A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->layout.ubwc_layer_size >> 2));
} else {
OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].PITCH */
}
}
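/*
 * Emit per-MRT color buffer state (format/pitch/base, plus gmem base when
 * rendering to gmem), the sRGB and render-component masks, and the UBWC
 * flag buffer reference for each bound cbuf.
 */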
static void
emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
const struct fd_gmem_stateobj *gmem)
{
unsigned char mrt_comp[A6XX_MAX_RENDER_TARGETS] = {0};
unsigned srgb_cntl = 0;
unsigned i;
unsigned max_layer_index = 0;
for (i = 0; i < pfb->nr_cbufs; i++) {
enum a6xx_format format = 0;
enum a3xx_color_swap swap = WZYX;
bool sint = false, uint = false;
struct fd_resource *rsc = NULL;
struct fdl_slice *slice = NULL;
uint32_t stride = 0;
uint32_t offset;
uint32_t tile_mode;
if (!pfb->cbufs[i])
continue;
mrt_comp[i] = 0xf;
struct pipe_surface *psurf = pfb->cbufs[i];
enum pipe_format pformat = psurf->format;
rsc = fd_resource(psurf->texture);
if (!rsc->bo)
continue;
uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
slice = fd_resource_slice(rsc, psurf->u.tex.level);
format = fd6_pipe2color(pformat);
sint = util_format_is_pure_sint(pformat);
uint = util_format_is_pure_uint(pformat);
if (util_format_is_srgb(pformat))
srgb_cntl |= (1 << i);
offset = fd_resource_offset(rsc, psurf->u.tex.level,
psurf->u.tex.first_layer);
stride = fd_resource_pitch(rsc, psurf->u.tex.level);
swap = fd6_resource_swap(rsc, pformat);
tile_mode = fd_resource_tile_mode(psurf->texture, psurf->u.tex.level);
max_layer_index = psurf->u.tex.last_layer - psurf->u.tex.first_layer;
debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));
OUT_REG(ring,
A6XX_RB_MRT_BUF_INFO(i,
.color_format = format,
.color_tile_mode = tile_mode,
.color_swap = swap),
A6XX_RB_MRT_PITCH(i, .a6xx_rb_mrt_pitch = stride),
A6XX_RB_MRT_ARRAY_PITCH(i, .a6xx_rb_mrt_array_pitch = slice->size0),
A6XX_RB_MRT_BASE(i, .bo = rsc->bo, .bo_offset = offset),
A6XX_RB_MRT_BASE_GMEM(i, .unknown = base));
OUT_REG(ring,
A6XX_SP_FS_MRT_REG(i, .color_format = format,
.color_sint = sint, .color_uint = uint));
OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3);
fd6_emit_flag_reference(ring, rsc,
psurf->u.tex.level, psurf->u.tex.first_layer);
}
OUT_REG(ring, A6XX_RB_SRGB_CNTL(.dword = srgb_cntl));
OUT_REG(ring, A6XX_SP_SRGB_CNTL(.dword = srgb_cntl));
OUT_REG(ring, A6XX_RB_RENDER_COMPONENTS(
.rt0 = mrt_comp[0],
.rt1 = mrt_comp[1],
.rt2 = mrt_comp[2],
.rt3 = mrt_comp[3],
.rt4 = mrt_comp[4],
.rt5 = mrt_comp[5],
.rt6 = mrt_comp[6],
.rt7 = mrt_comp[7]));
OUT_REG(ring, A6XX_SP_FS_RENDER_COMPONENTS(
.rt0 = mrt_comp[0],
.rt1 = mrt_comp[1],
.rt2 = mrt_comp[2],
.rt3 = mrt_comp[3],
.rt4 = mrt_comp[4],
.rt5 = mrt_comp[5],
.rt6 = mrt_comp[6],
.rt7 = mrt_comp[7]));
OUT_REG(ring, A6XX_GRAS_MAX_LAYER_INDEX(max_layer_index));
}
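/*
 * Emit depth/stencil buffer state: depth buffer info/pitch/base (plus gmem
 * base), the depth flag (UBWC) buffer, LRZ buffer state, and separate
 * stencil state if present.
 */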
static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
const struct fd_gmem_stateobj *gmem)
{
if (zsbuf) {
struct fd_resource *rsc = fd_resource(zsbuf->texture);
enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
uint32_t stride = fd_resource_pitch(rsc, 0);
uint32_t size = fd_resource_slice(rsc, 0)->size0;
uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;
uint32_t offset = fd_resource_offset(rsc, zsbuf->u.tex.level,
zsbuf->u.tex.first_layer);
OUT_REG(ring,
A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
A6XX_RB_DEPTH_BUFFER_PITCH(.a6xx_rb_depth_buffer_pitch = stride),
A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(.a6xx_rb_depth_buffer_array_pitch = size),
A6XX_RB_DEPTH_BUFFER_BASE(.bo = rsc->bo, .bo_offset = offset),
A6XX_RB_DEPTH_BUFFER_BASE_GMEM(.dword = base));
OUT_REG(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));
OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
fd6_emit_flag_reference(ring, rsc,
zsbuf->u.tex.level, zsbuf->u.tex.first_layer);
if (rsc->lrz) {
OUT_REG(ring,
A6XX_GRAS_LRZ_BUFFER_BASE(.bo = rsc->lrz),
A6XX_GRAS_LRZ_BUFFER_PITCH(.pitch = rsc->lrz_pitch),
// XXX a6xx seems to use a different buffer here.. not sure what for..
A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO(0),
A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI(0));
} else {
OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_BASE_LO */
OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_BASE_HI */
OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_PITCH */
OUT_RING(ring, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
OUT_RING(ring, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */
}
/* NOTE: blob emits GRAS_LRZ_CNTL plus GRAS_LRZ_BUFFER_BASE
 * plus this CP_EVENT_WRITE at the end in its own IB..
 */
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(UNK_25));
if (rsc->stencil) {
stride = fd_resource_pitch(rsc->stencil, 0);
size = fd_resource_slice(rsc->stencil, 0)->size0;
uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;
OUT_REG(ring,
A6XX_RB_STENCIL_INFO(.separate_stencil = true),
A6XX_RB_STENCIL_BUFFER_PITCH(.a6xx_rb_stencil_buffer_pitch = stride),
A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(.a6xx_rb_stencil_buffer_array_pitch = size),
A6XX_RB_STENCIL_BUFFER_BASE(.bo = rsc->stencil->bo),
A6XX_RB_STENCIL_BUFFER_BASE_GMEM(.dword = base));
} else {
OUT_REG(ring, A6XX_RB_STENCIL_INFO(0));
}
} else {
OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_PITCH */
OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_ARRAY_PITCH */
OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_BASE_LO */
OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_BASE_HI */
OUT_RING(ring, 0x00000000); /* RB_DEPTH_BUFFER_BASE_GMEM */
OUT_REG(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));
OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_BASE_LO */
OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_BASE_HI */
OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_PITCH */
OUT_RING(ring, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
OUT_RING(ring, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */
OUT_REG(ring, A6XX_RB_STENCIL_INFO(0));
}
}
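/* Decide whether to use hw binning (visibility stream) for this batch: */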
static bool
use_hw_binning(struct fd_batch *batch)
{
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
// TODO figure out hw limits for binning
return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) >= 2) &&
(batch->num_draws > 0);
}
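/*
 * Patch the pitch of previously emitted fb-read texture descriptors to the
 * gmem bin pitch (bin_w * cpp), which is only known once the gmem layout
 * is decided.
 */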
static void
patch_fb_read(struct fd_batch *batch)
{
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
for (unsigned i = 0; i < fd_patch_num_elements(&batch->fb_read_patches); i++) {
struct fd_cs_patch *patch = fd_patch_element(&batch->fb_read_patches, i);
*patch->cs = patch->val | A6XX_TEX_CONST_2_PITCH(gmem->bin_w * gmem->cbuf_cpp[0]);
}
util_dynarray_clear(&batch->fb_read_patches);
}
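/*
 * Emit RB_RENDER_CNTL (via CP_REG_WRITE with the RENDER_CNTL tracker),
 * setting the binning-pass bit and the per-MRT / depth UBWC flag enables.
 */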
static void
update_render_cntl(struct fd_batch *batch, struct pipe_framebuffer_state *pfb, bool binning)
{
struct fd_ringbuffer *ring = batch->gmem;
uint32_t cntl = 0;
bool depth_ubwc_enable = false;
uint32_t mrts_ubwc_enable = 0;
int i;
if (pfb->zsbuf) {
struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
depth_ubwc_enable = fd_resource_ubwc_enabled(rsc, pfb->zsbuf->u.tex.level);
}
for (i = 0; i < pfb->nr_cbufs; i++) {
if (!pfb->cbufs[i])
continue;
struct pipe_surface *psurf = pfb->cbufs[i];
struct fd_resource *rsc = fd_resource(psurf->texture);
if (!rsc->bo)
continue;
if (fd_resource_ubwc_enabled(rsc, psurf->u.tex.level))
mrts_ubwc_enable |= 1 << i;
}
cntl |= A6XX_RB_RENDER_CNTL_UNK4;
if (binning)
cntl |= A6XX_RB_RENDER_CNTL_BINNING;
OUT_PKT7(ring, CP_REG_WRITE, 3);
OUT_RING(ring, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
OUT_RING(ring, REG_A6XX_RB_RENDER_CNTL);
OUT_RING(ring, cntl |
COND(depth_ubwc_enable, A6XX_RB_RENDER_CNTL_FLAG_DEPTH) |
A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable));
}
/* extra size to store VSC_DRAW_STRM_SIZE: */
#define VSC_DRAW_STRM_SIZE(pitch) ((pitch) * 32 + 0x100)
#define VSC_PRIM_STRM_SIZE(pitch) ((pitch) * 32)
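/*
 * (Re)allocate the VSC draw/prim stream buffers if the pitch needs to grow,
 * then emit the VSC bin size/count, per-pipe configs and stream buffer
 * addresses.
 */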
static void
update_vsc_pipe(struct fd_batch *batch)
{
struct fd_context *ctx = batch->ctx;
struct fd6_context *fd6_ctx = fd6_context(ctx);
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd_ringbuffer *ring = batch->gmem;
int i;
if (batch->draw_strm_bits/8 > fd6_ctx->vsc_draw_strm_pitch) {
if (fd6_ctx->vsc_draw_strm)
fd_bo_del(fd6_ctx->vsc_draw_strm);
fd6_ctx->vsc_draw_strm = NULL;
/* Note: probably only need to align to 0x40, but using a larger
 * alignment reduces the odds that we will have to realloc again on
 * the next frame:
 */
fd6_ctx->vsc_draw_strm_pitch = align(batch->draw_strm_bits/8, 0x4000);
debug_printf("pre-resize VSC_DRAW_STRM_PITCH to: 0x%x\n",
fd6_ctx->vsc_draw_strm_pitch);
}
if (batch->prim_strm_bits/8 > fd6_ctx->vsc_prim_strm_pitch) {
if (fd6_ctx->vsc_prim_strm)
fd_bo_del(fd6_ctx->vsc_prim_strm);
fd6_ctx->vsc_prim_strm = NULL;
fd6_ctx->vsc_prim_strm_pitch = align(batch->prim_strm_bits/8, 0x4000);
debug_printf("pre-resize VSC_PRIM_STRM_PITCH to: 0x%x\n",
fd6_ctx->vsc_prim_strm_pitch);
}
if (!fd6_ctx->vsc_draw_strm) {
fd6_ctx->vsc_draw_strm = fd_bo_new(ctx->screen->dev,
VSC_DRAW_STRM_SIZE(fd6_ctx->vsc_draw_strm_pitch),
DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_draw_strm");
}
if (!fd6_ctx->vsc_prim_strm) {
fd6_ctx->vsc_prim_strm = fd_bo_new(ctx->screen->dev,
VSC_PRIM_STRM_SIZE(fd6_ctx->vsc_prim_strm_pitch),
DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_prim_strm");
}
OUT_REG(ring,
A6XX_VSC_BIN_SIZE(.width = gmem->bin_w, .height = gmem->bin_h),
A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(
.bo = fd6_ctx->vsc_draw_strm,
.bo_offset = 32 * fd6_ctx->vsc_draw_strm_pitch));
OUT_REG(ring, A6XX_VSC_BIN_COUNT(.nx = gmem->nbins_x,
.ny = gmem->nbins_y));
OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
for (i = 0; i < 32; i++) {
const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[i];
OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
}
OUT_REG(ring,
A6XX_VSC_PRIM_STRM_ADDRESS(.bo = fd6_ctx->vsc_prim_strm),
A6XX_VSC_PRIM_STRM_PITCH(.dword = fd6_ctx->vsc_prim_strm_pitch),
A6XX_VSC_PRIM_STRM_LIMIT(.dword = fd6_ctx->vsc_prim_strm_pitch - 64));
OUT_REG(ring,
A6XX_VSC_DRAW_STRM_ADDRESS(.bo = fd6_ctx->vsc_draw_strm),
A6XX_VSC_DRAW_STRM_PITCH(.dword = fd6_ctx->vsc_draw_strm_pitch),
A6XX_VSC_DRAW_STRM_LIMIT(.dword = fd6_ctx->vsc_draw_strm_pitch - 64));
}
/*
 * If overflow is detected, either 0x1 (VSC_DRAW_STRM overflow) or 0x3
 * (VSC_PRIM_STRM overflow) plus the size of the overflowed buffer is
 * written to control->vsc_overflow. This allows the CPU to
 * detect which buffer overflowed (and, since the current size is
 * encoded as well, prevents already-submitted but not yet executed
 * batches from fooling the CPU into increasing the size again
 * unnecessarily).
 */
static void
emit_vsc_overflow_test(struct fd_batch *batch)
{
struct fd_ringbuffer *ring = batch->gmem;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
debug_assert((fd6_ctx->vsc_draw_strm_pitch & 0x3) == 0);
debug_assert((fd6_ctx->vsc_prim_strm_pitch & 0x3) == 0);
/* Check for overflow, write vsc_overflow if detected: */
for (int i = 0; i < gmem->num_vsc_pipes; i++) {
OUT_PKT7(ring, CP_COND_WRITE5, 8);
OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_draw_strm_pitch - 64));
OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
OUT_RELOC(ring, control_ptr(fd6_ctx, vsc_overflow)); /* WRITE_ADDR_LO/HI */
OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(1 + fd6_ctx->vsc_draw_strm_pitch));
OUT_PKT7(ring, CP_COND_WRITE5, 8);
OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
CP_COND_WRITE5_0_WRITE_MEMORY);
OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_prim_strm_pitch - 64));
OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
OUT_RELOC(ring, control_ptr(fd6_ctx, vsc_overflow)); /* WRITE_ADDR_LO/HI */
OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(3 + fd6_ctx->vsc_prim_strm_pitch));
}
OUT_PKT7(ring, CP_WAIT_MEM_WRITES, 0);
}
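/*
 * CPU-side check of the vsc_overflow flag written by the conditional
 * writes in emit_vsc_overflow_test(); if an overflow was recorded, double
 * the pitch of the affected stream buffer so it gets reallocated at the
 * larger size.
 */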
static void
check_vsc_overflow(struct fd_context *ctx)
{
struct fd6_context *fd6_ctx = fd6_context(ctx);
struct fd6_control *control = fd_bo_map(fd6_ctx->control_mem);
uint32_t vsc_overflow = control->vsc_overflow;
if (!vsc_overflow)
return;
/* clear overflow flag: */
control->vsc_overflow = 0;
unsigned buffer = vsc_overflow & 0x3;
unsigned size = vsc_overflow & ~0x3;
if (buffer == 0x1) {
/* VSC_DRAW_STRM overflow: */
if (size < fd6_ctx->vsc_draw_strm_pitch) {
/* we've already increased the size, this overflow is
* from a batch submitted before resize, but executed
* after
*/
return;
}
fd_bo_del(fd6_ctx->vsc_draw_strm);
fd6_ctx->vsc_draw_strm = NULL;
fd6_ctx->vsc_draw_strm_pitch *= 2;
debug_printf("resized VSC_DRAW_STRM_PITCH to: 0x%x\n",
fd6_ctx->vsc_draw_strm_pitch);
} else if (buffer == 0x3) {
/* VSC_PRIM_STRM overflow: */
if (size < fd6_ctx->vsc_prim_strm_pitch) {
/* we've already increased the size */
return;
}
fd_bo_del(fd6_ctx->vsc_prim_strm);
fd6_ctx->vsc_prim_strm = NULL;
fd6_ctx->vsc_prim_strm_pitch *= 2;
debug_printf("resized VSC_PRIM_STRM_PITCH to: 0x%x\n",
fd6_ctx->vsc_prim_strm_pitch);
} else {
/* NOTE: it's possible, for example, for overflow to corrupt the
 * control page. I mostly just see this hit if I set initial VSC
 * buffer size extremely small. Things still seem to recover,
 * but maybe we should pre-emptively realloc vsc_draw_strm/vsc_prim_strm
 * and hope for different memory placement?
 */
DBG("invalid vsc_overflow value: 0x%08x", vsc_overflow);
}
}
/*
 * Emit the target IB conditionally, gated on CP_REG_TEST of VSC_STATE[p],
 * i.e. the IB is skipped for tiles that have no visible geometry.
 */
static void
emit_conditional_ib(struct fd_batch *batch, const struct fd_tile *tile,
struct fd_ringbuffer *target)
{
struct fd_ringbuffer *ring = batch->gmem;
if (target->cur == target->start)
return;
emit_marker6(ring, 6);
unsigned count = fd_ringbuffer_cmd_count(target);
BEGIN_RING(ring, 5 + 4 * count); /* ensure conditional doesn't get split */
OUT_PKT7(ring, CP_REG_TEST, 1);
OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(REG_A6XX_VSC_STATE_REG(tile->p)) |
A6XX_CP_REG_TEST_0_BIT(tile->n) |
A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
OUT_RING(ring, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
OUT_RING(ring, CP_COND_REG_EXEC_1_DWORDS(4 * count));
for (unsigned i = 0; i < count; i++) {
uint32_t dwords;
OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
assert(dwords > 0);
OUT_RING(ring, dwords);
}
emit_marker6(ring, 6);
}
static void
set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
OUT_REG(ring,
A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));
OUT_REG(ring,
A6XX_GRAS_RESOLVE_CNTL_1(.x = x1, .y = y1),
A6XX_GRAS_RESOLVE_CNTL_2(.x = x2, .y = y2));
}
static void
set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
{
OUT_REG(ring, A6XX_GRAS_BIN_CONTROL(.binw = w, .binh = h, .dword = flag));
OUT_REG(ring, A6XX_RB_BIN_CONTROL(.binw = w, .binh = h, .dword = flag));
/* no flag for RB_BIN_CONTROL2... */
OUT_REG(ring, A6XX_RB_BIN_CONTROL2(.binw = w, .binh = h));
}
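/*
 * Emit the binning pass: program VSC state, replay the draw IB in binning
 * mode to generate the visibility streams, then run the VSC overflow test.
 */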
static void
emit_binning_pass(struct fd_batch *batch)
{
struct fd_ringbuffer *ring = batch->gmem;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
debug_assert(!batch->tessellation);
set_scissor(ring, 0, 0, gmem->width - 1, gmem->height - 1);
emit_marker6(ring, 7);
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
emit_marker6(ring, 7);
OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
OUT_RING(ring, 0x1);
OUT_PKT7(ring, CP_SET_MODE, 1);
OUT_RING(ring, 0x1);
OUT_WFI5(ring);
OUT_REG(ring, A6XX_VFD_MODE_CNTL(.binning_pass = true));
update_vsc_pipe(batch);
OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
OUT_RING(ring, fd6_ctx->magic.PC_UNKNOWN_9805);
OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
OUT_RING(ring, fd6_ctx->magic.SP_UNKNOWN_A0F8);
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, UNK_2C);
OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) |
A6XX_RB_WINDOW_OFFSET_Y(0));
OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
A6XX_SP_TP_WINDOW_OFFSET_Y(0));
/* emit IB to binning drawcmds: */
fd_log(batch, "GMEM: START BINNING IB");
fd6_emit_ib(ring, batch->draw);
fd_log(batch, "GMEM: END BINNING IB");
fd_reset_wfi(batch);
OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
CP_SET_DRAW_STATE__0_GROUP_ID(0));
OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, UNK_2D);
fd6_cache_inv(batch, ring);
fd6_cache_flush(batch, ring);
fd_wfi(batch, ring);
OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
fd_log(batch, "START VSC OVERFLOW TEST");
emit_vsc_overflow_test(batch);
fd_log(batch, "END VSC OVERFLOW TEST");
OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
OUT_RING(ring, 0x0);
OUT_PKT7(ring, CP_SET_MODE, 1);
OUT_RING(ring, 0x0);
OUT_WFI5(ring);
OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
OUT_RING(ring, fd6_ctx->magic.RB_CCU_CNTL_gmem);
}
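/* Emit sample-count state for the SP_TP, GRAS and RB blocks: */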
static void
emit_msaa(struct fd_ringbuffer *ring, unsigned nr)
{
enum a3xx_msaa_samples samples = fd_msaa_samples(nr);
OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(samples));
OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
COND(samples == MSAA_ONE, A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE));
OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(samples));
OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(samples) |
COND(samples == MSAA_ONE, A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE));
OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
OUT_RING(ring, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
COND(samples == MSAA_ONE, A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));
OUT_PKT4(ring, REG_A6XX_RB_MSAA_CNTL, 1);
OUT_RING(ring, A6XX_RB_MSAA_CNTL_SAMPLES(samples));
}
static void prepare_tile_setup_ib(struct fd_batch *batch);
static void prepare_tile_fini_ib(struct fd_batch *batch);
/* before first tile */
static void
fd6_emit_tile_init(struct fd_batch *batch)
{
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
fd6_emit_restore(batch, ring);
fd6_emit_lrz_flush(ring);
if (batch->lrz_clear) {
fd_log(batch, "START LRZ CLEAR");
fd6_emit_ib(ring, batch->lrz_clear);
fd_log(batch, "END LRZ CLEAR");
}
fd6_cache_inv(batch, ring);
prepare_tile_setup_ib(batch);
prepare_tile_fini_ib(batch);
OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x0);
/* blob controls "local" in IB2, but I think that is not required */
OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
OUT_RING(ring, 0x1);
fd_wfi(batch, ring);
OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
OUT_RING(ring, fd6_context(ctx)->magic.RB_CCU_CNTL_gmem);
emit_zs(ring, pfb->zsbuf, batch->gmem_state);
emit_mrt(ring, pfb, batch->gmem_state);
emit_msaa(ring, pfb->samples);
patch_fb_read(batch);
if (use_hw_binning(batch)) {
/* enable stream-out during binning pass: */
OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
OUT_RING(ring, 0);
set_bin_size(ring, gmem->bin_w, gmem->bin_h,
A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
update_render_cntl(batch, pfb, true);
emit_binning_pass(batch);
/* and disable stream-out for draw pass: */
OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
OUT_RING(ring, A6XX_VPC_SO_OVERRIDE_SO_DISABLE);
/*
 * NOTE: even if we detect VSC overflow and disable use of
 * visibility stream in draw pass, it is still safe to execute
 * the rest of these cmds:
 */
// NOTE a618 not setting .USE_VIZ .. from a quick check on a630, it
// does not appear that this bit changes much (i.e. it isn't actually
// .USE_VIZ like previous gens)
set_bin_size(ring, gmem->bin_w, gmem->bin_h,
A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
OUT_RING(ring, 0x0);
OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
OUT_RING(ring, fd6_context(ctx)->magic.PC_UNKNOWN_9805);
OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
OUT_RING(ring, fd6_context(ctx)->magic.SP_UNKNOWN_A0F8);
OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x1);
} else {
/* no binning pass, so enable stream-out for draw pass: */
OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
OUT_RING(ring, 0);
set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
}
update_render_cntl(batch, pfb, false);
}
static void
set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
{
OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) |
A6XX_RB_WINDOW_OFFSET_Y(y1));
OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) |
A6XX_RB_WINDOW_OFFSET2_Y(y1));
OUT_PKT4(ring, REG_A6XX_SP_WINDOW_OFFSET, 1);
OUT_RING(ring, A6XX_SP_WINDOW_OFFSET_X(x1) |
A6XX_SP_WINDOW_OFFSET_Y(y1));
OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(x1) |
A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}
/* before mem2gmem */
static void
fd6_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd6_context *fd6_ctx = fd6_context(ctx);
struct fd_ringbuffer *ring = batch->gmem;
emit_marker6(ring, 7);
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
emit_marker6(ring, 7);
uint32_t x1 = tile->xoff;
uint32_t y1 = tile->yoff;
uint32_t x2 = tile->xoff + tile->bin_w - 1;
uint32_t y2 = tile->yoff + tile->bin_h - 1;
set_scissor(ring, x1, y1, x2, y2);
if (use_hw_binning(batch)) {
const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[tile->p];
OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
OUT_PKT7(ring, CP_SET_MODE, 1);
OUT_RING(ring, 0x0);
OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
CP_SET_BIN_DATA5_0_VSC_N(tile->n));
OUT_RELOC(ring, fd6_ctx->vsc_draw_strm, /* per-pipe draw-stream address */
(tile->p * fd6_ctx->vsc_draw_strm_pitch), 0, 0);
OUT_RELOC(ring, fd6_ctx->vsc_draw_strm, /* VSC_DRAW_STRM_ADDRESS + (p * 4) */
(tile->p * 4) + (32 * fd6_ctx->vsc_draw_strm_pitch), 0, 0);
OUT_RELOC(ring, fd6_ctx->vsc_prim_strm,
(tile->p * fd6_ctx->vsc_prim_strm_pitch), 0, 0);
OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
OUT_RING(ring, 0x0);
set_window_offset(ring, x1, y1);
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
OUT_PKT7(ring, CP_SET_MODE, 1);
OUT_RING(ring, 0x0);
} else {
set_window_offset(ring, x1, y1);
OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
OUT_RING(ring, 0x1);
OUT_PKT7(ring, CP_SET_MODE, 1);
OUT_RING(ring, 0x0);
}
}
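/*
 * The scissor for gmem<->mem blits covers the batch's max scissor, rounded
 * out to 16x4 granularity (presumably a requirement of the blit path).
 */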
static void
set_blit_scissor(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
struct pipe_scissor_state blit_scissor = batch->max_scissor;
blit_scissor.minx = ROUND_DOWN_TO(blit_scissor.minx, 16);
blit_scissor.miny = ROUND_DOWN_TO(blit_scissor.miny, 4);
blit_scissor.maxx = ALIGN(blit_scissor.maxx, 16);
blit_scissor.maxy = ALIGN(blit_scissor.maxy, 4);
OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
OUT_RING(ring,
A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
OUT_RING(ring,
A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
}
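/*
 * Emit a gmem<->memory blit event for one surface (or its separate
 * stencil): RB_BLIT dst state, gmem base and UBWC flag buffer.  The
 * direction and other control bits in RB_BLIT_INFO are emitted by the
 * caller.
 */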
static void
emit_blit(struct fd_batch *batch,
struct fd_ringbuffer *ring,
uint32_t base,
struct pipe_surface *psurf,
bool stencil)
{
struct fd_resource *rsc = fd_resource(psurf->texture);
enum pipe_format pfmt = psurf->format;
uint32_t offset;
bool ubwc_enabled;
debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);
/* separate stencil case: */
if (stencil) {
rsc = rsc->stencil;
pfmt = rsc->base.format;
}
offset = fd_resource_offset(rsc, psurf->u.tex.level,
psurf->u.tex.first_layer);
ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);
enum a6xx_format format = fd6_pipe2color(pfmt);
uint32_t stride = fd_resource_pitch(rsc, psurf->u.tex.level);
uint32_t size = fd_resource_slice(rsc, psurf->u.tex.level)->size0;
enum a3xx_color_swap swap = fd6_resource_swap(rsc, pfmt);
enum a3xx_msaa_samples samples =
fd_msaa_samples(rsc->base.nr_samples);
uint32_t tile_mode = fd_resource_tile_mode(&rsc->base, psurf->u.tex.level);
OUT_REG(ring,
A6XX_RB_BLIT_DST_INFO(.tile_mode = tile_mode, .samples = samples,
.color_format = format, .color_swap = swap, .flags = ubwc_enabled),
A6XX_RB_BLIT_DST(.bo = rsc->bo, .bo_offset = offset),
A6XX_RB_BLIT_DST_PITCH(.a6xx_rb_blit_dst_pitch = stride),
A6XX_RB_BLIT_DST_ARRAY_PITCH(.a6xx_rb_blit_dst_array_pitch = size));
OUT_REG(ring, A6XX_RB_BLIT_BASE_GMEM(.dword = base));
if (ubwc_enabled) {
OUT_PKT4(ring, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
fd6_emit_flag_reference(ring, rsc,
psurf->u.tex.level, psurf->u.tex.first_layer);
}
fd6_emit_blit(batch, ring);
}
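/* Emit a blit to restore (sysmem -> gmem) one color or depth/stencil surface: */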
static void
emit_restore_blit(struct fd_batch *batch,
struct fd_ringbuffer *ring,
uint32_t base,
struct pipe_surface *psurf,
unsigned buffer)
{
bool stencil = (buffer == FD_BUFFER_STENCIL);
OUT_REG(ring, A6XX_RB_BLIT_INFO(
.gmem = true, .unk0 = true,
.depth = (buffer == FD_BUFFER_DEPTH),
.integer = util_format_is_pure_integer(psurf->format)));
emit_blit(batch, ring, base, psurf, stencil);
}
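/*
 * Emit fast-clears to gmem: per-MRT color clears (with the clear color
 * packed according to the format's component swap), then depth and/or
 * separate-stencil clears.
 */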
static void
emit_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
enum a3xx_msaa_samples samples = fd_msaa_samples(pfb->samples);
uint32_t buffers = batch->fast_cleared;
if (buffers & PIPE_CLEAR_COLOR) {
for (int i = 0; i < pfb->nr_cbufs; i++) {
union pipe_color_union *color = &batch->clear_color[i];
union util_color uc = {0};
if (!pfb->cbufs[i])
continue;
if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
continue;
enum pipe_format pfmt = pfb->cbufs[i]->format;
// XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
union pipe_color_union swapped;
switch (fd6_pipe2swap(pfmt)) {
case WZYX:
swapped.ui[0] = color->ui[0];
swapped.ui[1] = color->ui[1];
swapped.ui[2] = color->ui[2];
swapped.ui[3] = color->ui[3];
break;
case WXYZ:
swapped.ui[2] = color->ui[0];
swapped.ui[1] = color->ui[1];
swapped.ui[0] = color->ui[2];
swapped.ui[3] = color->ui[3];
break;
case ZYXW:
swapped.ui[3] = color->ui[0];
swapped.ui[0] = color->ui[1];
swapped.ui[1] = color->ui[2];
swapped.ui[2] = color->ui[3];
break;
case XYZW:
swapped.ui[3] = color->ui[0];
swapped.ui[2] = color->ui[1];
swapped.ui[1] = color->ui[2];
swapped.ui[0] = color->ui[3];
break;
}
util_pack_color_union(pfmt, &uc, &swapped);
OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));
OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));
OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
OUT_RING(ring, gmem->cbuf_base[i]);
OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
OUT_RING(ring, 0);
OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
OUT_RING(ring, uc.ui[0]);
OUT_RING(ring, uc.ui[1]);
OUT_RING(ring, uc.ui[2]);
OUT_RING(ring, uc.ui[3]);
fd6_emit_blit(batch, ring);
}
}
const bool has_depth = pfb->zsbuf;
const bool has_separate_stencil =
has_depth && fd_resource(pfb->zsbuf->texture)->stencil;
/* First clear depth or combined depth/stencil. */
if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
(!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
enum pipe_format pfmt = pfb->zsbuf->format;
uint32_t clear_value;
uint32_t mask = 0;
if (has_separate_stencil) {
pfmt = util_format_get_depth_only(pfb->zsbuf->format);
clear_value = util_pack_z(pfmt, batch->clear_depth);
} else {
pfmt = pfb->zsbuf->format;
clear_value = util_pack_z_stencil(pfmt, batch->clear_depth,
batch->clear_stencil);
}
if (buffers & PIPE_CLEAR_DEPTH)
mask |= 0x1;
if (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))
mask |= 0x2;
OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));
OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
// XXX UNK0 for separate stencil ??
A6XX_RB_BLIT_INFO_DEPTH |
A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));
OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
OUT_RING(ring, gmem->zsbuf_base[0]);
OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
OUT_RING(ring, 0);
OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
OUT_RING(ring, clear_value);
fd6_emit_blit(batch, ring);
}
/* Then clear the separate stencil buffer in case of 32 bit depth
* formats with separate stencil. */
if (has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(FMT6_8_UINT));
OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
//A6XX_RB_BLIT_INFO_UNK0 |
A6XX_RB_BLIT_INFO_DEPTH |
A6XX_RB_BLIT_INFO_CLEAR_MASK(0x1));
OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
OUT_RING(ring, gmem->zsbuf_base[1]);
OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
OUT_RING(ring, 0);
OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
OUT_RING(ring, batch->clear_stencil & 0xff);
fd6_emit_blit(batch, ring);
}
}
/*
* transfer from system memory to gmem
*/
static void
emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
if (batch->restore & FD_BUFFER_COLOR) {
unsigned i;
for (i = 0; i < pfb->nr_cbufs; i++) {
if (!pfb->cbufs[i])
continue;
if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
continue;
emit_restore_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
FD_BUFFER_COLOR);
}
}
if (batch->restore & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
if (!rsc->stencil || (batch->restore & FD_BUFFER_DEPTH)) {
emit_restore_blit(batch, ring, gmem->zsbuf_base[0], pfb->zsbuf,
FD_BUFFER_DEPTH);
}
if (rsc->stencil && (batch->restore & FD_BUFFER_STENCIL)) {
emit_restore_blit(batch, ring, gmem->zsbuf_base[1], pfb->zsbuf,
FD_BUFFER_STENCIL);
}
}
}
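/*
 * Build the IB that sets up each tile's gmem contents (restore blits
 * and/or fast clears); it is replayed per tile from _tile_renderprep().
 */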
static void
prepare_tile_setup_ib(struct fd_batch *batch)
{
batch->tile_setup = fd_submit_new_ringbuffer(batch->submit, 0x1000,
FD_RINGBUFFER_STREAMING);
set_blit_scissor(batch, batch->tile_setup);
emit_restore_blits(batch, batch->tile_setup);
emit_clears(batch, batch->tile_setup);
}
/*
 * transfer from system memory to gmem (a no-op here: the restore blits are
 * in the tile_setup IB, emitted per tile from _tile_renderprep())
 */
static void
fd6_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
}
/* before IB to rendering cmds: */
static void
fd6_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
fd_log(batch, "TILE: START CLEAR/RESTORE");
if (batch->fast_cleared || !use_hw_binning(batch)) {
fd6_emit_ib(batch->gmem, batch->tile_setup);
} else {
emit_conditional_ib(batch, tile, batch->tile_setup);
}
fd_log(batch, "TILE: END CLEAR/RESTORE");
}
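/* Emit a blit to resolve (gmem -> sysmem) one color or depth/stencil surface: */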
static void
emit_resolve_blit(struct fd_batch *batch,
struct fd_ringbuffer *ring,
uint32_t base,
struct pipe_surface *psurf,
unsigned buffer)
{
uint32_t info = 0;
bool stencil = false;
if (!fd_resource(psurf->texture)->valid)
return;
switch (buffer) {
case FD_BUFFER_COLOR:
break;
case FD_BUFFER_STENCIL:
info |= A6XX_RB_BLIT_INFO_UNK0;
stencil = true;
break;
case FD_BUFFER_DEPTH:
info |= A6XX_RB_BLIT_INFO_DEPTH;
break;
}
if (util_format_is_pure_integer(psurf->format))
info |= A6XX_RB_BLIT_INFO_INTEGER;
OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
OUT_RING(ring, info);
emit_blit(batch, ring, base, psurf, stencil);
}
/*
 * transfer from gmem to system memory (i.e. normal RAM)
 */
static void
prepare_tile_fini_ib(struct fd_batch *batch)
{
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_ringbuffer *ring;
batch->tile_fini = fd_submit_new_ringbuffer(batch->submit, 0x1000,
FD_RINGBUFFER_STREAMING);
ring = batch->tile_fini;
set_blit_scissor(batch, ring);
if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
emit_resolve_blit(batch, ring,
gmem->zsbuf_base[0], pfb->zsbuf,
FD_BUFFER_DEPTH);
}
if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
emit_resolve_blit(batch, ring,
gmem->zsbuf_base[1], pfb->zsbuf,
FD_BUFFER_STENCIL);
}
}
if (batch->resolve & FD_BUFFER_COLOR) {
unsigned i;
for (i = 0; i < pfb->nr_cbufs; i++) {
if (!pfb->cbufs[i])
continue;
if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
continue;
emit_resolve_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
FD_BUFFER_COLOR);
}
}
}
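/* Emit the draw IB for this tile, skipping empty bins when hw binning is used: */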
static void
fd6_emit_tile(struct fd_batch *batch, const struct fd_tile *tile)
{
if (!use_hw_binning(batch)) {
fd6_emit_ib(batch->gmem, batch->draw);
} else {
emit_conditional_ib(batch, tile, batch->draw);
}
}
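/*
 * Per-tile resolve: emit the tile_fini IB (gmem -> sysmem blits),
 * conditionally skipping empty bins when hw binning is used and there were
 * no fast clears.
 */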
static void
fd6_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_ringbuffer *ring = batch->gmem;
if (use_hw_binning(batch)) {
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
}
OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
CP_SET_DRAW_STATE__0_GROUP_ID(0));
OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));
OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
OUT_RING(ring, 0x0);
emit_marker6(ring, 7);
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
emit_marker6(ring, 7);
fd_log(batch, "TILE: START RESOLVE");
if (batch->fast_cleared || !use_hw_binning(batch)) {
fd6_emit_ib(batch->gmem, batch->tile_fini);
} else {
emit_conditional_ib(batch, tile, batch->tile_fini);
}
fd_log(batch, "TILE: END RESOLVE");
}
static void
fd6_emit_tile_fini(struct fd_batch *batch)
{
struct fd_ringbuffer *ring = batch->gmem;
if (batch->epilogue)
fd6_emit_ib(batch->gmem, batch->epilogue);
OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE);
fd6_emit_lrz_flush(ring);
fd6_event_write(batch, ring, PC_CCU_RESOLVE_TS, true);
if (use_hw_binning(batch)) {
check_vsc_overflow(batch->ctx);
}
}
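/*
 * Fast-clears for sysmem (bypass) rendering go through fd6_clear_surface()
 * rather than gmem blit events.
 */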
static void
emit_sysmem_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
struct fd_context *ctx = batch->ctx;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
uint32_t buffers = batch->fast_cleared;
if (buffers & PIPE_CLEAR_COLOR) {
for (int i = 0; i < pfb->nr_cbufs; i++) {
union pipe_color_union *color = &batch->clear_color[i];
if (!pfb->cbufs[i])
continue;
if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
continue;
fd6_clear_surface(ctx, ring,
pfb->cbufs[i], pfb->width, pfb->height, color);
}
}
if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
union pipe_color_union value = {};
const bool has_depth = pfb->zsbuf;
struct pipe_resource *separate_stencil =
has_depth && fd_resource(pfb->zsbuf->texture)->stencil ?
&fd_resource(pfb->zsbuf->texture)->stencil->base : NULL;
if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
(!separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
value.f[0] = batch->clear_depth;
value.ui[1] = batch->clear_stencil;
fd6_clear_surface(ctx, ring,
pfb->zsbuf, pfb->width, pfb->height, &value);
}
if (separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
value.ui[0] = batch->clear_stencil;
struct pipe_surface stencil_surf = *pfb->zsbuf;
stencil_surf.format = PIPE_FORMAT_S8_UINT;
stencil_surf.texture = separate_stencil;
fd6_clear_surface(ctx, ring,
&stencil_surf, pfb->width, pfb->height, &value);
}
}
fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
}
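/*
 * Allocate the per-batch tess factor/param BOs and emit their addresses;
 * the addresses are also written into batch->tess_addrs_constobj.
 */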
static void
setup_tess_buffers(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
struct fd_context *ctx = batch->ctx;
batch->tessfactor_bo = fd_bo_new(ctx->screen->dev,
batch->tessfactor_size,
DRM_FREEDRENO_GEM_TYPE_KMEM, "tessfactor");
batch->tessparam_bo = fd_bo_new(ctx->screen->dev,
batch->tessparam_size,
DRM_FREEDRENO_GEM_TYPE_KMEM, "tessparam");
OUT_PKT4(ring, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
OUT_RELOC(ring, batch->tessfactor_bo, 0, 0, 0);
batch->tess_addrs_constobj->cur = batch->tess_addrs_constobj->start;
OUT_RELOC(batch->tess_addrs_constobj, batch->tessparam_bo, 0, 0, 0);
OUT_RELOC(batch->tess_addrs_constobj, batch->tessfactor_bo, 0, 0, 0);
}
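/* Emit state to start a sysmem (bypass) pass, rendering directly to memory without tiling: */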
static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
{
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_ringbuffer *ring = batch->gmem;
fd6_emit_restore(batch, ring);
if (pfb->width > 0 && pfb->height > 0)
set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);
else
set_scissor(ring, 0, 0, 0, 0);
set_window_offset(ring, 0, 0);
set_bin_size(ring, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
emit_sysmem_clears(batch, ring);
fd6_emit_lrz_flush(ring);
if (batch->lrz_clear)
fd6_emit_ib(ring, batch->lrz_clear);
emit_marker6(ring, 7);
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
emit_marker6(ring, 7);
if (batch->tessellation)
setup_tess_buffers(batch, ring);
OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x0);
/* blob controls "local" in IB2, but I think that is not required */
OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
OUT_RING(ring, 0x1);
fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
fd6_cache_inv(batch, ring);
fd_wfi(batch, ring);
OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
OUT_RING(ring, fd6_context(batch->ctx)->magic.RB_CCU_CNTL_bypass);
/* enable stream-out, with sysmem there is only one pass: */
OUT_PKT4(ring, REG_A6XX_VPC_SO_OVERRIDE, 1);
OUT_RING(ring, 0);
OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
OUT_RING(ring, 0x1);
emit_zs(ring, pfb->zsbuf, NULL);
emit_mrt(ring, pfb, NULL);
emit_msaa(ring, pfb->samples);
update_render_cntl(batch, pfb, false);
}
static void
fd6_emit_sysmem_fini(struct fd_batch *batch)
{
struct fd_ringbuffer *ring = batch->gmem;
if (batch->epilogue)
fd6_emit_ib(batch->gmem, batch->epilogue);
OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x0);
fd6_emit_lrz_flush(ring);
fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
}
void
fd6_gmem_init(struct pipe_context *pctx)
{
struct fd_context *ctx = fd_context(pctx);
ctx->emit_tile_init = fd6_emit_tile_init;
ctx->emit_tile_prep = fd6_emit_tile_prep;
ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
ctx->emit_tile = fd6_emit_tile;
ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
ctx->emit_tile_fini = fd6_emit_tile_fini;
ctx->emit_sysmem_prep = fd6_emit_sysmem_prep;
ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
}