blob: ed2b01ad5cd253de03536e19de0b8ef812dc7b76 [file] [log] [blame]
// Copyright (C) 2016 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gles
import (
"bytes"
"fmt"
"reflect"
"android.googlesource.com/platform/tools/gpu/framework/binary/cyclic"
"android.googlesource.com/platform/tools/gpu/framework/binary/vle"
"android.googlesource.com/platform/tools/gpu/framework/device"
"android.googlesource.com/platform/tools/gpu/framework/interval"
"android.googlesource.com/platform/tools/gpu/framework/log"
"android.googlesource.com/platform/tools/gpu/framework/shadertools"
"android.googlesource.com/platform/tools/gpu/gapid/atom"
"android.googlesource.com/platform/tools/gpu/gapid/config"
"android.googlesource.com/platform/tools/gpu/gapid/database"
"android.googlesource.com/platform/tools/gpu/gapid/gfxapi"
"android.googlesource.com/platform/tools/gpu/gapid/gfxapi/gles/glsl/ast"
"android.googlesource.com/platform/tools/gpu/gapid/gfxapi/state"
"android.googlesource.com/platform/tools/gpu/gapid/memory"
"android.googlesource.com/platform/tools/gpu/gapid/replay"
"android.googlesource.com/platform/tools/gpu/gapid/replay/builder"
)
// support describes whether a GL feature is available on an implementation,
// and if available, whether its use is mandatory.
type support int

const (
	unsupported support = iota
	supported
	required
)
var (
	// We don't include tests directly in the gles package as it adds
	// significantly to the test build time.
	VisibleForTestingCompat     = compat
	VisibleForTestingGlSlCompat = glslCompat
)
// DefaultVertexArrayId is the vertex array object the compatibility layer
// substitutes for the default vertex array object (id 0) when the target
// platform does not allow id 0 to be used.
const DefaultVertexArrayId = VertexArrayId(0xFFFF0001)
// extensions is the set of GL extension names exposed by an implementation.
type extensions map[string]struct{}

// listToExtensions builds an extension set from a slice of extension names.
func listToExtensions(list []string) extensions {
	set := make(extensions, len(list))
	for _, name := range list {
		set[name] = struct{}{}
	}
	return set
}
// translateExtensions builds an extension set from the extension strings
// stored in a traced context's constants map.
func translateExtensions(in U32ːstringᵐ) extensions {
	result := make(extensions, len(in))
	for _, name := range in {
		result[name] = struct{}{}
	}
	return result
}
// get returns supported if the named extension is present in the set,
// otherwise unsupported. It never returns required.
func (e extensions) get(name string) support {
	_, found := e[name]
	if found {
		return supported
	}
	return unsupported
}
// String returns a human-readable name for the support level.
func (s support) String() string {
	names := map[support]string{
		unsupported: "unsupported",
		supported:   "supported",
		required:    "required",
	}
	if name, ok := names[s]; ok {
		return name
	}
	return fmt.Sprintf("support<%d>", s)
}
// features describes the subset of GL capabilities the compatibility layer
// cares about for a given device or traced context.
type features struct {
	vertexHalfFloatOES        support // support for GL_OES_vertex_half_float
	eglImageExternal          support // support for GL_OES_EGL_image_external
	vertexArrayObjects        support // support for vertex array objects (VAOs)
	supportGenerateMipmapHint bool    // support for GL_GENERATE_MIPMAP_HINT
	compressedTextureFormats  map[GLenum]struct{}
	framebufferSrgb           support // support for GL_FRAMEBUFFER_SRGB
}
// getFeatures parses the given GL version string and derives the feature
// set for an implementation exposing the given extensions. The parsed
// version is returned alongside the features (and also on parse failure).
func getFeatures(ctx log.Context, version string, ext extensions) (features, *Version, error) {
	v, err := ParseVersion(version)
	if err != nil {
		return features{}, v, err
	}

	f := features{
		vertexHalfFloatOES:        ext.get("GL_OES_vertex_half_float"),
		eglImageExternal:          ext.get("GL_OES_EGL_image_external"),
		compressedTextureFormats:  getSupportedCompressedTextureFormats(ext),
		supportGenerateMipmapHint: v.IsES,
	}

	// TODO: Properly check the specifications for these flags.
	// VAOs are optional in GLES 3.0+, but mandatory in core desktop GL 3.0+.
	if v.Major >= 3 {
		if v.IsES {
			f.vertexArrayObjects = supported
		} else {
			f.vertexArrayObjects = required
		}
	}

	// GLES defaults FRAMEBUFFER_SRGB to enabled and only allows changing it via
	// an extension, while desktop GL defaults to disabled.
	if v.IsES {
		f.framebufferSrgb = ext.get("GL_EXT_sRGB_write_control")
	} else {
		f.framebufferSrgb = required
	}

	return f, v, nil
}
// scratchBuffer describes a GL buffer object created by the compatibility
// layer, tracking its current size so it can be grown on demand and reused.
type scratchBuffer struct {
	size GLsizeiptr
	id   BufferId
}
// cloneAtom returns a deep copy of the atom by round-tripping it through the
// cyclic/vle binary encoder. Encode/decode errors are not checked here; a
// malformed atom would surface as a failed type assertion on the result.
func cloneAtom(a atom.Atom) atom.Atom {
	var b bytes.Buffer
	cyclic.Encoder(vle.Writer(&b)).Variant(a)
	return (cyclic.Decoder(vle.Reader(&b)).Variant()).(atom.Atom)
}
// compat returns an atom transformer that rewrites the atoms of a capture so
// that they can be replayed on the given target device, working around
// missing extensions, version differences and driver-specific behavior.
// The returned transformer mutates a private replay state as it goes so that
// later atoms can be translated in the context of the earlier ones.
func compat(ctx log.Context, device *device.Information, d database.Database) (atom.Transformer, error) {
	ctx = ctx.Enter("compat")
	glDev := device.Configuration.GraphicsDrivers.OpenGL
	target, version, err := getFeatures(ctx, glDev.Version, listToExtensions(glDev.Extensions))
	if err != nil {
		return nil, fmt.Errorf(
			"Error '%v' when getting feature list for version: '%s', extensions: '%s'.",
			err, glDev.Version, glDev.Extensions)
	}

	// Features of each traced context, populated when the context is first
	// made current.
	contexts := map[*Context]features{}

	s := state.New(ctx)

	// Scratch buffers used to fix up misaligned uniform-buffer bindings,
	// keyed by (context, target, index) so they can be reused across calls.
	scratchBuffers := map[interface{}]scratchBuffer{}
	nextBufferID := BufferId(0xffff0000)
	newBuffer := func(out atom.Writer) BufferId {
		id := nextBufferID
		tmp := atom.Must(atom.AllocData(ctx, s, d, id))
		out.Write(ctx, atom.NoID, NewGlGenBuffers(1, tmp.Ptr()).AddWrite(tmp.Data()))
		nextBufferID--
		return id
	}

	textureCompat := &textureCompat{
		f:                    target,
		v:                    version,
		ctx:                  ctx,
		s:                    s,
		d:                    d,
		textureCompatSwizzle: map[*Texture]map[GLenum]GLenum{},
	}

	var t atom.Transformer
	t = atom.Transform("compat", func(ctx log.Context, i atom.ID, a atom.Atom, out atom.Writer) {
		switch a := a.(type) {
		case *EglMakeCurrent: // TODO: Check for GLX, CGL, WGL...
			// The compatibility layer introduces calls to GL functions that are defined for desktop GL
			// and for GLES 3.0+. If the trace originated on a GLES 2.0 device, these new atoms' mutate
			// functions will fail the minRequiredVersion checks (which look at the version coming from
			// the original context from the trace).
			// TODO(dsrbecky): This might make some atoms valid for replay which were invalid on trace.
			scs := FindStaticContextState(a.Extras())
			if scs != nil && !version.IsES && scs.Constants.MajorVersion < 3 {
				// Clone before editing so the original capture atom is untouched.
				a = cloneAtom(a).(*EglMakeCurrent)
				scs = FindStaticContextState(a.Extras())
				scs.Constants.MajorVersion = 3
				scs.Constants.MinorVersion = 0
			}

			// Mutate to set the context, Version and Extensions strings.
			mutateAndWrite(ctx, i, a, s, d, out)

			c := GetContext(s)
			if c == nil || !c.Info.Initialized {
				return
			}
			if _, found := contexts[c]; found {
				return
			}

			source, _, err := getFeatures(ctx, c.Constants.Version, translateExtensions(c.Constants.Extensions))
			if err != nil {
				ctx.Error().V("version", c.Constants.Version).V("extensions", c.Constants.Extensions).
					Fail(err, "Getting feature list.")
				return
			}
			contexts[c] = source

			if target.vertexArrayObjects == required &&
				source.vertexArrayObjects != required {
				// Replay device requires VAO, but capture did not enforce it.
				// Satisfy the target by creating and binding a single VAO
				// which we will use instead of the default VAO (id 0).
				tmp := atom.Must(atom.AllocData(ctx, s, d, DefaultVertexArrayId))
				out.Write(ctx, atom.NoID, NewGlGenVertexArrays(1, tmp.Ptr()).AddWrite(tmp.Data()))
				out.Write(ctx, atom.NoID, NewGlBindVertexArray(DefaultVertexArrayId))
			}
			return
		}

		c := GetContext(s)
		if c == nil {
			// The compatibility translations below assume that we have a valid context.
			mutateAndWrite(ctx, i, a, s, d, out)
			return
		}

		switch a := a.(type) {
		case *GlBindBuffer:
			if a.Buffer != 0 && !c.Instances.Buffers.Contains(a.Buffer) {
				// glGenBuffers() was not used to generate the buffer. Legal in GLES 2.
				tmp := atom.Must(atom.AllocData(ctx, s, d, a.Buffer))
				out.Write(ctx, atom.NoID, NewGlGenBuffers(1, tmp.Ptr()).AddRead(tmp.Data()))
			}

		case *GlBindTexture:
			if !c.Instances.Textures.Contains(a.Texture) {
				// glGenTextures() was not used to generate the texture. Legal in GLES 2.
				// Note: allocate the texture id itself; the previous code wrongly
				// wrapped it in VertexArrayId (copy-paste from the VAO path).
				tmp := atom.Must(atom.AllocData(ctx, s, d, a.Texture))
				out.Write(ctx, atom.NoID, NewGlGenTextures(1, tmp.Ptr()).AddRead(tmp.Data()))
			}

			if a.Target == GLenum_GL_TEXTURE_EXTERNAL_OES && target.eglImageExternal == unsupported {
				// TODO: Implement full support for external images.
				// Remap external textures to plain 2D textures - this matches GLSL compat.
				out.Write(ctx, atom.NoID, NewGlBindTexture(GLenum_GL_TEXTURE_2D, a.Texture))
				return
			}

		case *GlBindVertexArray:
			if a.Array == VertexArrayId(0) {
				if target.vertexArrayObjects == required &&
					contexts[c].vertexArrayObjects != required {
					a.Mutate(ctx, s, d, nil /* no builder, just mutate */)
					out.Write(ctx, atom.NoID, NewGlBindVertexArray(DefaultVertexArrayId))
					return
				}
			}

		case *GlBindVertexArrayOES:
			if a.Array == VertexArrayId(0) {
				if target.vertexArrayObjects == required &&
					contexts[c].vertexArrayObjects != required {
					a.Mutate(ctx, s, d, nil /* no builder, just mutate */)
					out.Write(ctx, atom.NoID, NewGlBindVertexArray(DefaultVertexArrayId))
					return
				}
			}

		case *GlBindBufferRange:
			misalignment := a.Offset % GLintptr(glDev.UniformBufferAlignment)
			if a.Target == GLenum_GL_UNIFORM_BUFFER && misalignment != 0 {
				// We have a glBindBufferRange() taking a uniform buffer with an
				// illegal offset alignment.
				// TODO: We don't handle the case where the buffer is kept bound
				// while the buffer is updated. It's an unlikely issue, but
				// something that may break us.
				if _, ok := c.Instances.Buffers[a.Buffer]; !ok {
					return // Don't know what buffer this is referring to.
				}

				// We need a scratch buffer to copy the buffer data to a correct
				// alignment.
				key := struct {
					c      *Context
					Target GLenum
					Index  GLuint
				}{c, a.Target, a.Index}

				// Look for pre-existing buffer we can reuse.
				buffer, ok := scratchBuffers[key]
				if !ok {
					buffer.id = newBuffer(out)
					scratchBuffers[key] = buffer
				}

				// Bind the scratch buffer to GL_COPY_WRITE_BUFFER
				out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_COPY_WRITE_BUFFER, buffer.id))

				if buffer.size < a.Size {
					// Resize the scratch buffer
					out.Write(ctx, atom.NoID, NewGlBufferData(GLenum_GL_COPY_WRITE_BUFFER, a.Size, memory.Nullptr, GLenum_GL_DYNAMIC_COPY))
					buffer.size = a.Size
					scratchBuffers[key] = buffer
				}

				// Copy out the misaligned data to the scratch buffer in the
				// GL_COPY_WRITE_BUFFER binding.
				out.Write(ctx, atom.NoID, NewGlBindBuffer(a.Target, a.Buffer))
				out.Write(ctx, atom.NoID, NewGlCopyBufferSubData(a.Target, GLenum_GL_COPY_WRITE_BUFFER, a.Offset, 0, a.Size))

				// We can now bind the range with correct alignment.
				out.Write(ctx, i, NewGlBindBufferRange(a.Target, a.Index, buffer.id, 0, a.Size))

				// Restore old GL_COPY_WRITE_BUFFER binding.
				out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_COPY_WRITE_BUFFER, c.BoundBuffers.CopyWriteBuffer))
				return
			}

		case *GlDisableVertexAttribArray:
			vao := c.Instances.VertexArrays[c.BoundVertexArray]
			if vao.VertexAttributeArrays[a.Location].Enabled == GLboolean_GL_FALSE {
				// Ignore the call if it is redundant (i.e. it is already disabled).
				// Some applications iterate over all arrays and explicitly disable them.
				// This is a problem if the target supports fewer arrays than the capture.
				return
			}

		case *GlVertexAttrib4fv:
			if oldAttrib, ok := c.VertexAttributes[a.Location]; ok {
				oldValue := oldAttrib.Value.Read(ctx, a, s, d, nil /* builder */)
				a.Mutate(ctx, s, d, nil /* no builder, just mutate */)
				newAttrib := c.VertexAttributes[a.Location]
				newValue := newAttrib.Value.Read(ctx, a, s, d, nil /* builder */)
				if reflect.DeepEqual(oldValue, newValue) {
					// Ignore the call if it is redundant.
					// Some applications iterate over all arrays and explicitly initialize them.
					// This is a problem if the target supports fewer arrays than the capture.
					return
				}
			}
			out.Write(ctx, i, a)
			return

		case *GlGetVertexAttribIiv,
			*GlGetVertexAttribIuiv,
			*GlGetVertexAttribPointerv,
			*GlGetVertexAttribfv,
			*GlGetVertexAttribiv:
			// Some applications iterate over all arrays and query their state.
			// This may fail if the target supports fewer arrays than the capture.
			// As these should have no side-effects, just drop them.
			return

		case *GlShaderSource:
			// Apply the state mutation of the unmodified glShaderSource atom.
			// This is so we can grab the source string from the Shader object.
			if err := a.Mutate(ctx, s, d, nil /* no builder, just mutate */); err != nil {
				return
			}
			shader := c.Instances.Shaders.Get(a.Shader)
			src := ""
			if config.UseGlslang {
				opts := shadertools.Option{
					IsFragmentShader: shader.Type == GLenum_GL_FRAGMENT_SHADER,
					IsVertexShader:   shader.Type == GLenum_GL_VERTEX_SHADER,
				}
				res := shadertools.ConvertGlsl(shader.Source, &opts)
				if !res.Ok {
					ctx.Error().V("id", i).Logf("Failed to translate GLSL:\n%s\nSource:%s\n", res.Message, shader.Source)
					return
				}
				src = res.SourceCode
			} else {
				lang := ast.LangVertexShader
				switch shader.Type {
				case GLenum_GL_VERTEX_SHADER:
				case GLenum_GL_FRAGMENT_SHADER:
					lang = ast.LangFragmentShader
				default:
					ctx.Warning().V("type", shader.Type).Log("Unknown shader type")
				}

				// Use a case-local error variable so we do not clobber the
				// function-scope err captured by this closure.
				compatSrc, compatErr := glslCompat(ctx, shader.Source, lang, device, d)
				if compatErr != nil {
					ctx.Error().V("id", i).Fail(compatErr, "Reformatting GLSL source for atom")
				}
				src = compatSrc
			}

			tmpSrc := atom.Must(atom.AllocData(ctx, s, d, src))
			tmpPtrToSrc := atom.Must(atom.AllocData(ctx, s, d, tmpSrc.Ptr()))
			a = NewGlShaderSource(a.Shader, 1, tmpPtrToSrc.Ptr(), memory.Nullptr).
				AddRead(tmpSrc.Data()).
				AddRead(tmpPtrToSrc.Data())
			mutateAndWrite(ctx, i, a, s, d, out)
			return

		// TODO: glVertexAttribIPointer
		case *GlVertexAttribPointer:
			if a.Type == GLenum_GL_HALF_FLOAT_OES && target.vertexHalfFloatOES == unsupported {
				// Convert GL_HALF_FLOAT_OES to GL_HALF_FLOAT_ARB.
				a = NewGlVertexAttribPointer(a.Location, a.Size, GLenum_GL_HALF_FLOAT_ARB, a.Normalized, a.Stride, memory.Pointer(a.Data))
			}
			if target.vertexArrayObjects == required &&
				c.BoundBuffers.ArrayBuffer == 0 {
				// Client-pointers are not supported, we need to copy this data to a buffer.
				// However, we can't do this now as the observation only happens at the draw call.
				// Apply the state changes, but don't emit the atom - we need to defer
				// the trickery to the draw call.
				a.Mutate(ctx, s, d, nil /* no builder, just mutate */)
				return
			}
			mutateAndWrite(ctx, i, a, s, d, out)
			return

		case *GlDrawArrays:
			if target.vertexArrayObjects == required {
				if clientVAsBound(c) {
					first := int(a.FirstIndex)
					last := first + int(a.IndicesCount) - 1
					defer moveClientVBsToVAs(ctx, first, last, i, a, s, c, d, out)()
				}
			}

		case *GlDrawElements:
			if target.vertexArrayObjects == required {
				e := externs{ctx: ctx, a: a, s: s, d: d}
				ib := c.Instances.VertexArrays[c.BoundVertexArray].ElementArrayBuffer
				clientIB := ib == 0
				clientVB := clientVAsBound(c)
				if clientIB {
					// The indices for the glDrawElements call are in client memory.
					// We need to move this into a temporary buffer.

					// Generate a new element array buffer and bind it.
					id := BufferId(newUnusedID(func(x uint32) bool { _, ok := c.Instances.Buffers[BufferId(x)]; return ok }))
					c.Instances.Buffers[id] = &Buffer{} // Not used aside from reserving the ID.
					tmp := atom.Must(atom.AllocData(ctx, s, d, id))
					out.Write(ctx, atom.NoID, NewGlGenBuffers(1, tmp.Ptr()).AddRead(tmp.Data()))
					out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_ELEMENT_ARRAY_BUFFER, id))

					// By moving the draw call's observations earlier, populate the element array buffer.
					size, base := DataTypeSize(a.IndicesType)*int(a.IndicesCount), memory.Pointer(a.Indices)
					glBufferData := NewGlBufferData(GLenum_GL_ELEMENT_ARRAY_BUFFER, GLsizeiptr(size), memory.Pointer(base), GLenum_GL_STATIC_DRAW)
					glBufferData.extras = a.extras
					out.Write(ctx, atom.NoID, glBufferData)

					// Clean-up: restore the (null) index binding and release the id.
					defer func() {
						out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_ELEMENT_ARRAY_BUFFER, ib))
						delete(c.Instances.Buffers, id)
					}()

					if clientVB {
						// Some of the vertex arrays for the glDrawElements call are in
						// client memory and we need to move them into temporary buffer(s).
						// The indices are also in client memory, so we need to apply the
						// atom's reads now so that the indices can be read from the
						// application pool.
						a.Extras().Observations().ApplyReads(s.Memory[memory.ApplicationPool])
						limits := e.calcIndexLimits(U8ᵖ(a.Indices), a.IndicesType, 0, uint32(a.IndicesCount))
						defer moveClientVBsToVAs(ctx, int(limits.Min), int(limits.Max), i, a, s, c, d, out)()
					}

					glDrawElements := *a
					glDrawElements.Indices.Address = 0
					glDrawElements.Mutate(ctx, s, d, nil /* no builder, just mutate */)
					out.Write(ctx, i, &glDrawElements)
					return

				} else if clientVB { // GL_ELEMENT_ARRAY_BUFFER is bound
					// Some of the vertex arrays for the glDrawElements call are in
					// client memory and we need to move them into temporary buffer(s).
					// The indices are server-side, so can just be read from the internal
					// pooled buffer.
					data := c.Instances.Buffers[ib].Data.Index(0, s)
					base := uint32(a.Indices.Address)
					limits := e.calcIndexLimits(data, a.IndicesType, base, uint32(a.IndicesCount))
					defer moveClientVBsToVAs(ctx, int(limits.Min), int(limits.Max), i, a, s, c, d, out)()
				}
			}

		case *GlCompressedTexImage2D:
			if _, supported := target.compressedTextureFormats[a.Format]; !supported {
				// Declare err in case scope: the previous code logged the stale
				// function-scope err instead of the decompression error.
				err := decompressTexImage2D(ctx, i, a, s, d, out)
				if err == nil {
					return
				}
				ctx.Fail(err, "Decompressing texture")
			}

		case *GlCompressedTexSubImage2D:
			if _, supported := target.compressedTextureFormats[a.Format]; !supported {
				// Declare err in case scope: the previous code logged the stale
				// function-scope err instead of the decompression error.
				err := decompressTexSubImage2D(ctx, i, a, s, d, out)
				if err == nil {
					return
				}
				ctx.Fail(err, "Decompressing texture")
			}

		// TODO: glTexStorage functions are not guaranteed to be supported. Consider replacing with glTexImage calls.
		// TODO: Handle glTextureStorage family of functions - those use direct state access, not the bound texture.
		case *GlTexStorage1DEXT:
			{
				a := *a
				textureCompat.convertFormat(a.Target, &a.Internalformat, nil, nil, out)
				if !version.IsES { // Strip suffix on desktop.
					a := NewGlTexStorage1D(a.Target, a.Levels, a.Internalformat, a.Width)
					mutateAndWrite(ctx, i, a, s, d, out)
					return
				}
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexStorage2D:
			{
				a := *a
				textureCompat.convertFormat(a.Target, &a.Internalformat, nil, nil, out)
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexStorage2DEXT:
			{
				a := *a
				textureCompat.convertFormat(a.Target, &a.Internalformat, nil, nil, out)
				if !version.IsES { // Strip suffix on desktop.
					a := NewGlTexStorage2D(a.Target, a.Levels, a.Internalformat, a.Width, a.Height)
					mutateAndWrite(ctx, i, a, s, d, out)
					return
				}
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexStorage2DMultisample:
			{
				a := *a
				textureCompat.convertFormat(a.Target, &a.Internalformat, nil, nil, out)
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexStorage3D:
			{
				a := *a
				textureCompat.convertFormat(a.Target, &a.Internalformat, nil, nil, out)
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexStorage3DEXT:
			{
				a := *a
				textureCompat.convertFormat(a.Target, &a.Internalformat, nil, nil, out)
				if !version.IsES { // Strip suffix on desktop.
					a := NewGlTexStorage3D(a.Target, a.Levels, a.Internalformat, a.Width, a.Height, a.Depth)
					mutateAndWrite(ctx, i, a, s, d, out)
					return
				}
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexStorage3DMultisample:
			{
				a := *a
				textureCompat.convertFormat(a.Target, &a.Internalformat, nil, nil, out)
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexStorage3DMultisampleOES:
			{
				a := *a
				textureCompat.convertFormat(a.Target, &a.Internalformat, nil, nil, out)
				if !version.IsES { // Strip suffix on desktop.
					a := NewGlTexStorage3DMultisample(a.Target, a.Samples, a.Internalformat, a.Width, a.Height, a.Depth, a.Fixedsamplelocations)
					mutateAndWrite(ctx, i, a, s, d, out)
					return
				}
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexImage2D:
			{
				a := *a
				internalformat := GLenum(a.Internalformat)
				textureCompat.convertFormat(a.Target, &internalformat, &a.Format, &a.Type, out)
				a.Internalformat = GLint(internalformat)
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexImage3D:
			{
				a := *a
				internalformat := GLenum(a.Internalformat)
				textureCompat.convertFormat(a.Target, &internalformat, &a.Format, &a.Type, out)
				a.Internalformat = GLint(internalformat)
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexImage3DOES:
			{
				a := *a
				textureCompat.convertFormat(a.Target, &a.Internalformat, &a.Format, &a.Type, out)
				if !version.IsES { // Strip suffix on desktop.
					extras := a.extras
					a := NewGlTexImage3D(a.Target, a.Level, GLint(a.Internalformat), a.Width, a.Height, a.Depth, a.Border, a.Format, a.Type, memory.Pointer(a.Pixels))
					a.extras = extras
					mutateAndWrite(ctx, i, a, s, d, out)
					return
				}
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexSubImage2D:
			{
				a := *a
				textureCompat.convertFormat(a.Target, nil, &a.Format, &a.Type, out)
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexSubImage3D:
			{
				a := *a
				textureCompat.convertFormat(a.Target, nil, &a.Format, &a.Type, out)
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlTexSubImage3DOES:
			{
				a := *a
				textureCompat.convertFormat(a.Target, nil, &a.Format, &a.Type, out)
				if !version.IsES { // Strip suffix on desktop.
					extras := a.extras
					a := NewGlTexSubImage3D(a.Target, a.Level, a.Xoffset, a.Yoffset, a.Zoffset, a.Width, a.Height, a.Depth, a.Format, a.Type, memory.Pointer(a.Pixels))
					a.extras = extras
					mutateAndWrite(ctx, i, a, s, d, out)
					return
				}
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}
		case *GlCopyTexImage2D:
			{
				a := *a
				textureCompat.convertFormat(a.Target, &a.Internalformat, nil, nil, out)
				mutateAndWrite(ctx, i, &a, s, d, out)
				return
			}

		case *GlTexParameterIivOES:
			mutateAndWrite(ctx, i, a, s, d, out)
			textureCompat.postTexParameter(a.Target, a.Pname, out)
			return
		case *GlTexParameterIuivOES:
			mutateAndWrite(ctx, i, a, s, d, out)
			textureCompat.postTexParameter(a.Target, a.Pname, out)
			return
		case *GlTexParameterIiv:
			mutateAndWrite(ctx, i, a, s, d, out)
			textureCompat.postTexParameter(a.Target, a.Pname, out)
			return
		case *GlTexParameterIuiv:
			mutateAndWrite(ctx, i, a, s, d, out)
			textureCompat.postTexParameter(a.Target, a.Pname, out)
			return
		case *GlTexParameterf:
			mutateAndWrite(ctx, i, a, s, d, out)
			textureCompat.postTexParameter(a.Target, a.Parameter, out)
			return
		case *GlTexParameterfv:
			mutateAndWrite(ctx, i, a, s, d, out)
			textureCompat.postTexParameter(a.Target, a.Pname, out)
			return
		case *GlTexParameteri:
			mutateAndWrite(ctx, i, a, s, d, out)
			textureCompat.postTexParameter(a.Target, a.Parameter, out)
			return
		case *GlTexParameteriv:
			mutateAndWrite(ctx, i, a, s, d, out)
			textureCompat.postTexParameter(a.Target, a.Pname, out)
			return
		case *GlTexParameterIivEXT:
			mutateAndWrite(ctx, i, a, s, d, out)
			textureCompat.postTexParameter(a.Target, a.Pname, out)
			return
		case *GlTexParameterIuivEXT:
			mutateAndWrite(ctx, i, a, s, d, out)
			textureCompat.postTexParameter(a.Target, a.Pname, out)
			return

		case *GlProgramBinary:
			if !canUsePrecompiledShader(c, glDev) {
				for _, a := range buildStubProgram(ctx, a.Extras(), s, d, a.Program) {
					mutateAndWrite(ctx, atom.NoID, a, s, d, atom.TransformWriter{T: t, O: out})
				}
				return
			}

		case *GlProgramBinaryOES:
			if !canUsePrecompiledShader(c, glDev) {
				for _, a := range buildStubProgram(ctx, a.Extras(), s, d, a.Program) {
					mutateAndWrite(ctx, atom.NoID, a, s, d, atom.TransformWriter{T: t, O: out})
				}
				return
			}

		case *GlHint:
			if a.Target == GLenum_GL_GENERATE_MIPMAP_HINT && !target.supportGenerateMipmapHint {
				return // Not supported in the core profile of OpenGL.
			}

		case *GlGetBooleani_v,
			*GlGetBooleanv,
			*GlGetFloatv,
			*GlGetInteger64i_v,
			*GlGetInteger64v,
			*GlGetIntegeri_v,
			*GlGetIntegerv,
			*GlGetInternalformativ,
			*GlGetString,
			*GlGetStringi:
			// The acceptable values of these get functions vary between GL versions.
			// As these should have no side-effects, just drop them.
			return

		case *GlGetActiveAttrib,
			*GlGetActiveUniform:
			// The number of active attributes and uniforms can vary between compilers
			// depending on their ability to eliminate dead code. In particular,
			// dead code in pixel shader can allow code removal in the vertex shader.
			// As these should have no side-effects, just drop them.
			return

		case *GlLabelObjectEXT,
			*GlGetObjectLabelEXT,
			*GlObjectLabel,
			*GlObjectLabelKHR,
			*GlGetObjectLabel,
			*GlObjectPtrLabel,
			*GlGetObjectPtrLabel,
			*GlGetObjectLabelKHR:
			// These methods require non-trivial remapping for replay.
			// As they do not affect rendering output, just drop them.
			return

		case *GlInsertEventMarkerEXT,
			*GlPushGroupMarkerEXT,
			*GlPopGroupMarkerEXT:
			// GL_EXT_debug_marker may not be supported on the replay device.
			// As they do not affect rendering output, just drop them.
			return

		case *GlGetProgramBinary,
			*GlGetProgramBinaryOES:
			// Program binaries are very driver specific. This command may fail on replay
			// because one of the arguments must be GL_PROGRAM_BINARY_LENGTH.
			// It has no side effects, so just drop it.
			return

		case *GlEnable:
			if a.Capability == GLenum_GL_FRAMEBUFFER_SRGB &&
				target.framebufferSrgb == required && contexts[c].framebufferSrgb != required &&
				c.BoundFramebuffers[GLenum_GL_DRAW_FRAMEBUFFER] == 0 {
				// Ignore enabling of FRAMEBUFFER_SRGB if the capture device did not
				// support an SRGB default framebuffer, but the replay device does. This
				// is only done if the current bound draw framebuffer is the default
				// framebuffer. The state is mutated so that when a non-default
				// framebuffer is bound later on, FRAMEBUFFER_SRGB will be enabled.
				// (see GlBindFramebuffer below)
				a.Mutate(ctx, s, d, nil /* no builder, just mutate */)
				return
			}

		case *GlDisable:
			// GL_QCOM_alpha_test adds back GL_ALPHA_TEST from GLES 1.0 as extension.
			// It seems that applications only disable it to make sure it is off, so
			// we can safely ignore it. We should not ignore glEnable for it though.
			if a.Capability == GLenum_GL_ALPHA_TEST_QCOM {
				return
			}

		case *GlGetGraphicsResetStatusEXT:
			// From extension GL_EXT_robustness
			// It may not be implemented by the replay driver.
			// It has no effect on rendering so just drop it.
			return

		case *GlInvalidateFramebuffer,
			*GlDiscardFramebufferEXT: // GL_EXT_discard_framebuffer
			// It may not be implemented by the replay driver.
			// It is only a hint so we can just drop it.
			// TODO: It has performance impact so we should not ignore it when profiling.
			return

		case *GlMapBufferOES:
			if !version.IsES { // Remove extension suffix on desktop.
				a := NewGlMapBuffer(a.Target, a.Access, memory.Pointer(a.Result))
				mutateAndWrite(ctx, i, a, s, d, out)
				return
			}

		case *GlMapBufferRangeEXT:
			if !version.IsES { // Remove extension suffix on desktop.
				a := NewGlMapBufferRange(a.Target, a.Offset, a.Length, a.Access, memory.Pointer(a.Result))
				mutateAndWrite(ctx, i, a, s, d, out)
				return
			}

		case *GlFlushMappedBufferRangeEXT:
			if !version.IsES { // Remove extension suffix on desktop.
				extras := a.extras
				a := NewGlFlushMappedBufferRange(a.Target, a.Offset, a.Length)
				a.extras = extras
				mutateAndWrite(ctx, i, a, s, d, out)
				return
			}

		case *GlUnmapBufferOES:
			if !version.IsES { // Remove extension suffix on desktop.
				extras := a.extras
				a := NewGlUnmapBuffer(a.Target, a.Result)
				a.extras = extras
				mutateAndWrite(ctx, i, a, s, d, out)
				return
			}

		case *GlBindFramebuffer:
			if target.framebufferSrgb == required && contexts[c].framebufferSrgb != required &&
				c.FragmentOperations.FramebufferSrgb != 0 {
				// Replay device defaults FRAMEBUFFER_SRGB to disabled and allows
				// enabling it (desktop), while the capture device defaulted to enabled
				// and may or may not have allowed it to be changed (GLES). While at the
				// same time, we currently assume that the default frame buffer is not
				// SRGB capable. Thus, when SRGB is enabled in the state, and we're
				// binding the default framebuffer, SRGB needs to be disabled, and
				// specifically enabled when binding the non-default framebuffer.
				// (If it was explicitly disabled in the capture, no change is needed.)
				// TODO: Handle the use of the EGL KHR_gl_colorspace extension.
				if a.Target == GLenum_GL_FRAMEBUFFER || a.Target == GLenum_GL_DRAW_FRAMEBUFFER {
					if a.Framebuffer == 0 {
						out.Write(ctx, atom.NoID, NewGlDisable(GLenum_GL_FRAMEBUFFER_SRGB))
					} else {
						out.Write(ctx, atom.NoID, NewGlEnable(GLenum_GL_FRAMEBUFFER_SRGB))
					}
				}
			}

		default:
			if a.AtomFlags().IsDrawCall() && clientVAsBound(c) {
				ctx.Warning().T("atom", a).Log("Draw call with client-pointers not handled by the compatibility layer")
			}
		}

		mutateAndWrite(ctx, i, a, s, d, out)
	})

	return t, nil
}
// mutateAndWrite applies the atom's state mutation (without a replay builder)
// and then forwards the atom to out. The mutation error is deliberately
// ignored here; downstream consumers only observe the (best-effort) state.
func mutateAndWrite(ctx log.Context, i atom.ID, a atom.Atom, s *gfxapi.State, d database.Database, out atom.Writer) {
	a.Mutate(ctx, s, d, nil /* no builder, just mutate */)
	out.Write(ctx, i, a)
}
// canUsePrecompiledShader returns true if precompiled shaders / programs
// captured with the context c can be replayed on the device d. They are only
// considered reusable when both the vendor and the driver version match.
func canUsePrecompiledShader(c *Context, d *device.OpenGLDriver) bool {
	sameVendor := c.Constants.Vendor == d.Vendor
	sameVersion := c.Constants.Version == d.Version
	return sameVendor && sameVersion
}
// clientVAsBound returns true if there are any vertex attribute arrays enabled
// with pointers to client-side memory (no buffer bound, non-null pointer).
func clientVAsBound(c *Context) bool {
	// Only the default vertex array can use client-side memory.
	if c.BoundVertexArray != 0 {
		return false
	}
	va := c.Instances.VertexArrays[c.BoundVertexArray]
	for _, attr := range va.VertexAttributeArrays {
		if attr.Enabled != GLboolean_GL_TRUE {
			continue
		}
		binding := va.VertexBufferBindings[attr.Binding]
		if binding.Buffer == 0 && attr.Pointer.Address != 0 {
			return true
		}
	}
	return false
}
// moveClientVBsToVAs is a compatibility helper for transforming client-side
// vertex array data (which is not supported by glVertexAttribPointer in later
// versions of GL), into array-buffers.
//
// first and last are the inclusive vertex indices used by the draw call.
// The returned revert function restores the original buffer bindings and
// releases the temporary ids; it is never nil, as callers invoke it
// immediately in a defer statement.
func moveClientVBsToVAs(
	ctx log.Context,
	first, last int, // vertex indices
	i atom.ID,
	a atom.Atom,
	s *gfxapi.State,
	c *Context,
	d database.Database,
	out atom.Writer) (revert func()) {

	rngs := interval.U64RangeList{}
	// Gather together all the client-buffers in use by the vertex-attribs.
	// Merge together all the memory intervals that these use.
	va := c.Instances.VertexArrays[c.BoundVertexArray]
	for _, arr := range va.VertexAttributeArrays {
		if arr.Enabled == GLboolean_GL_TRUE {
			vb := va.VertexBufferBindings[arr.Binding]
			if vb.Buffer == 0 && arr.Pointer.Address != 0 {
				// TODO: We're currently ignoring the Offset and Stride fields of the VBB.
				// TODO: We're currently ignoring the RelativeOffset field of the VA.
				// TODO: Merge logic with ReadVertexArrays macro in vertex_arrays.api.
				if vb.Divisor != 0 {
					panic("Instanced draw calls not currently supported by the compatibility layer")
				}
				size := DataTypeSize(arr.Type) * int(arr.Size)
				stride := int(vb.Stride)
				base := memory.Pointer(arr.Pointer) // Always start from the 0'th vertex to simplify logic.
				rng := base.Range(uint64(last*stride + size))
				interval.Merge(&rngs, rng.Span(), true)
			}
		}
	}

	if len(rngs) == 0 {
		// Draw call does not use client-side buffers. Just draw.
		mutateAndWrite(ctx, i, a, s, d, out)
		// Return a no-op revert rather than nil: callers call the result in
		// a defer statement, and deferring a nil func would panic.
		return func() {}
	}

	// Create an array-buffer for each chunk of overlapping client-side buffers in
	// use. These are populated with data below.
	ids := make([]BufferId, len(rngs))
	for i := range rngs {
		id := BufferId(newUnusedID(func(x uint32) bool { _, ok := c.Instances.Buffers[BufferId(x)]; return ok }))
		c.Instances.Buffers[id] = &Buffer{} // Not used aside from reserving the ID.
		ids[i] = id
	}
	tmp := atom.Must(atom.AllocData(ctx, s, d, ids))
	out.Write(ctx, atom.NoID, NewGlGenBuffers(GLsizei(len(ids)), tmp.Ptr()).AddRead(tmp.Data()))

	// Apply the memory observations that were made by the draw call now.
	// We need to do this as the glBufferData calls below will require the data.
	out.Write(ctx, atom.NoID, replay.Custom(func(ctx log.Context, s *gfxapi.State, d database.Database, b *builder.Builder) error {
		a.Extras().Observations().ApplyReads(s.Memory[memory.ApplicationPool])
		return nil
	}))

	// Note: be careful of overwriting the observations made above, before the
	// calls to glBufferData below.

	// Fill the array-buffers with the observed memory data.
	for i, rng := range rngs {
		base := memory.Pointer{Address: rng.First, Pool: memory.ApplicationPool}
		size := GLsizeiptr(rng.Count)
		out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_ARRAY_BUFFER, ids[i]))
		out.Write(ctx, atom.NoID, NewGlBufferData(GLenum_GL_ARRAY_BUFFER, size, base, GLenum_GL_STATIC_DRAW))
	}

	// Redirect all the vertex attrib arrays to point to the array-buffer data.
	for l, arr := range va.VertexAttributeArrays {
		if arr.Enabled == GLboolean_GL_TRUE {
			vb := va.VertexBufferBindings[arr.Binding]
			if vb.Buffer == 0 && arr.Pointer.Address != 0 {
				i := interval.IndexOf(&rngs, arr.Pointer.Address)
				offset := arr.Pointer.Address - rngs[i].First
				out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_ARRAY_BUFFER, ids[i]))
				out.Write(ctx, atom.NoID, &GlVertexAttribPointer{
					Location:   l,
					Size:       GLint(arr.Size),
					Type:       arr.Type,
					Normalized: arr.Normalized,
					Stride:     arr.Stride,
					Data:       NewVertexPointer(offset),
				})
			}
		}
	}

	// Restore original state.
	return func() {
		out.Write(ctx, atom.NoID, NewGlBindBuffer(GLenum_GL_ARRAY_BUFFER, c.BoundBuffers.ArrayBuffer))
		for _, id := range ids {
			delete(c.Instances.Buffers, id)
			tmp := atom.Must(atom.AllocData(ctx, s, d, id))
			out.Write(ctx, atom.NoID, NewGlDeleteBuffers(1, tmp.Ptr()).AddRead(tmp.Data()))
		}
	}
}