// Copyright (C) 2016 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package builder implements builders for resources from requests
// typically stored in the database, optionally depending on replay outputs.
package builder

import (
	"bytes"
	"io"
	"sync"

	"android.googlesource.com/platform/tools/gpu/client/gapii"
	"android.googlesource.com/platform/tools/gpu/framework/binary"
	"android.googlesource.com/platform/tools/gpu/framework/binary/cyclic"
	"android.googlesource.com/platform/tools/gpu/framework/binary/vle"
	"android.googlesource.com/platform/tools/gpu/framework/id"
	"android.googlesource.com/platform/tools/gpu/framework/log"
	"android.googlesource.com/platform/tools/gpu/gapid/atom"
	"android.googlesource.com/platform/tools/gpu/gapid/database"
	"android.googlesource.com/platform/tools/gpu/gapid/gfxapi"
	"android.googlesource.com/platform/tools/gpu/gapid/replay"
	"android.googlesource.com/platform/tools/gpu/gapid/service"
	"android.googlesource.com/platform/tools/gpu/gapid/service/path"
)

// The list of captures currently imported.
// TODO: This needs to be moved to persistent storage.
var capturesLock sync.RWMutex
var captures = []*path.Capture{}

// Proxy is the type that should be passed to the database constructor's
// buildProxy parameter.
type Proxy struct {
	ReplayManager *replay.Manager
}
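
// A Proxy is typically constructed once at startup and handed to the
// database constructor (a sketch; rm is assumed to be an existing
// *replay.Manager, and the constructor itself is not shown):
//
//	proxy := builder.Proxy{ReplayManager: rm}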

// encode serializes v to a byte slice using a cyclic encoder over a
// variable-length-encoded (VLE) writer.
func encode(v binary.Object) ([]byte, error) {
	buf := &bytes.Buffer{}
	e := cyclic.Encoder(vle.Writer(buf))
	if e.Object(v); e.Error() != nil {
		return nil, e.Error()
	}
	return buf.Bytes(), nil
}

// decode deserializes a single binary.Object from data, reversing encode.
func decode(data []byte) (binary.Object, error) {
	d := cyclic.Decoder(vle.Reader(bytes.NewBuffer(data)))
	o := d.Object()
	return o, d.Error()
}
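
// encode and decode are inverses of each other; a minimal round-trip sketch,
// assuming obj is some binary.Object:
//
//	data, err := encode(obj)
//	if err != nil {
//		return err
//	}
//	clone, err := decode(data) // clone is equivalent to obj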

// extractResources returns a new atom list with all the resource atoms
// removed and their data stored in the database. Read and write observations
// on the remaining atoms are remapped to the new database identifiers.
func extractResources(ctx log.Context, a *atom.List, d database.Database) (*atom.List, error) {
	out := atom.NewList(make([]atom.Atom, 0, len(a.Atoms))...)
	idmap := map[id.ID]id.ID{}
	for _, a := range a.Atoms {
		switch a := a.(type) {
		case *atom.Resource:
			dataID, err := database.Store(ctx, a.Data, d)
			if err != nil {
				return out, err
			}
			idmap[a.ID] = dataID

		default:
			// Replace capture-time resource identifiers with direct database
			// identifiers. This avoids a database link indirection.
			if observations := a.Extras().Observations(); observations != nil {
				for i, r := range observations.Reads {
					if newID, found := idmap[r.ID]; found {
						observations.Reads[i].ID = newID
					}
				}
				for i, w := range observations.Writes {
					if newID, found := idmap[w.ID]; found {
						observations.Writes[i].ID = newID
					}
				}
			}
			out.Atoms = append(out.Atoms, a)
		}
	}
	return out, nil
}
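
// For example (illustrative notation only), the stream
// [Resource{ID: c1, Data: d1}, draw{reads: c1}] becomes [draw{reads: s1}],
// where s1 is the database identifier returned when d1 was stored.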

// AtomsImportHandler is the interface optionally implemented by APIs that
// want to process the atom stream on import.
type AtomsImportHandler interface {
	TransformAtomStream(log.Context, []atom.Atom, database.Database) ([]atom.Atom, error)
}
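
// An API opts in by implementing the interface, for example (a hypothetical
// sketch; exampleAPI is not a real type in this codebase):
//
//	func (exampleAPI) TransformAtomStream(ctx log.Context, atoms []atom.Atom, d database.Database) ([]atom.Atom, error) {
//		// Inspect or rewrite the atoms here; the returned slice is what
//		// ImportCapture stores.
//		return atoms, nil
//	}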

// ImportCapture builds a new capture containing a, stores it into d and
// returns the new capture path.
func ImportCapture(ctx log.Context, name string, a *atom.List, d database.Database) (*path.Capture, error) {
	a, err := extractResources(ctx, a, d)
	if err != nil {
		return nil, err
	}

	// Gather all the APIs used by the capture.
	apis := map[gfxapi.ID]gfxapi.API{}
	apiIDs := []service.ApiID{}
	for _, a := range a.Atoms {
		if api := a.API(); api != nil {
			apiID := api.ID()
			if _, found := apis[apiID]; !found {
				apis[apiID] = api
				apiIDs = append(apiIDs, service.ApiID(apiID))
			}
		}
	}

	// Give each API a chance to transform the stream before it is stored.
	for _, api := range apis {
		if aih, ok := api.(AtomsImportHandler); ok {
			a.Atoms, err = aih.TransformAtomStream(ctx, a.Atoms, d)
			if err != nil {
				return nil, err
			}
		}
	}

	// Store the (possibly transformed) atom stream.
	streamID, err := database.Store(ctx, a, d)
	if err != nil {
		return nil, err
	}

	capture := &service.Capture{
		Apis:  apiIDs,
		Name:  name,
		Atoms: service.AtomsID(streamID),
	}

	captureID, err := database.Store(ctx, capture, d)
	if err != nil {
		return nil, err
	}

	p := &path.Capture{ID: captureID}
	capturesLock.Lock()
	captures = append(captures, p)
	capturesLock.Unlock()

	return p, nil
}
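
// Typical usage (a sketch; assumes ctx, a database d and an atom list named
// list already exist):
//
//	p, err := builder.ImportCapture(ctx, "my-trace", list, d)
//	if err != nil {
//		return err
//	}
//	// p.ID now identifies the stored service.Capture.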

// ReadAndImportCapture reads capture data from an io.Reader, imports it into
// the given database and returns the new capture path, or nil if the stream
// contained no atoms.
func ReadAndImportCapture(ctx log.Context, name string, in io.Reader, d database.Database) (*path.Capture, error) {
	list, err := gapii.ReadCapture(ctx, in)
	if err != nil {
		return nil, err
	}
	if len(list.Atoms) == 0 {
		return nil, nil // Nothing to import.
	}
	return ImportCapture(ctx, name, list, d)
}
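
// A sketch of importing a trace file from disk (assumes ctx and d exist, and
// that the caller imports os):
//
//	f, err := os.Open("capture.gfxtrace")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	p, err := builder.ReadAndImportCapture(ctx, "capture", f, d)
//	if err == nil && p == nil {
//		// The stream contained no atoms; nothing was imported.
//	}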

// ExportCapture encodes the given capture and its associated resources and
// writes them to the supplied io.Writer in the .gfxtrace format, producing
// output suitable for use with ReadAndImportCapture or for opening in the
// trace editor.
func ExportCapture(ctx log.Context, capture *path.Capture, d database.Database, w io.Writer) error {
	atoms, err := ResolveAtoms(ctx, capture.Atoms(), d)
	if err != nil {
		return err
	}
	encoder := cyclic.Encoder(vle.Writer(w))
	encoder.String(gapii.CaptureTag)

	// Resource IDs seen so far, so the same resource data is not encoded twice.
	seen := map[id.ID]bool{}

	encodeObservation := func(o atom.Observation) error {
		if seen[o.ID] {
			return nil
		}
		data, err := database.Resolve(ctx, o.ID, d)
		if err != nil {
			return err
		}
		encoder.Variant(&atom.Resource{ID: o.ID, Data: data.([]uint8)})
		seen[o.ID] = true
		return encoder.Error()
	}

	for _, a := range atoms {
		if observations := a.Extras().Observations(); observations != nil {
			for _, r := range observations.Reads {
				if err := encodeObservation(r); err != nil {
					return err
				}
			}
			for _, w := range observations.Writes {
				if err := encodeObservation(w); err != nil {
					return err
				}
			}
		}
		encoder.Variant(a)
		if encoder.Error() != nil {
			return encoder.Error()
		}
	}
	return encoder.Error()
}
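
// A sketch of the matching export path (assumes ctx, d and an imported
// capture path p):
//
//	f, err := os.Create("exported.gfxtrace")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	return builder.ExportCapture(ctx, p, d, f)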

// Captures returns the paths of all captures currently imported.
func Captures(ctx log.Context, db database.Database) ([]*path.Capture, error) {
	capturesLock.RLock()
	defer capturesLock.RUnlock()
	// Return a copy so callers do not race with appends made by ImportCapture
	// after the lock is released.
	out := make([]*path.Capture, len(captures))
	copy(out, captures)
	return out, nil
}
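
// A sketch of enumerating the imported captures (assumes ctx and db, and that
// the caller imports fmt):
//
//	paths, err := builder.Captures(ctx, db)
//	if err != nil {
//		return err
//	}
//	for _, p := range paths {
//		fmt.Println(p.ID)
//	}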

// uniformScale scales width × height down uniformly (preserving the aspect
// ratio) so that the result fits within maxWidth × maxHeight. Dimensions that
// already fit are returned unchanged.
func uniformScale(width, height, maxWidth, maxHeight uint32) (w, h uint32) {
	w, h = width, height
	scaleX, scaleY := float32(w)/float32(maxWidth), float32(h)/float32(maxHeight)
	if scaleX > 1.0 || scaleY > 1.0 {
		if scaleX > scaleY {
			w, h = uint32(float32(w)/scaleX), uint32(float32(h)/scaleX)
		} else {
			w, h = uint32(float32(w)/scaleY), uint32(float32(h)/scaleY)
		}
	}
	return w, h
}
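
// A worked example of the scaling arithmetic:
//
//	w, h := uniformScale(1920, 1080, 256, 256)
//	// scaleX = 1920/256 = 7.5, scaleY = 1080/256 ≈ 4.22; scaleX wins,
//	// so w, h = 1920/7.5, 1080/7.5 = 256, 144 (16:9 preserved).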