blob: d4551ed34130eeeb663843693f95b010674dffba [file] [log] [blame]
// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! This module implements the virtio wayland device used by the guest to access the host's wayland
//! server.
//!
//! The virtio wayland protocol is done over two queues: `in` and `out`. The `in` queue is used for
//! sending commands to the guest that are generated by the host, usually messages from the wayland
//! server. The `out` queue is for commands from the guest, usually requests to allocate shared
//! memory, open a wayland server connection, or send data over an existing connection.
//!
//! Each `WlVfd` represents one virtual file descriptor created by either the guest or the host.
//! Virtual file descriptors contain actual file descriptors, either a shared memory file descriptor
//! or a unix domain socket to the wayland server. In the shared memory case, there is also an
//! associated slot that indicates which hypervisor memory slot the memory is installed into, as
//! well as a page frame number that the guest can access the memory from.
//!
//! The types starting with `Ctrl` are structures representing the virtio wayland protocol "on the
//! wire." They are decoded and executed in the `execute` function and encoded as some variant of
//! `WlResp` for responses.
//!
//! There is one `WlState` instance that contains every known vfd and the current state of `in`
//! queue. The `in` queue requires extra state to buffer messages to the guest in case the `in`
//! queue is already full. The `WlState` also has a control socket necessary to fulfill certain
//! requests, such as those registering guest memory.
//!
//! The `Worker` is responsible for the poll loop over all possible events, encoding/decoding from
//! the virtio queue, and routing messages in and out of `WlState`. Possible events include the kill
//! event, available descriptors on the `in` or `out` queue, and incoming data on any vfd's socket.
use std::cell::RefCell;
use std::collections::btree_map::Entry;
use std::collections::BTreeMap as Map;
use std::collections::BTreeSet as Set;
use std::collections::VecDeque;
use std::convert::From;
use std::error::Error as StdError;
use std::fmt;
use std::fs::File;
use std::io;
use std::io::IoSliceMut;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::io::Write;
use std::mem::size_of;
#[cfg(feature = "minigbm")]
use std::os::raw::c_uint;
#[cfg(feature = "minigbm")]
use std::os::raw::c_ulonglong;
use std::os::unix::net::UnixStream;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use std::result;
use std::time::Duration;
use anyhow::anyhow;
use anyhow::Context;
use base::error;
#[cfg(feature = "minigbm")]
use base::ioctl_iow_nr;
use base::ioctl_iowr_nr;
use base::ioctl_with_ref;
use base::pagesize;
use base::pipe;
use base::round_up_to_page_size;
use base::warn;
use base::AsRawDescriptor;
use base::Error;
use base::Event;
use base::EventToken;
use base::EventType;
use base::FileFlags;
use base::FromRawDescriptor;
#[cfg(feature = "gpu")]
use base::IntoRawDescriptor;
use base::Protection;
use base::RawDescriptor;
use base::Result;
use base::SafeDescriptor;
use base::ScmSocket;
use base::SharedMemory;
use base::SharedMemoryUnix;
use base::Tube;
use base::TubeError;
use base::WaitContext;
use base::WorkerThread;
use data_model::*;
#[cfg(feature = "minigbm")]
use libc::EBADF;
#[cfg(feature = "minigbm")]
use libc::EINVAL;
use remain::sorted;
use resources::address_allocator::AddressAllocator;
use resources::AddressRange;
use resources::Alloc;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::DrmFormat;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::ImageAllocationInfo;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::ImageMemoryRequirements;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaDescriptor;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaError;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaGralloc;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaGrallocFlags;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaIntoRawDescriptor;
use thiserror::Error as ThisError;
use vm_control::VmMemorySource;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use vm_memory::GuestMemoryError;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
#[cfg(feature = "gpu")]
use super::resource_bridge::get_resource_info;
#[cfg(feature = "gpu")]
use super::resource_bridge::BufferInfo;
#[cfg(feature = "gpu")]
use super::resource_bridge::ResourceBridgeError;
#[cfg(feature = "gpu")]
use super::resource_bridge::ResourceInfo;
#[cfg(feature = "gpu")]
use super::resource_bridge::ResourceRequest;
use super::DeviceType;
use super::Interrupt;
use super::Queue;
use super::Reader;
use super::SharedMemoryMapper;
use super::SharedMemoryRegion;
use super::SignalableInterrupt;
use super::VirtioDevice;
use super::Writer;
use crate::virtio::device_constants::wl::QUEUE_SIZES;
use crate::virtio::device_constants::wl::VIRTIO_WL_F_SEND_FENCES;
use crate::virtio::device_constants::wl::VIRTIO_WL_F_TRANS_FLAGS;
use crate::virtio::device_constants::wl::VIRTIO_WL_F_USE_SHMEM;
use crate::virtio::virtio_device::Error as VirtioError;
use crate::virtio::VirtioDeviceSaved;
use crate::Suspendable;
// Maximum number of vfd ids that can accompany a single send/recv message.
const VIRTWL_SEND_MAX_ALLOCS: usize = 28;
// Command codes exchanged over the virtio queues. These values are part of the
// virtio-wl wire protocol and must not change.
const VIRTIO_WL_CMD_VFD_NEW: u32 = 256;
const VIRTIO_WL_CMD_VFD_CLOSE: u32 = 257;
const VIRTIO_WL_CMD_VFD_SEND: u32 = 258;
const VIRTIO_WL_CMD_VFD_RECV: u32 = 259;
const VIRTIO_WL_CMD_VFD_NEW_CTX: u32 = 260;
const VIRTIO_WL_CMD_VFD_NEW_PIPE: u32 = 261;
const VIRTIO_WL_CMD_VFD_HUP: u32 = 262;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_CMD_VFD_NEW_DMABUF: u32 = 263;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_CMD_VFD_DMABUF_SYNC: u32 = 264;
#[cfg(feature = "gpu")]
const VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: u32 = 265;
const VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED: u32 = 266;
// Response codes returned to the guest.
const VIRTIO_WL_RESP_OK: u32 = 4096;
const VIRTIO_WL_RESP_VFD_NEW: u32 = 4097;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_RESP_VFD_NEW_DMABUF: u32 = 4098;
const VIRTIO_WL_RESP_ERR: u32 = 4352;
const VIRTIO_WL_RESP_OUT_OF_MEMORY: u32 = 4353;
const VIRTIO_WL_RESP_INVALID_ID: u32 = 4354;
const VIRTIO_WL_RESP_INVALID_TYPE: u32 = 4355;
const VIRTIO_WL_RESP_INVALID_FLAGS: u32 = 4356;
const VIRTIO_WL_RESP_INVALID_CMD: u32 = 4357;
// Per-vfd capability flags. Note READ and MAP share the value 0x2: which
// meaning applies depends on whether transition flags are in use (see
// WlVfd::flags).
const VIRTIO_WL_VFD_WRITE: u32 = 0x1;
const VIRTIO_WL_VFD_READ: u32 = 0x2;
const VIRTIO_WL_VFD_MAP: u32 = 0x2;
const VIRTIO_WL_VFD_CONTROL: u32 = 0x4;
const VIRTIO_WL_VFD_FENCE: u32 = 0x8;
// Host-created vfd ids start at this base; the same bit is used as the mask
// distinguishing host ids from guest ids.
const NEXT_VFD_ID_BASE: u32 = 0x40000000;
const VFD_ID_HOST_MASK: u32 = NEXT_VFD_ID_BASE;
// Each in-vq buffer is one page, so we need to leave space for the control header and the maximum
// number of allocs.
const IN_BUFFER_LEN: usize =
    0x1000 - size_of::<CtrlVfdRecv>() - VIRTWL_SEND_MAX_ALLOCS * size_of::<Le32>();
#[cfg(feature = "minigbm")]
const VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK: u32 = 0x7;
// Ioctl number base for dma-buf ioctls ('b').
#[cfg(feature = "minigbm")]
const DMA_BUF_IOCTL_BASE: c_uint = 0x62;
/// Argument for DMA_BUF_IOCTL_SYNC; mirrors the kernel's `struct dma_buf_sync`.
#[cfg(feature = "minigbm")]
#[repr(C)]
#[derive(Copy, Clone)]
struct dma_buf_sync {
    flags: c_ulonglong,
}
#[cfg(feature = "minigbm")]
ioctl_iow_nr!(DMA_BUF_IOCTL_SYNC, DMA_BUF_IOCTL_BASE, 0, dma_buf_sync);
/// Mirror of the kernel's `struct sync_file_info`, used with the
/// SYNC_IOC_FILE_INFO ioctl to probe whether a descriptor is a fence.
#[repr(C)]
#[derive(Copy, Clone, Default)]
struct sync_file_info {
    name: [u8; 32],
    status: i32,
    flags: u32,
    num_fences: u32,
    pad: u32,
    sync_fence_info: u64,
}
ioctl_iowr_nr!(SYNC_IOC_FILE_INFO, 0x3e, 4, sync_file_info);
/// Returns true if `f` is a sync_file fence, determined by whether the
/// SYNC_IOC_FILE_INFO ioctl succeeds on it.
fn is_fence(f: &File) -> bool {
    let info = sync_file_info::default();
    // Safe as f is a valid file
    // NOTE(review): SYNC_IOC_FILE_INFO is an _IOWR ioctl, so the kernel
    // presumably writes back into `info`; only the return code is inspected
    // here — confirm this is intentional.
    unsafe { ioctl_with_ref(f, SYNC_IOC_FILE_INFO(), &info) == 0 }
}
/// Stride and offset of a single plane of a GPU buffer, reported to the guest.
#[cfg(feature = "minigbm")]
#[derive(Debug, Default)]
struct GpuMemoryPlaneDesc {
    stride: u32,
    offset: u32,
}
/// Plane layout of a GPU buffer (up to three planes).
#[cfg(feature = "minigbm")]
#[derive(Debug, Default)]
struct GpuMemoryDesc {
    planes: [GpuMemoryPlaneDesc; 3],
}
// Discriminants for the `kind` field of CtrlVfdSendVfd/CtrlVfdSendVfdV2,
// describing what the accompanying id/seqno refers to.
const VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL: u32 = 0;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU: u32 = 1;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE: u32 = 2;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE: u32 = 3;
// Shift converting between byte offsets and page frame numbers (4K pages).
const VIRTIO_WL_PFN_SHIFT: u32 = 12;
/// Writes a VFD_NEW message for vfd `vfd_id` into `writer`. The same wire
/// format serves both as a host-initiated command and as a reply to the guest;
/// `resp` selects which header type is emitted.
fn encode_vfd_new(
    writer: &mut Writer,
    resp: bool,
    vfd_id: u32,
    flags: u32,
    pfn: u64,
    size: u32,
) -> WlResult<()> {
    let hdr_type = if resp {
        VIRTIO_WL_RESP_VFD_NEW
    } else {
        VIRTIO_WL_CMD_VFD_NEW
    };
    let msg = CtrlVfdNew {
        hdr: CtrlHeader {
            type_: Le32::from(hdr_type),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
        flags: Le32::from(flags),
        pfn: Le64::from(pfn),
        size: Le32::from(size),
        padding: Default::default(),
    };
    writer.write_obj(msg).map_err(WlError::WriteResponse)
}
/// Writes a VFD_NEW_DMABUF response for vfd `vfd_id` into `writer`, including
/// the buffer's per-plane stride/offset layout from `desc`.
#[cfg(feature = "minigbm")]
fn encode_vfd_new_dmabuf(
    writer: &mut Writer,
    vfd_id: u32,
    flags: u32,
    pfn: u64,
    size: u32,
    desc: GpuMemoryDesc,
) -> WlResult<()> {
    let [p0, p1, p2] = &desc.planes;
    let msg = CtrlVfdNewDmabuf {
        hdr: CtrlHeader {
            type_: Le32::from(VIRTIO_WL_RESP_VFD_NEW_DMABUF),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
        flags: Le32::from(flags),
        pfn: Le64::from(pfn),
        size: Le32::from(size),
        width: Le32::from(0),
        height: Le32::from(0),
        format: Le32::from(0),
        stride0: Le32::from(p0.stride),
        stride1: Le32::from(p1.stride),
        stride2: Le32::from(p2.stride),
        offset0: Le32::from(p0.offset),
        offset1: Le32::from(p1.offset),
        offset2: Le32::from(p2.offset),
    };
    writer.write_obj(msg).map_err(WlError::WriteResponse)
}
/// Writes a VFD_RECV message into `writer`: a header, then the array of
/// incoming vfd ids, then the raw payload bytes.
fn encode_vfd_recv(writer: &mut Writer, vfd_id: u32, data: &[u8], vfd_ids: &[u32]) -> WlResult<()> {
    // Header first: the receiving vfd and how many vfd ids follow it.
    let header = CtrlVfdRecv {
        hdr: CtrlHeader {
            type_: Le32::from(VIRTIO_WL_CMD_VFD_RECV),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
        vfd_count: Le32::from(vfd_ids.len() as u32),
    };
    writer.write_obj(header).map_err(WlError::WriteResponse)?;
    // Then each incoming vfd id as a little-endian u32...
    for &incoming_id in vfd_ids {
        writer
            .write_obj(Le32::from(incoming_id))
            .map_err(WlError::WriteResponse)?;
    }
    // ...and finally the payload.
    writer.write_all(data).map_err(WlError::WriteResponse)
}
/// Writes a VFD_HUP message for vfd `vfd_id` into `writer`. A hang-up message
/// carries only the header and the affected vfd id.
fn encode_vfd_hup(writer: &mut Writer, vfd_id: u32) -> WlResult<()> {
    let hup = CtrlVfd {
        hdr: CtrlHeader {
            type_: Le32::from(VIRTIO_WL_CMD_VFD_HUP),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
    };
    writer.write_obj(hup).map_err(WlError::WriteResponse)
}
/// Serializes `resp` into `writer` using the wire format matching its variant;
/// structured variants get dedicated encoders, everything else is encoded as
/// its bare 32-bit code.
fn encode_resp(writer: &mut Writer, resp: WlResp) -> WlResult<()> {
    match resp {
        WlResp::VfdNew {
            id,
            flags,
            pfn,
            size,
            resp,
        } => encode_vfd_new(writer, resp, id, flags, pfn, size),
        #[cfg(feature = "minigbm")]
        WlResp::VfdNewDmabuf {
            id,
            flags,
            pfn,
            size,
            desc,
        } => encode_vfd_new_dmabuf(writer, id, flags, pfn, size, desc),
        WlResp::VfdRecv { id, data, vfds } => encode_vfd_recv(writer, id, data, vfds),
        WlResp::VfdHup { id } => encode_vfd_hup(writer, id),
        // All remaining variants carry no payload, only their code.
        r => writer
            .write_obj(Le32::from(r.get_code()))
            .map_err(WlError::WriteResponse),
    }
}
/// Errors that can occur while servicing virtio-wl requests. Variants are kept
/// alphabetically ordered, enforced by `#[sorted]`.
#[allow(dead_code)]
#[sorted]
#[derive(ThisError, Debug)]
enum WlError {
    #[error("overflow in calculation")]
    CheckedOffset,
    #[error("failed to synchronize DMABuf access: {0}")]
    DmabufSync(io::Error),
    #[error("failed to create shared memory from descriptor: {0}")]
    FromSharedMemory(Error),
    #[error("gralloc error: {0}")]
    #[cfg(feature = "minigbm")]
    GrallocError(#[from] RutabagaError),
    #[error("access violation in guest memory: {0}")]
    GuestMemory(#[from] GuestMemoryError),
    #[error("invalid string: {0}")]
    InvalidString(std::str::Utf8Error),
    #[error("failed to create shared memory allocation: {0}")]
    NewAlloc(Error),
    #[error("failed to create pipe: {0}")]
    NewPipe(Error),
    #[error("error parsing descriptor: {0}")]
    ParseDesc(io::Error),
    #[error("failed to read a pipe: {0}")]
    ReadPipe(io::Error),
    #[error("failed to recv on a socket: {0}")]
    RecvVfd(Error),
    #[error("failed to send on a socket: {0}")]
    SendVfd(Error),
    #[error("shmem mapper failure: {0}")]
    ShmemMapperError(anyhow::Error),
    #[error("failed to connect socket: {0}")]
    SocketConnect(io::Error),
    #[error("failed to set socket as non-blocking: {0}")]
    SocketNonBlock(io::Error),
    #[error("unknown socket name: {0}")]
    UnknownSocketName(String),
    #[error("invalid response from parent VM")]
    VmBadResponse,
    #[error("failed to control parent VM: {0}")]
    VmControl(TubeError),
    #[error("access violating in guest volatile memory: {0}")]
    VolatileMemory(#[from] VolatileMemoryError),
    #[error("failed to listen to descriptor on wait context: {0}")]
    WaitContextAdd(Error),
    #[error("failed to write to a pipe: {0}")]
    WritePipe(io::Error),
    #[error("failed to write response: {0}")]
    WriteResponse(io::Error),
}
// Shorthand result type used throughout this module.
type WlResult<T> = result::Result<T, WlError>;
// Shared memory region id and size exposed to the guest for virtio-wl
// mappings (4 GiB of address space).
pub const WL_SHMEM_ID: u8 = 0;
pub const WL_SHMEM_SIZE: u64 = 1 << 32;
/// Interior state behind `VmRequester`: the mapper that installs memory into
/// the device's shared memory region plus bookkeeping for the offsets it has
/// handed out.
struct VmRequesterState {
    mapper: Box<dyn SharedMemoryMapper>,
    #[cfg(feature = "minigbm")]
    gralloc: RutabagaGralloc,
    // Allocator for shm address space
    address_allocator: AddressAllocator,
    // Map of existing mappings in the shm address space
    allocs: Map<u64 /* offset */, Alloc>,
    // The id for the next shmem allocation
    next_alloc: usize,
}
/// Cheaply clonable handle to the shared `VmRequesterState`.
#[derive(Clone)]
struct VmRequester {
    state: Rc<RefCell<VmRequesterState>>,
}
// The following are wrappers to avoid base dependencies in the rutabaga crate
#[cfg(feature = "minigbm")]
fn to_safe_descriptor(r: RutabagaDescriptor) -> SafeDescriptor {
    // Safe because `r` owned the raw descriptor and into_raw_descriptor
    // transfers that ownership to the new SafeDescriptor.
    unsafe { SafeDescriptor::from_raw_descriptor(r.into_raw_descriptor()) }
}
impl VmRequester {
    /// Wraps `mapper` (and, with minigbm, `gralloc`) together with an address
    /// allocator that hands out page-aligned offsets inside the device's
    /// shared memory region.
    fn new(
        mapper: Box<dyn SharedMemoryMapper>,
        #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
    ) -> VmRequester {
        VmRequester {
            state: Rc::new(RefCell::new(VmRequesterState {
                mapper,
                #[cfg(feature = "minigbm")]
                gralloc,
                address_allocator: AddressAllocator::new(
                    AddressRange::from_start_and_size(0, WL_SHMEM_SIZE).unwrap(),
                    Some(pagesize() as u64),
                    None,
                )
                .expect("failed to create allocator"),
                allocs: Map::new(),
                next_alloc: 0,
            })),
        }
    }
    /// Removes the mapping at `offset` from the mapper and returns its address
    /// range to the allocator.
    fn unregister_memory(&self, offset: u64) -> WlResult<()> {
        let mut state = self.state.borrow_mut();
        state
            .mapper
            .remove_mapping(offset)
            .map_err(WlError::ShmemMapperError)?;
        let alloc = state
            .allocs
            .remove(&offset)
            .context("unknown offset")
            .map_err(WlError::ShmemMapperError)?;
        state
            .address_allocator
            .release(alloc)
            .expect("corrupt address space");
        Ok(())
    }
    /// Allocates a linear GPU buffer of the requested dimensions/format via
    /// gralloc, maps it into the shared memory region, and returns its offset,
    /// descriptor, and memory requirements.
    #[cfg(feature = "minigbm")]
    fn allocate_and_register_gpu_memory(
        &self,
        width: u32,
        height: u32,
        format: u32,
    ) -> WlResult<(u64, SafeDescriptor, ImageMemoryRequirements)> {
        let mut state = self.state.borrow_mut();
        let img = ImageAllocationInfo {
            width,
            height,
            drm_format: DrmFormat::from(format),
            // Linear layout is a requirement as virtio wayland guest expects
            // this for CPU access to the buffer. Scanout and texturing are
            // optional as the consumer (wayland compositor) is expected to
            // fall back to less efficient mechanisms for presentation if
            // necessary. In practice, linear buffers for commonly used formats
            // will also support scanout and texturing.
            flags: RutabagaGrallocFlags::empty().use_linear(true),
        };
        let reqs = state
            .gralloc
            .get_image_memory_requirements(img)
            .map_err(WlError::GrallocError)?;
        let handle = state
            .gralloc
            .allocate_memory(reqs)
            .map_err(WlError::GrallocError)?;
        // Release the borrow before register_memory re-borrows the state.
        drop(state);
        let safe_descriptor = to_safe_descriptor(handle.os_handle);
        self.register_memory(
            safe_descriptor
                .try_clone()
                .context("failed to dup gfx handle")
                .map_err(WlError::ShmemMapperError)?,
            reqs.size,
        )
        .map(|info| (info, safe_descriptor, reqs))
    }
    /// Maps `shm` into the shared memory region and returns its offset.
    fn register_shmem(&self, shm: &SharedMemory) -> WlResult<u64> {
        self.register_memory(
            SafeDescriptor::try_from(shm as &dyn AsRawDescriptor)
                .context("failed to create safe descriptor")
                .map_err(WlError::ShmemMapperError)?,
            shm.size(),
        )
    }
    /// Maps `descriptor` (size rounded up to whole pages) read/write at a
    /// freshly allocated offset inside the shared memory region, returning
    /// that offset.
    fn register_memory(&self, descriptor: SafeDescriptor, size: u64) -> WlResult<u64> {
        let mut state = self.state.borrow_mut();
        let size = round_up_to_page_size(size as usize) as u64;
        let source = VmMemorySource::Descriptor {
            descriptor,
            offset: 0,
            size,
        };
        let alloc = Alloc::Anon(state.next_alloc);
        state.next_alloc += 1;
        let offset = state
            .address_allocator
            .allocate(size, alloc, "virtio-wl".to_owned())
            .context("failed to allocate offset")
            .map_err(WlError::ShmemMapperError)?;
        match state
            .mapper
            .add_mapping(source, offset, Protection::read_write())
        {
            Ok(()) => {
                state.allocs.insert(offset, alloc);
                Ok(offset)
            }
            Err(e) => {
                // We just allocated it ourselves, it must exist.
                state
                    .address_allocator
                    .release(alloc)
                    .expect("corrupt address space");
                Err(WlError::ShmemMapperError(e))
            }
        }
    }
}
/// Common header of every control message on the wire.
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromBytes)]
struct CtrlHeader {
    type_: Le32,
    flags: Le32,
}
/// Wire format of VFD_NEW commands and responses.
#[repr(C)]
#[derive(Copy, Clone, Default, FromBytes, AsBytes)]
struct CtrlVfdNew {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
    pfn: Le64,
    size: Le32,
    padding: Le32,
}
/// Wire format of VFD_NEW_CTX_NAMED: connect to a named wayland socket.
#[repr(C)]
#[derive(Copy, Clone, Default, FromBytes)]
struct CtrlVfdNewCtxNamed {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32, // Ignored.
    pfn: Le64,   // Ignored.
    size: Le32,  // Ignored.
    name: [u8; 32],
}
/// Wire format of the VFD_NEW_DMABUF command/response, including per-plane
/// stride and offset layout.
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromBytes)]
#[cfg(feature = "minigbm")]
struct CtrlVfdNewDmabuf {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
    pfn: Le64,
    size: Le32,
    width: Le32,
    height: Le32,
    format: Le32,
    stride0: Le32,
    stride1: Le32,
    stride2: Le32,
    offset0: Le32,
    offset1: Le32,
    offset2: Le32,
}
/// Wire format of VFD_DMABUF_SYNC: `flags` selects the sync operation.
// Note: the original carried the same `#[cfg(feature = "minigbm")]` attribute
// twice; the duplicate is removed here.
#[cfg(feature = "minigbm")]
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromBytes)]
struct CtrlVfdDmabufSync {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
}
/// Wire format of the VFD_RECV header; followed on the wire by `vfd_count`
/// little-endian ids and then the payload bytes.
#[repr(C)]
#[derive(Copy, Clone, AsBytes, FromBytes)]
struct CtrlVfdRecv {
    hdr: CtrlHeader,
    id: Le32,
    vfd_count: Le32,
}
/// Minimal message carrying only a vfd id (used for CLOSE and HUP).
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromBytes)]
struct CtrlVfd {
    hdr: CtrlHeader,
    id: Le32,
}
/// Header of a VFD_SEND message.
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromBytes)]
struct CtrlVfdSend {
    hdr: CtrlHeader,
    id: Le32,
    vfd_count: Le32,
    // Remainder is an array of vfd_count IDs followed by data.
}
/// One sent-vfd entry in the original (v1) send format.
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromBytes)]
struct CtrlVfdSendVfd {
    kind: Le32,
    id: Le32,
}
/// Payload of a v2 send entry; which member is valid depends on `kind`.
#[repr(C)]
#[derive(Copy, Clone, FromBytes)]
union CtrlVfdSendVfdV2Payload {
    id: Le32,
    seqno: Le64,
}
/// One sent-vfd entry in the v2 send format.
#[repr(C)]
#[derive(Copy, Clone, FromBytes)]
struct CtrlVfdSendVfdV2 {
    kind: Le32,
    payload: CtrlVfdSendVfdV2Payload,
}
impl CtrlVfdSendVfdV2 {
    /// Returns the payload as a vfd id; panics unless `kind` is an id-carrying
    /// variant (LOCAL or VIRTGPU).
    fn id(&self) -> Le32 {
        assert!(
            self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL
                || self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
        );
        // SAFETY: both union members are plain little-endian integers, and the
        // assert above confirms the sender populated the `id` member.
        unsafe { self.payload.id }
    }
    /// Returns the payload as a fence sequence number; panics unless `kind`
    /// is VIRTGPU_FENCE.
    #[cfg(feature = "gpu")]
    fn seqno(&self) -> Le64 {
        assert!(self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE);
        // SAFETY: both union members are plain little-endian integers, and the
        // assert above confirms the sender populated the `seqno` member.
        unsafe { self.payload.seqno }
    }
}
/// A decoded message to encode back onto the `in` queue: either a response to
/// a guest command or a host-initiated command (recv/hup/new).
#[derive(Debug)]
#[allow(dead_code)]
enum WlResp<'a> {
    Ok,
    VfdNew {
        id: u32,
        flags: u32,
        pfn: u64,
        size: u32,
        // The VfdNew variant can be either a response or a command depending on this `resp`. This
        // is important for the `get_code` method.
        resp: bool,
    },
    #[cfg(feature = "minigbm")]
    VfdNewDmabuf {
        id: u32,
        flags: u32,
        pfn: u64,
        size: u32,
        desc: GpuMemoryDesc,
    },
    VfdRecv {
        id: u32,
        data: &'a [u8],
        vfds: &'a [u32],
    },
    VfdHup {
        id: u32,
    },
    Err(Box<dyn StdError>),
    OutOfMemory,
    InvalidId,
    InvalidType,
    InvalidFlags,
    InvalidCommand,
}
impl<'a> WlResp<'a> {
    /// Returns the VIRTIO_WL_* wire code corresponding to this variant.
    fn get_code(&self) -> u32 {
        match *self {
            WlResp::Ok => VIRTIO_WL_RESP_OK,
            WlResp::VfdNew { resp, .. } => {
                // VfdNew doubles as a host command and a response; `resp`
                // selects which code is emitted.
                if resp {
                    VIRTIO_WL_RESP_VFD_NEW
                } else {
                    VIRTIO_WL_CMD_VFD_NEW
                }
            }
            #[cfg(feature = "minigbm")]
            WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
            WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
            WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
            WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
            WlResp::OutOfMemory => VIRTIO_WL_RESP_OUT_OF_MEMORY,
            WlResp::InvalidId => VIRTIO_WL_RESP_INVALID_ID,
            WlResp::InvalidType => VIRTIO_WL_RESP_INVALID_TYPE,
            WlResp::InvalidFlags => VIRTIO_WL_RESP_INVALID_FLAGS,
            WlResp::InvalidCommand => VIRTIO_WL_RESP_INVALID_CMD,
        }
    }
}
/// One virtual file descriptor. Depending on how the vfd was created, one (or
/// a related pair) of the backing resources below is populated.
#[derive(Default)]
struct WlVfd {
    socket: Option<UnixStream>,
    guest_shared_memory: Option<SharedMemory>,
    // End of the pipe handed to the host side when this vfd is sent.
    remote_pipe: Option<File>,
    // Guest-facing pipe end, paired with its VIRTIO_WL_VFD_* direction flags.
    local_pipe: Option<(u32 /* flags */, File)>,
    // Offset in the shm region plus the requester needed to unmap on close.
    slot: Option<(u64 /* offset */, VmRequester)>,
    #[cfg(feature = "minigbm")]
    is_dmabuf: bool,
    fence: Option<File>,
    is_fence: bool,
}
impl fmt::Debug for WlVfd {
    // Compact single-line form listing only the populated descriptors.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "WlVfd {{")?;
        if let Some(s) = &self.socket {
            write!(f, " socket: {}", s.as_raw_descriptor())?;
        }
        if let Some((offset, _)) = &self.slot {
            write!(f, " offset: {}", offset)?;
        }
        if let Some(s) = &self.remote_pipe {
            write!(f, " remote: {}", s.as_raw_descriptor())?;
        }
        if let Some((_, s)) = &self.local_pipe {
            write!(f, " local: {}", s.as_raw_descriptor())?;
        }
        write!(f, " }}")
    }
}
impl WlVfd {
fn connect<P: AsRef<Path>>(path: P) -> WlResult<WlVfd> {
let socket = UnixStream::connect(path).map_err(WlError::SocketConnect)?;
let mut vfd = WlVfd::default();
vfd.socket = Some(socket);
Ok(vfd)
}
fn allocate(vm: VmRequester, size: u64) -> WlResult<WlVfd> {
let size_page_aligned = round_up_to_page_size(size as usize) as u64;
let vfd_shm =
SharedMemory::new("virtwl_alloc", size_page_aligned).map_err(WlError::NewAlloc)?;
let offset = vm.register_shmem(&vfd_shm)?;
let mut vfd = WlVfd::default();
vfd.guest_shared_memory = Some(vfd_shm);
vfd.slot = Some((offset, vm));
Ok(vfd)
}
#[cfg(feature = "minigbm")]
fn dmabuf(
vm: VmRequester,
width: u32,
height: u32,
format: u32,
) -> WlResult<(WlVfd, GpuMemoryDesc)> {
let (offset, desc, reqs) = vm.allocate_and_register_gpu_memory(width, height, format)?;
let mut vfd = WlVfd::default();
let vfd_shm =
SharedMemory::from_safe_descriptor(desc, Some(reqs.size)).map_err(WlError::NewAlloc)?;
let mut desc = GpuMemoryDesc::default();
for i in 0..3 {
desc.planes[i] = GpuMemoryPlaneDesc {
stride: reqs.strides[i],
offset: reqs.offsets[i],
}
}
vfd.guest_shared_memory = Some(vfd_shm);
vfd.slot = Some((offset, vm));
vfd.is_dmabuf = true;
Ok((vfd, desc))
}
#[cfg(feature = "minigbm")]
fn dmabuf_sync(&self, flags: u32) -> WlResult<()> {
if !self.is_dmabuf {
return Err(WlError::DmabufSync(io::Error::from_raw_os_error(EINVAL)));
}
match &self.guest_shared_memory {
Some(descriptor) => {
let sync = dma_buf_sync {
flags: flags as u64,
};
// Safe as descriptor is a valid dmabuf and incorrect flags will return an error.
if unsafe { ioctl_with_ref(descriptor, DMA_BUF_IOCTL_SYNC(), &sync) } < 0 {
Err(WlError::DmabufSync(io::Error::last_os_error()))
} else {
Ok(())
}
}
None => Err(WlError::DmabufSync(io::Error::from_raw_os_error(EBADF))),
}
}
fn pipe_remote_read_local_write() -> WlResult<WlVfd> {
let (read_pipe, write_pipe) = pipe(true).map_err(WlError::NewPipe)?;
let mut vfd = WlVfd::default();
vfd.remote_pipe = Some(read_pipe);
vfd.local_pipe = Some((VIRTIO_WL_VFD_WRITE, write_pipe));
Ok(vfd)
}
fn pipe_remote_write_local_read() -> WlResult<WlVfd> {
let (read_pipe, write_pipe) = pipe(true).map_err(WlError::NewPipe)?;
let mut vfd = WlVfd::default();
vfd.remote_pipe = Some(write_pipe);
vfd.local_pipe = Some((VIRTIO_WL_VFD_READ, read_pipe));
Ok(vfd)
}
fn from_file(vm: VmRequester, mut descriptor: File) -> WlResult<WlVfd> {
// We need to determine if the given file is more like shared memory or a pipe/socket. A
// quick and easy check is to seek to the end of the file. If it works we assume it's not a
// pipe/socket because those have no end. We can even use that seek location as an indicator
// for how big the shared memory chunk to map into guest memory is. If seeking to the end
// fails, we assume it's a socket or pipe with read/write semantics.
if descriptor.seek(SeekFrom::End(0)).is_ok() {
let shm = SharedMemory::from_file(descriptor).map_err(WlError::FromSharedMemory)?;
let offset = vm.register_shmem(&shm)?;
let mut vfd = WlVfd::default();
vfd.guest_shared_memory = Some(shm);
vfd.slot = Some((offset, vm));
Ok(vfd)
} else if is_fence(&descriptor) {
let mut vfd = WlVfd::default();
vfd.is_fence = true;
vfd.fence = Some(descriptor);
Ok(vfd)
} else {
let flags = match FileFlags::from_file(&descriptor) {
Ok(FileFlags::Read) => VIRTIO_WL_VFD_READ,
Ok(FileFlags::Write) => VIRTIO_WL_VFD_WRITE,
Ok(FileFlags::ReadWrite) => VIRTIO_WL_VFD_READ | VIRTIO_WL_VFD_WRITE,
_ => 0,
};
let mut vfd = WlVfd::default();
vfd.local_pipe = Some((flags, descriptor));
Ok(vfd)
}
}
fn flags(&self, use_transition_flags: bool) -> u32 {
let mut flags = 0;
if use_transition_flags {
if self.socket.is_some() {
flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
}
if let Some((f, _)) = self.local_pipe {
flags |= f;
}
if self.is_fence {
flags |= VIRTIO_WL_VFD_FENCE;
}
} else {
if self.socket.is_some() {
flags |= VIRTIO_WL_VFD_CONTROL;
}
if self.slot.is_some() {
flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP
}
}
flags
}
// Offset within the shared memory region this VFD was mapped at.
fn offset(&self) -> Option<u64> {
self.slot.as_ref().map(|s| s.0)
}
// Size in bytes of the shared memory VFD.
fn size(&self) -> Option<u64> {
self.guest_shared_memory.as_ref().map(|shm| shm.size())
}
// The descriptor that gets sent if this VFD is sent over a socket.
fn send_descriptor(&self) -> Option<RawDescriptor> {
self.guest_shared_memory
.as_ref()
.map(|shm| shm.as_raw_descriptor())
.or(self.socket.as_ref().map(|s| s.as_raw_descriptor()))
.or(self.remote_pipe.as_ref().map(|p| p.as_raw_descriptor()))
.or(self.fence.as_ref().map(|f| f.as_raw_descriptor()))
}
// The FD that is used for polling for events on this VFD.
fn wait_descriptor(&self) -> Option<&dyn AsRawDescriptor> {
self.socket
.as_ref()
.map(|s| s as &dyn AsRawDescriptor)
.or_else(|| {
self.local_pipe
.as_ref()
.map(|(_, p)| p as &dyn AsRawDescriptor)
})
.or_else(|| self.fence.as_ref().map(|f| f as &dyn AsRawDescriptor))
}
// Sends data/files from the guest to the host over this VFD.
fn send(&mut self, rds: &[RawDescriptor], data: &mut Reader) -> WlResult<WlResp> {
if let Some(socket) = &self.socket {
socket
.send_with_fds(&data.get_remaining(), rds)
.map_err(WlError::SendVfd)?;
// All remaining data in `data` is now considered consumed.
data.consume(::std::usize::MAX);
Ok(WlResp::Ok)
} else if let Some((_, local_pipe)) = &mut self.local_pipe {
// Impossible to send descriptors over a simple pipe.
if !rds.is_empty() {
return Ok(WlResp::InvalidType);
}
data.read_to(local_pipe, usize::max_value())
.map_err(WlError::WritePipe)?;
Ok(WlResp::Ok)
} else {
Ok(WlResp::InvalidType)
}
}
// Receives data/files from the host for this VFD and queues it for the guest.
fn recv(&mut self, in_file_queue: &mut Vec<File>) -> WlResult<Vec<u8>> {
if let Some(socket) = self.socket.take() {
let mut buf = vec![0; IN_BUFFER_LEN];
let mut fd_buf = [0; VIRTWL_SEND_MAX_ALLOCS];
// If any errors happen, the socket will get dropped, preventing more reading.
let (len, file_count) = socket
.recv_with_fds(IoSliceMut::new(&mut buf), &mut fd_buf)
.map_err(WlError::RecvVfd)?;
// If any data gets read, the put the socket back for future recv operations.
if len != 0 || file_count != 0 {
buf.truncate(len);
buf.shrink_to_fit();
self.socket = Some(socket);
// Safe because the first file_counts fds from recv_with_fds are owned by us and
// valid.
in_file_queue.extend(
fd_buf[..file_count]
.iter()
.map(|&descriptor| unsafe { File::from_raw_descriptor(descriptor) }),
);
return Ok(buf);
}
Ok(Vec::new())
} else if let Some((flags, mut local_pipe)) = self.local_pipe.take() {
let mut buf = Vec::new();
buf.resize(IN_BUFFER_LEN, 0);
let len = local_pipe.read(&mut buf[..]).map_err(WlError::ReadPipe)?;
if len != 0 {
buf.truncate(len);
buf.shrink_to_fit();
self.local_pipe = Some((flags, local_pipe));
return Ok(buf);
}
Ok(Vec::new())
} else {
Ok(Vec::new())
}
}
// Called after this VFD is sent over a socket to ensure the local end of the VFD receives hang
// up events.
fn close_remote(&mut self) {
self.remote_pipe = None;
}
fn close(&mut self) -> WlResult<()> {
if let Some((offset, vm)) = self.slot.take() {
vm.unregister_memory(offset)?;
}
self.socket = None;
self.remote_pipe = None;
self.local_pipe = None;
Ok(())
}
}
impl Drop for WlVfd {
    fn drop(&mut self) {
        // Best-effort close; there is nowhere to report the error from drop.
        let _ = self.close();
    }
}
/// An item buffered for delivery to the guest on the `in` queue.
#[derive(Debug)]
enum WlRecv {
    // A new vfd to announce to the guest.
    Vfd { id: u32 },
    // Raw bytes received for a vfd.
    Data { buf: Vec<u8> },
    // The vfd hung up and will produce no more data.
    Hup,
}
/// Central state of the virtio-wl device: every live vfd plus the buffered
/// contents of the `in` queue destined for the guest.
pub struct WlState {
    // Named wayland server sockets that new contexts may connect to.
    wayland_paths: Map<String, PathBuf>,
    vm: VmRequester,
    resource_bridge: Option<Tube>,
    use_transition_flags: bool,
    // Watches every vfd's pollable descriptor, keyed by vfd id.
    wait_ctx: WaitContext<u32>,
    vfds: Map<u32, WlVfd>,
    next_vfd_id: u32,
    // Files received over sockets, pending wrapping into new vfds.
    in_file_queue: Vec<File>,
    // Messages buffered until the guest makes `in` queue descriptors available.
    in_queue: VecDeque<(u32 /* vfd_id */, WlRecv)>,
    current_recv_vfd: Option<u32>,
    recv_vfds: Vec<u32>,
    #[cfg(feature = "gpu")]
    signaled_fence: Option<SafeDescriptor>,
    use_send_vfd_v2: bool,
    address_offset: Option<u64>,
}
impl WlState {
    /// Create a new `WlState` instance for running a virtio-wl device.
    pub fn new(
        wayland_paths: Map<String, PathBuf>,
        mapper: Box<dyn SharedMemoryMapper>,
        use_transition_flags: bool,
        use_send_vfd_v2: bool,
        resource_bridge: Option<Tube>,
        #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
        address_offset: Option<u64>,
    ) -> WlState {
        WlState {
            wayland_paths,
            vm: VmRequester::new(
                mapper,
                #[cfg(feature = "minigbm")]
                gralloc,
            ),
            resource_bridge,
            wait_ctx: WaitContext::new().expect("failed to create WaitContext"),
            use_transition_flags,
            vfds: Map::new(),
            // Host-created vfd ids start above the guest id range.
            next_vfd_id: NEXT_VFD_ID_BASE,
            in_file_queue: Vec::new(),
            in_queue: VecDeque::new(),
            current_recv_vfd: None,
            recv_vfds: Vec::new(),
            #[cfg(feature = "gpu")]
            signaled_fence: None,
            use_send_vfd_v2,
            address_offset,
        }
    }
    /// Returns a borrow of the internal `WaitContext` (keyed by vfd id).
    ///
    /// This is a hack so that we can drive the inner WaitContext from an async fn. The proper
    /// long-term solution is to replace the WaitContext completely by spawning async workers
    /// instead.
    pub fn wait_ctx(&self) -> &WaitContext<u32> {
        &self.wait_ctx
    }
    /// Handles VFD_NEW_PIPE: creates a unidirectional pipe vfd with
    /// guest-chosen `id`. Exactly one of the READ/WRITE flags must be set.
    fn new_pipe(&mut self, id: u32, flags: u32) -> WlResult<WlResp> {
        // Guests may only create ids below the host id range.
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }
        if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ) != 0 {
            return Ok(WlResp::InvalidFlags);
        }
        // A pipe is unidirectional; both flags at once is rejected.
        if flags & VIRTIO_WL_VFD_WRITE != 0 && flags & VIRTIO_WL_VFD_READ != 0 {
            return Ok(WlResp::InvalidFlags);
        }
        match self.vfds.entry(id) {
            Entry::Vacant(entry) => {
                let vfd = if flags & VIRTIO_WL_VFD_WRITE != 0 {
                    WlVfd::pipe_remote_read_local_write()?
                } else if flags & VIRTIO_WL_VFD_READ != 0 {
                    WlVfd::pipe_remote_write_local_read()?
                } else {
                    return Ok(WlResp::InvalidFlags);
                };
                // Start polling the guest-facing end for readability/hang-up.
                self.wait_ctx
                    .add(vfd.wait_descriptor().unwrap(), id)
                    .map_err(WlError::WaitContextAdd)?;
                let resp = WlResp::VfdNew {
                    id,
                    flags: 0,
                    pfn: 0,
                    size: 0,
                    resp: true,
                };
                entry.insert(vfd);
                Ok(resp)
            }
            Entry::Occupied(_) => Ok(WlResp::InvalidId),
        }
    }
    /// Handles VFD_NEW: creates a shared-memory vfd of `size` bytes with
    /// guest-chosen `id`, reporting its pfn and rounded-up size back.
    fn new_alloc(&mut self, id: u32, flags: u32, size: u32) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }
        // With transition flags the guest must pass zero flags; otherwise only
        // WRITE/MAP are permitted.
        if self.use_transition_flags {
            if flags != 0 {
                return Ok(WlResp::InvalidFlags);
            }
        } else if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP) != 0 {
            return Ok(WlResp::Err(Box::from("invalid flags")));
        }
        if self.vfds.contains_key(&id) {
            return Ok(WlResp::InvalidId);
        }
        let vfd = WlVfd::allocate(self.vm.clone(), size as u64)?;
        let resp = WlResp::VfdNew {
            id,
            flags,
            pfn: self.compute_pfn(&vfd.offset()),
            size: vfd.size().unwrap_or_default() as u32,
            resp: true,
        };
        self.vfds.insert(id, vfd);
        Ok(resp)
    }
#[cfg(feature = "minigbm")]
fn new_dmabuf(&mut self, id: u32, width: u32, height: u32, format: u32) -> WlResult<WlResp> {
if id & VFD_ID_HOST_MASK != 0 {
return Ok(WlResp::InvalidId);
}
if self.vfds.contains_key(&id) {
return Ok(WlResp::InvalidId);
}
let (vfd, desc) = WlVfd::dmabuf(self.vm.clone(), width, height, format)?;
let resp = WlResp::VfdNewDmabuf {
id,
flags: 0,
pfn: self.compute_pfn(&vfd.offset()),
size: vfd.size().unwrap_or_default() as u32,
desc,
};
self.vfds.insert(id, vfd);
Ok(resp)
}
#[cfg(feature = "minigbm")]
fn dmabuf_sync(&mut self, vfd_id: u32, flags: u32) -> WlResult<WlResp> {
if flags & !(VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK) != 0 {
return Ok(WlResp::InvalidFlags);
}
match self.vfds.get_mut(&vfd_id) {
Some(vfd) => {
vfd.dmabuf_sync(flags)?;
Ok(WlResp::Ok)
}
None => Ok(WlResp::InvalidId),
}
}
    /// Handles VFD_NEW_CTX(_NAMED): connects a new vfd to the wayland server
    /// socket registered under `name`.
    fn new_context(&mut self, id: u32, name: &str) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }
        let flags = if self.use_transition_flags {
            VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ
        } else {
            VIRTIO_WL_VFD_CONTROL
        };
        match self.vfds.entry(id) {
            Entry::Vacant(entry) => {
                let vfd = entry.insert(WlVfd::connect(
                    self.wayland_paths
                        .get(name)
                        .ok_or_else(|| WlError::UnknownSocketName(name.to_string()))?,
                )?);
                // Start polling the new socket for incoming server messages.
                self.wait_ctx
                    .add(vfd.wait_descriptor().unwrap(), id)
                    .map_err(WlError::WaitContextAdd)?;
                Ok(WlResp::VfdNew {
                    id,
                    flags,
                    pfn: 0,
                    size: 0,
                    resp: true,
                })
            }
            Entry::Occupied(_) => Ok(WlResp::InvalidId),
        }
    }
fn process_wait_context(&mut self) {
let events = match self.wait_ctx.wait_timeout(Duration::from_secs(0)) {
Ok(v) => v,
Err(e) => {
error!("failed waiting for vfd evens: {}", e);
return;
}
};
for event in events.iter().filter(|e| e.is_readable) {
if let Err(e) = self.recv(event.token) {
error!("failed to recv from vfd: {}", e)
}
}
for event in events.iter().filter(|e| e.is_hungup) {
if !event.is_readable {
let vfd_id = event.token;
if let Some(descriptor) =
self.vfds.get(&vfd_id).and_then(|vfd| vfd.wait_descriptor())
{
if let Err(e) = self.wait_ctx.delete(descriptor) {
warn!("failed to remove hungup vfd from poll context: {}", e);
}
}
self.in_queue.push_back((vfd_id, WlRecv::Hup));
}
}
}
fn close(&mut self, vfd_id: u32) -> WlResult<WlResp> {
let mut to_delete = Set::new();
for (dest_vfd_id, q) in &self.in_queue {
if *dest_vfd_id == vfd_id {
if let WlRecv::Vfd { id } = q {
to_delete.insert(*id);
}
}
}
for vfd_id in to_delete {
// Sorry sub-error, we can't have cascading errors leaving us in an inconsistent state.
let _ = self.close(vfd_id);
}
match self.vfds.remove(&vfd_id) {
Some(mut vfd) => {
self.in_queue.retain(|&(id, _)| id != vfd_id);
vfd.close()?;
Ok(WlResp::Ok)
}
None => Ok(WlResp::InvalidId),
}
}
#[cfg(feature = "gpu")]
fn get_info(&mut self, request: ResourceRequest) -> Option<SafeDescriptor> {
let sock = self.resource_bridge.as_ref().unwrap();
match get_resource_info(sock, request) {
Ok(ResourceInfo::Buffer(BufferInfo { handle, .. })) => Some(handle),
Ok(ResourceInfo::Fence { handle }) => Some(handle),
Err(ResourceBridgeError::InvalidResource(req)) => {
warn!("attempt to send non-existent gpu resource {}", req);
None
}
Err(e) => {
error!("{}", e);
// If there was an error with the resource bridge, it can no longer be
// trusted to continue to function.
self.resource_bridge = None;
None
}
}
}
    /// Sends guest data and any referenced vfds out over the socket of vfd `vfd_id`.
    ///
    /// The message in `reader` consists of `vfd_count` vfd id entries — in one of three wire
    /// formats, selected by `foreign_id` and `self.use_send_vfd_v2` — followed by the data
    /// payload. Protocol-level failures are reported as error responses, not `Err`.
    fn send(
        &mut self,
        vfd_id: u32,
        vfd_count: usize,
        foreign_id: bool,
        reader: &mut Reader,
    ) -> WlResult<WlResp> {
        // First stage gathers and normalizes all id information from guest memory.
        let mut send_vfd_ids = [CtrlVfdSendVfdV2 {
            kind: Le32::from(0),
            payload: CtrlVfdSendVfdV2Payload { id: Le32::from(0) },
        }; VIRTWL_SEND_MAX_ALLOCS];
        for vfd_id in send_vfd_ids.iter_mut().take(vfd_count) {
            *vfd_id = if foreign_id {
                if self.use_send_vfd_v2 {
                    reader.read_obj().map_err(WlError::ParseDesc)?
                } else {
                    // Legacy (pre-v2) entry: widen it into the v2 representation.
                    let vfd: CtrlVfdSendVfd = reader.read_obj().map_err(WlError::ParseDesc)?;
                    CtrlVfdSendVfdV2 {
                        kind: vfd.kind,
                        payload: CtrlVfdSendVfdV2Payload { id: vfd.id },
                    }
                }
            } else {
                // Non-foreign sends carry bare ids that always refer to local vfds.
                CtrlVfdSendVfdV2 {
                    kind: Le32::from(VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL),
                    payload: CtrlVfdSendVfdV2Payload {
                        id: reader.read_obj().map_err(WlError::ParseDesc)?,
                    },
                }
            };
        }
        // Next stage collects corresponding file descriptors for each id.
        let mut rds = [0; VIRTWL_SEND_MAX_ALLOCS];
        // Holds ownership of descriptors obtained from the resource bridge so they stay
        // open until the send below completes.
        #[cfg(feature = "gpu")]
        let mut bridged_files = Vec::new();
        for (&send_vfd_id, descriptor) in send_vfd_ids[..vfd_count].iter().zip(rds.iter_mut()) {
            match send_vfd_id.kind.to_native() {
                VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL => {
                    match self.vfds.get(&send_vfd_id.id().to_native()) {
                        Some(vfd) => match vfd.send_descriptor() {
                            Some(vfd_fd) => *descriptor = vfd_fd,
                            // This vfd kind has no sendable descriptor.
                            None => return Ok(WlResp::InvalidType),
                        },
                        None => {
                            warn!(
                                "attempt to send non-existant vfd 0x{:08x}",
                                send_vfd_id.id().to_native()
                            );
                            return Ok(WlResp::InvalidId);
                        }
                    }
                }
                // Resolve a virtgpu buffer id through the resource bridge.
                #[cfg(feature = "gpu")]
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU if self.resource_bridge.is_some() => {
                    match self.get_info(ResourceRequest::GetBuffer {
                        id: send_vfd_id.id().to_native(),
                    }) {
                        Some(handle) => {
                            *descriptor = handle.as_raw_descriptor();
                            bridged_files.push(handle.into());
                        }
                        None => return Ok(WlResp::InvalidId),
                    }
                }
                // Resolve a virtgpu fence by sequence number through the resource bridge.
                #[cfg(feature = "gpu")]
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE if self.resource_bridge.is_some() => {
                    match self.get_info(ResourceRequest::GetFence {
                        seqno: send_vfd_id.seqno().to_native(),
                    }) {
                        Some(handle) => {
                            *descriptor = handle.as_raw_descriptor();
                            bridged_files.push(handle.into());
                        }
                        None => return Ok(WlResp::InvalidId),
                    }
                }
                #[cfg(feature = "gpu")]
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE
                    if self.resource_bridge.is_some() =>
                {
                    if self.signaled_fence.is_none() {
                        // If the guest is sending a signaled fence, we know a fence
                        // with seqno 0 must already be signaled.
                        match self.get_info(ResourceRequest::GetFence { seqno: 0 }) {
                            Some(handle) => self.signaled_fence = Some(handle),
                            None => return Ok(WlResp::InvalidId),
                        }
                    }
                    // Reuse the cached signaled fence by duplicating its descriptor.
                    match self.signaled_fence.as_ref().unwrap().try_clone() {
                        Ok(dup) => {
                            *descriptor = dup.into_raw_descriptor();
                            // Safe because the fd comes from a valid SafeDescriptor.
                            let file = unsafe { File::from_raw_descriptor(*descriptor) };
                            bridged_files.push(file);
                        }
                        Err(_) => return Ok(WlResp::InvalidId),
                    }
                }
                // Reached when the gpu-gated arms above are compiled out or the bridge is
                // absent: the guest asked for a foreign resource we can't resolve.
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
                | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE
                | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE => {
                    // Silences an unused-field warning when the gpu feature is disabled.
                    let _ = self.resource_bridge.as_ref();
                    warn!("attempt to send foreign resource kind but feature is disabled");
                }
                kind => {
                    warn!("attempt to send unknown foreign resource kind: {}", kind);
                    return Ok(WlResp::InvalidId);
                }
            }
        }
        // Final stage sends file descriptors and data to the target vfd's socket.
        match self.vfds.get_mut(&vfd_id) {
            Some(vfd) => match vfd.send(&rds[..vfd_count], reader)? {
                WlResp::Ok => {}
                _ => return Ok(WlResp::InvalidType),
            },
            None => return Ok(WlResp::InvalidId),
        }
        // The vfds with remote FDs need to be closed so that the local side can receive
        // hangup events.
        for &send_vfd_id in &send_vfd_ids[..vfd_count] {
            if send_vfd_id.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL {
                if let Some(vfd) = self.vfds.get_mut(&send_vfd_id.id().into()) {
                    vfd.close_remote();
                }
            }
        }
        Ok(WlResp::Ok)
    }
    /// Receives pending data and/or file descriptors from vfd `vfd_id` and queues the results
    /// for delivery to the guest over the `in` queue.
    fn recv(&mut self, vfd_id: u32) -> WlResult<()> {
        let buf = match self.vfds.get_mut(&vfd_id) {
            Some(vfd) => {
                if vfd.is_fence {
                    // Fence vfds becoming readable are reported to the guest as a hangup
                    // (presumably: readable == signaled — confirm against WlVfd) and are
                    // removed from the wait context so they don't fire again.
                    if let Err(e) = self.wait_ctx.delete(vfd.wait_descriptor().unwrap()) {
                        warn!("failed to remove hungup vfd from poll context: {}", e);
                    }
                    self.in_queue.push_back((vfd_id, WlRecv::Hup));
                    return Ok(());
                } else {
                    // Received files land in `in_file_queue`; returned bytes in `buf`.
                    vfd.recv(&mut self.in_file_queue)?
                }
            }
            // Unknown vfd: nothing to do (it may have been closed already).
            None => return Ok(()),
        };
        // Reading nothing at all (no bytes, no files) signals that the peer hung up.
        if self.in_file_queue.is_empty() && buf.is_empty() {
            self.in_queue.push_back((vfd_id, WlRecv::Hup));
            return Ok(());
        }
        // Each incoming file becomes a new host-assigned vfd, announced to the guest ahead
        // of the data message it arrived with.
        for file in self.in_file_queue.drain(..) {
            let vfd = WlVfd::from_file(self.vm.clone(), file)?;
            if let Some(wait_descriptor) = vfd.wait_descriptor() {
                self.wait_ctx
                    .add(wait_descriptor, self.next_vfd_id)
                    .map_err(WlError::WaitContextAdd)?;
            }
            self.vfds.insert(self.next_vfd_id, vfd);
            self.in_queue.push_back((
                vfd_id,
                WlRecv::Vfd {
                    id: self.next_vfd_id,
                },
            ));
            self.next_vfd_id += 1;
        }
        self.in_queue.push_back((vfd_id, WlRecv::Data { buf }));
        Ok(())
    }
    /// Decodes one guest command from `reader` and dispatches it to the matching handler,
    /// returning the response to encode back to the guest.
    fn execute(&mut self, reader: &mut Reader) -> WlResult<WlResp> {
        // Peek at the command type without consuming it: each arm below re-reads the full
        // control structure (whose first field is the type) from the original reader.
        let type_ = {
            let mut type_reader = reader.clone();
            type_reader.read_obj::<Le32>().map_err(WlError::ParseDesc)?
        };
        match type_.into() {
            VIRTIO_WL_CMD_VFD_NEW => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNew>()
                    .map_err(WlError::ParseDesc)?;
                self.new_alloc(ctrl.id.into(), ctrl.flags.into(), ctrl.size.into())
            }
            VIRTIO_WL_CMD_VFD_CLOSE => {
                let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
                self.close(ctrl.id.into())
            }
            VIRTIO_WL_CMD_VFD_SEND => {
                let ctrl = reader
                    .read_obj::<CtrlVfdSend>()
                    .map_err(WlError::ParseDesc)?;
                // Plain sends reference only local vfd ids.
                let foreign_id = false;
                self.send(
                    ctrl.id.into(),
                    ctrl.vfd_count.to_native() as usize,
                    foreign_id,
                    reader,
                )
            }
            // Foreign-id sends can reference gpu resources, so they require the gpu feature.
            #[cfg(feature = "gpu")]
            VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID => {
                let ctrl = reader
                    .read_obj::<CtrlVfdSend>()
                    .map_err(WlError::ParseDesc)?;
                let foreign_id = true;
                self.send(
                    ctrl.id.into(),
                    ctrl.vfd_count.to_native() as usize,
                    foreign_id,
                    reader,
                )
            }
            VIRTIO_WL_CMD_VFD_NEW_CTX => {
                // Unnamed context: connect to the wayland path registered under "".
                let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
                self.new_context(ctrl.id.into(), "")
            }
            VIRTIO_WL_CMD_VFD_NEW_PIPE => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNew>()
                    .map_err(WlError::ParseDesc)?;
                self.new_pipe(ctrl.id.into(), ctrl.flags.into())
            }
            #[cfg(feature = "minigbm")]
            VIRTIO_WL_CMD_VFD_NEW_DMABUF => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNewDmabuf>()
                    .map_err(WlError::ParseDesc)?;
                self.new_dmabuf(
                    ctrl.id.into(),
                    ctrl.width.into(),
                    ctrl.height.into(),
                    ctrl.format.into(),
                )
            }
            #[cfg(feature = "minigbm")]
            VIRTIO_WL_CMD_VFD_DMABUF_SYNC => {
                let ctrl = reader
                    .read_obj::<CtrlVfdDmabufSync>()
                    .map_err(WlError::ParseDesc)?;
                self.dmabuf_sync(ctrl.id.into(), ctrl.flags.into())
            }
            VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNewCtxNamed>()
                    .map_err(WlError::ParseDesc)?;
                // The name field is fixed-size; treat the first NUL (if any) as terminator.
                let name_len = ctrl
                    .name
                    .iter()
                    .position(|x| x == &0)
                    .unwrap_or(ctrl.name.len());
                let name =
                    std::str::from_utf8(&ctrl.name[..name_len]).map_err(WlError::InvalidString)?;
                self.new_context(ctrl.id.into(), name)
            }
            op_type => {
                warn!("unexpected command {}", op_type);
                Ok(WlResp::InvalidCommand)
            }
        }
    }
    /// Returns the next message to deliver to the guest over the `in` queue, if any, without
    /// consuming it. `pop_recv` must be called after the message is successfully written.
    ///
    /// Vfd announcements for one destination accumulate in `recv_vfds` until the matching
    /// data message flushes them; a front entry for a *different* destination first flushes
    /// the accumulated ids with an empty `VfdRecv` for the previous destination.
    fn next_recv(&self) -> Option<WlResp> {
        if let Some(q) = self.in_queue.front() {
            match *q {
                (vfd_id, WlRecv::Vfd { id }) => {
                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                        match self.vfds.get(&id) {
                            Some(vfd) => Some(WlResp::VfdNew {
                                id,
                                flags: vfd.flags(self.use_transition_flags),
                                pfn: self.compute_pfn(&vfd.offset()),
                                size: vfd.size().unwrap_or_default() as u32,
                                resp: false,
                            }),
                            // The vfd is no longer in the map (presumably closed in the
                            // meantime); announce it with empty attributes.
                            _ => Some(WlResp::VfdNew {
                                id,
                                flags: 0,
                                pfn: 0,
                                size: 0,
                                resp: false,
                            }),
                        }
                    } else {
                        // Different destination: flush pending ids for the previous vfd.
                        Some(WlResp::VfdRecv {
                            id: self.current_recv_vfd.unwrap(),
                            data: &[],
                            vfds: &self.recv_vfds[..],
                        })
                    }
                }
                (vfd_id, WlRecv::Data { ref buf }) => {
                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                        // Deliver the data along with any vfd ids announced for it.
                        Some(WlResp::VfdRecv {
                            id: vfd_id,
                            data: &buf[..],
                            vfds: &self.recv_vfds[..],
                        })
                    } else {
                        // Different destination: flush pending ids for the previous vfd.
                        Some(WlResp::VfdRecv {
                            id: self.current_recv_vfd.unwrap(),
                            data: &[],
                            vfds: &self.recv_vfds[..],
                        })
                    }
                }
                (vfd_id, WlRecv::Hup) => Some(WlResp::VfdHup { id: vfd_id }),
            }
        } else {
            None
        }
    }
fn pop_recv(&mut self) {
if let Some(q) = self.in_queue.front() {
match *q {
(vfd_id, WlRecv::Vfd { id }) => {
if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
self.recv_vfds.push(id);
self.current_recv_vfd = Some(vfd_id);
} else {
self.recv_vfds.clear();
self.current_recv_vfd = None;
return;
}
}
(vfd_id, WlRecv::Data { .. }) => {
self.recv_vfds.clear();
self.current_recv_vfd = None;
if !(self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id)) {
return;
}
}
(_, WlRecv::Hup) => {
self.recv_vfds.clear();
self.current_recv_vfd = None;
}
}
}
self.in_queue.pop_front();
}
fn compute_pfn(&self, offset: &Option<u64>) -> u64 {
let addr = match (offset, self.address_offset) {
(Some(o), Some(address_offset)) => o + address_offset,
(Some(o), None) => *o,
// without shmem, 0 is the special address for "no_pfn"
(None, Some(_)) => 0,
// with shmem, WL_SHMEM_SIZE is the special address for "no_pfn"
(None, None) => WL_SHMEM_SIZE,
};
addr >> VIRTIO_WL_PFN_SHIFT
}
}
/// Error returned by `process_in_queue` when pending messages remain but the `in` queue has
/// no free descriptor chains left to carry them.
#[derive(ThisError, Debug, PartialEq, Eq)]
#[error("no descriptors available in queue")]
pub struct DescriptorsExhausted;
/// Handle incoming events and forward them to the VM over the input queue.
///
/// Returns `Err(DescriptorsExhausted)` when messages remain queued but the `in` queue has no
/// free descriptor chains left to carry them.
pub fn process_in_queue<I: SignalableInterrupt>(
    interrupt: &I,
    in_queue: &Rc<RefCell<Queue>>,
    mem: &GuestMemory,
    state: &mut WlState,
) -> ::std::result::Result<(), DescriptorsExhausted> {
    // Drain any ready vfd events into the state's internal message queue first.
    state.process_wait_context();
    let mut needs_interrupt = false;
    let mut exhausted_queue = false;
    let mut in_queue = in_queue.borrow_mut();
    loop {
        // Peek so the descriptor is only consumed when there is a message to put in it.
        let mut desc = if let Some(d) = in_queue.peek(mem) {
            d
        } else {
            exhausted_queue = true;
            break;
        };
        let mut should_pop = false;
        if let Some(in_resp) = state.next_recv() {
            match encode_resp(&mut desc.writer, in_resp) {
                Ok(()) => {
                    should_pop = true;
                }
                Err(e) => {
                    // The descriptor is still returned to the guest below; the message stays
                    // queued (should_pop == false) so it can be retried on the next chain.
                    error!("failed to encode response to descriptor chain: {}", e);
                }
            }
            let bytes_written = desc.writer.bytes_written() as u32;
            needs_interrupt = true;
            in_queue.pop_peeked(mem);
            in_queue.add_used(mem, desc, bytes_written);
        } else {
            // No more messages; leave the remaining descriptors unconsumed.
            break;
        }
        if should_pop {
            state.pop_recv();
        }
    }
    if needs_interrupt {
        in_queue.trigger_interrupt(mem, interrupt);
    }
    if exhausted_queue {
        Err(DescriptorsExhausted)
    } else {
        Ok(())
    }
}
/// Handle messages from the output queue and forward them to the display server, if necessary.
pub fn process_out_queue<I: SignalableInterrupt>(
    interrupt: &I,
    out_queue: &Rc<RefCell<Queue>>,
    mem: &GuestMemory,
    state: &mut WlState,
) {
    let mut needs_interrupt = false;
    let mut out_queue = out_queue.borrow_mut();
    // Execute every available command, writing each response back into the same chain.
    while let Some(mut desc) = out_queue.pop(mem) {
        let resp = match state.execute(&mut desc.reader) {
            Ok(r) => r,
            // Internal failures are reported to the guest as error responses.
            Err(e) => WlResp::Err(Box::new(e)),
        };
        match encode_resp(&mut desc.writer, resp) {
            Ok(()) => {}
            Err(e) => {
                error!("failed to encode response to descriptor chain: {}", e);
            }
        }
        let len = desc.writer.bytes_written() as u32;
        out_queue.add_used(mem, desc, len);
        needs_interrupt = true;
    }
    if needs_interrupt {
        out_queue.trigger_interrupt(mem, interrupt);
    }
}
/// Per-device worker state: the virtio queues, their notification events, and the wayland
/// protocol state, serviced together in `Worker::run`'s event loop.
struct Worker {
    interrupt: Interrupt,
    mem: GuestMemory,
    // Queues are shared (Rc<RefCell>) with the queue-processing helpers and reclaimed with
    // Rc::try_unwrap when the worker shuts down.
    in_queue: Rc<RefCell<Queue>>,
    in_queue_evt: Event,
    out_queue: Rc<RefCell<Queue>>,
    out_queue_evt: Event,
    state: WlState,
}
impl Worker {
    /// Wraps the queues and constructs the `WlState` that will service them.
    fn new(
        mem: GuestMemory,
        interrupt: Interrupt,
        in_queue: (Queue, Event),
        out_queue: (Queue, Event),
        wayland_paths: Map<String, PathBuf>,
        mapper: Box<dyn SharedMemoryMapper>,
        use_transition_flags: bool,
        use_send_vfd_v2: bool,
        resource_bridge: Option<Tube>,
        #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
        address_offset: Option<u64>,
    ) -> Worker {
        Worker {
            interrupt,
            mem,
            in_queue: Rc::new(RefCell::new(in_queue.0)),
            in_queue_evt: in_queue.1,
            out_queue: Rc::new(RefCell::new(out_queue.0)),
            out_queue_evt: out_queue.1,
            state: WlState::new(
                wayland_paths,
                mapper,
                use_transition_flags,
                use_send_vfd_v2,
                resource_bridge,
                #[cfg(feature = "minigbm")]
                gralloc,
                address_offset,
            ),
        }
    }
    /// Runs the device event loop until `kill_evt` fires, then returns the queues so the
    /// device state can be saved.
    fn run(mut self, kill_evt: Event) -> anyhow::Result<VirtioDeviceSaved> {
        #[derive(EventToken)]
        enum Token {
            InQueue,
            OutQueue,
            Kill,
            State,
            InterruptResample,
        }
        let wait_ctx: WaitContext<Token> = WaitContext::build_with(&[
            (&self.in_queue_evt, Token::InQueue),
            (&self.out_queue_evt, Token::OutQueue),
            (&kill_evt, Token::Kill),
            // The WlState's own wait context is polled as a single nested descriptor.
            (&self.state.wait_ctx, Token::State),
        ])
        .context("failed creating WaitContext")?;
        if let Some(resample_evt) = self.interrupt.get_resample_evt() {
            wait_ctx
                .add(resample_evt, Token::InterruptResample)
                .context("failed adding resample event to WaitContext.")?;
        }
        // Whether the nested WlState wait context is currently being watched. It is paused
        // while the `in` queue has no free descriptors, to avoid busy-waking on vfd events
        // that can't be delivered, and resumed when the guest adds descriptors.
        let mut watching_state_ctx = true;
        'wait: loop {
            let events = match wait_ctx.wait() {
                Ok(v) => v,
                Err(e) => {
                    error!("failed waiting for events: {}", e);
                    break;
                }
            };
            for event in &events {
                match event.token {
                    Token::InQueue => {
                        let _ = self.in_queue_evt.wait();
                        // The guest refilled the `in` queue; resume watching vfd events.
                        if !watching_state_ctx {
                            if let Err(e) =
                                wait_ctx.modify(&self.state.wait_ctx, EventType::Read, Token::State)
                            {
                                error!("Failed to modify wait_ctx descriptor for WlState: {}", e);
                                break;
                            }
                            watching_state_ctx = true;
                        }
                    }
                    Token::OutQueue => {
                        let _ = self.out_queue_evt.wait();
                        process_out_queue(
                            &self.interrupt,
                            &self.out_queue,
                            &self.mem,
                            &mut self.state,
                        );
                    }
                    Token::Kill => break 'wait,
                    Token::State => {
                        if let Err(DescriptorsExhausted) = process_in_queue(
                            &self.interrupt,
                            &self.in_queue,
                            &self.mem,
                            &mut self.state,
                        ) {
                            // Out of `in` descriptors: pause vfd-event watching until the
                            // guest adds more (see Token::InQueue above).
                            if let Err(e) =
                                wait_ctx.modify(&self.state.wait_ctx, EventType::None, Token::State)
                            {
                                error!(
                                    "Failed to stop watching wait_ctx descriptor for WlState: {}",
                                    e
                                );
                                break;
                            }
                            watching_state_ctx = false;
                        }
                    }
                    Token::InterruptResample => {
                        self.interrupt.interrupt_resample();
                    }
                }
            }
        }
        // Reclaim the queues from their Rc wrappers; the helpers only borrow them, so these
        // are the sole remaining strong references.
        let in_queue = match Rc::try_unwrap(self.in_queue) {
            Ok(queue_cell) => queue_cell.into_inner(),
            Err(_) => panic!("failed to recover queue from worker"),
        };
        let out_queue = match Rc::try_unwrap(self.out_queue) {
            Ok(queue_cell) => queue_cell.into_inner(),
            Err(_) => panic!("failed to recover queue from worker"),
        };
        Ok(VirtioDeviceSaved {
            queues: vec![in_queue, out_queue],
        })
    }
}
/// The virtio wayland device.
pub struct Wl {
    // Running worker thread; present only while the device is activated.
    worker_thread: Option<WorkerThread<anyhow::Result<VirtioDeviceSaved>>>,
    // Maps wayland socket names to their host paths (looked up by `new_context`).
    wayland_paths: Map<String, PathBuf>,
    // Shared-memory mapper, handed off to the worker on activation.
    mapper: Option<Box<dyn SharedMemoryMapper>>,
    // Optional tube for resolving gpu resources referenced by foreign-id sends.
    resource_bridge: Option<Tube>,
    // Feature-dependent behavior toggles, set from the guest's acked features.
    use_transition_flags: bool,
    use_send_vfd_v2: bool,
    use_shmem: bool,
    base_features: u64,
    // Gralloc instance; initialized after sandboxing (see `on_device_sandboxed`).
    #[cfg(feature = "minigbm")]
    gralloc: Option<RutabagaGralloc>,
    // Offset added to shared-memory offsets when computing guest pfns.
    address_offset: Option<u64>,
}
impl Wl {
    /// Creates a new, not-yet-activated virtio wayland device.
    ///
    /// All feature toggles start disabled; they are enabled later via `ack_features`, and
    /// the mapper/gralloc are supplied by their respective setup hooks before activation.
    pub fn new(
        base_features: u64,
        wayland_paths: Map<String, PathBuf>,
        resource_bridge: Option<Tube>,
    ) -> Result<Wl> {
        let device = Wl {
            worker_thread: None,
            wayland_paths,
            mapper: None,
            resource_bridge,
            use_transition_flags: false,
            use_send_vfd_v2: false,
            use_shmem: false,
            base_features,
            #[cfg(feature = "minigbm")]
            gralloc: None,
            address_offset: None,
        };
        Ok(device)
    }
}
impl VirtioDevice for Wl {
    /// Descriptors that must be preserved when the device process is sandboxed.
    fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut keep_rds = Vec::new();
        if let Some(mapper) = &self.mapper {
            if let Some(raw_descriptor) = mapper.as_raw_descriptor() {
                keep_rds.push(raw_descriptor);
            }
        }
        if let Some(resource_bridge) = &self.resource_bridge {
            keep_rds.push(resource_bridge.as_raw_descriptor());
        }
        keep_rds
    }
    #[cfg(feature = "minigbm")]
    fn on_device_sandboxed(&mut self) {
        // Gralloc initialization can cause some GPU drivers to create their own threads
        // and that must be done after sandboxing.
        match RutabagaGralloc::new() {
            Ok(g) => self.gralloc = Some(g),
            Err(e) => {
                error!("failed to initialize gralloc {:?}", e);
            }
        };
    }
    fn device_type(&self) -> DeviceType {
        DeviceType::Wl
    }
    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }
    /// Base features plus the wayland-specific feature bits this device offers.
    fn features(&self) -> u64 {
        self.base_features
            | 1 << VIRTIO_WL_F_TRANS_FLAGS
            | 1 << VIRTIO_WL_F_SEND_FENCES
            | 1 << VIRTIO_WL_F_USE_SHMEM
    }
    /// Records which of the offered wayland features the guest accepted.
    fn ack_features(&mut self, value: u64) {
        if value & (1 << VIRTIO_WL_F_TRANS_FLAGS) != 0 {
            self.use_transition_flags = true;
        }
        if value & (1 << VIRTIO_WL_F_SEND_FENCES) != 0 {
            self.use_send_vfd_v2 = true;
        }
        if value & (1 << VIRTIO_WL_F_USE_SHMEM) != 0 {
            self.use_shmem = true;
        }
    }
    /// Hands the queues and accumulated configuration off to a freshly started worker thread.
    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt: Interrupt,
        mut queues: Vec<(Queue, Event)>,
    ) -> anyhow::Result<()> {
        if queues.len() != QUEUE_SIZES.len() {
            return Err(anyhow!(
                "expected {} queues, got {}",
                QUEUE_SIZES.len(),
                queues.len()
            ));
        }
        // The mapper must have been supplied via set_shared_memory_mapper before activation.
        let mapper = self.mapper.take().context("missing mapper")?;
        let wayland_paths = self.wayland_paths.clone();
        let use_transition_flags = self.use_transition_flags;
        let use_send_vfd_v2 = self.use_send_vfd_v2;
        let resource_bridge = self.resource_bridge.take();
        #[cfg(feature = "minigbm")]
        let gralloc = self
            .gralloc
            .take()
            .expect("gralloc already passed to worker");
        // With the shmem feature in use, pfns are relative to the shmem region, so no extra
        // address offset is applied.
        let address_offset = if !self.use_shmem {
            self.address_offset
        } else {
            None
        };
        self.worker_thread = Some(WorkerThread::start("v_wl", move |kill_evt| {
            Worker::new(
                mem,
                interrupt,
                // queues[0] is `in`, queues[1] is `out`.
                queues.remove(0),
                queues.remove(0),
                wayland_paths,
                mapper,
                use_transition_flags,
                use_send_vfd_v2,
                resource_bridge,
                #[cfg(feature = "minigbm")]
                gralloc,
                address_offset,
            )
            .run(kill_evt)
        }));
        Ok(())
    }
    fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
        Some(SharedMemoryRegion {
            id: WL_SHMEM_ID,
            length: WL_SHMEM_SIZE,
        })
    }
    fn set_shared_memory_region_base(&mut self, shmem_base: GuestAddress) {
        self.address_offset = Some(shmem_base.0);
    }
    fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
        self.mapper = Some(mapper);
    }
    /// Stops the worker thread, returning the saved queue state if one was running.
    fn stop(&mut self) -> std::result::Result<Option<VirtioDeviceSaved>, VirtioError> {
        if let Some(worker_thread) = self.worker_thread.take() {
            let state = worker_thread.stop().map_err(VirtioError::InThreadFailure)?;
            return Ok(Some(state));
        }
        Ok(None)
    }
}
// Empty impl: Wl relies on the trait's default method implementations for suspend/resume.
impl Suspendable for Wl {}