Update vhost-user-backend and its dependencies

vhost-user-backend -> 0.17.0
vhost -> 0.13.0
virtio-queue -> 0.14.0
vm-memory -> 0.16.1
uuid -> 1.11.0
virtio-bindings -> 0.2.4
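
Note: uuid 1.11.0 renames the RFC 4122 timestamp helpers to "gregorian"
equivalents and keeps the old names as hidden, deprecated shims (see
crates/uuid/src/builder.rs and crates/uuid/src/timestamp.rs below), so
existing callers keep building. The following is a minimal caller-side
sketch of the new names; the helper functions are illustrative only and
not part of this change:

    use uuid::{Builder, Uuid};

    // Was Builder::from_rfc4122_timestamp(ticks, counter, node_id) in 1.7.0.
    fn v1_uuid(ticks: u64, counter: u16, node_id: &[u8; 6]) -> Uuid {
        Builder::from_gregorian_timestamp(ticks, counter, node_id).into_uuid()
    }

    // Was Timestamp::to_rfc4122() in 1.7.0; only meaningful for v1/v6 UUIDs.
    fn gregorian_parts(uuid: &Uuid) -> Option<(u64, u16)> {
        uuid.get_timestamp().map(|ts| ts.to_gregorian())
    }
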
Bug: 381144621
Test: build cuttlefish
Change-Id: Ia7bc95fed621fb6344ef962e7ae50d43a4d4d75f
diff --git a/crates/uuid/.cargo-checksum.json b/crates/uuid/.cargo-checksum.json
index 63a4c55..3c67d65 100644
--- a/crates/uuid/.cargo-checksum.json
+++ b/crates/uuid/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"97148562e0953fb78f1a174448530dc5fefedf983ca46c21c4bffb06fd997351","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"436bc5a105d8e57dcd8778730f3754f7bf39c14d2f530e4cde4bd2d17a83ec3d","README.md":"5ce601f007d7c2c3b13068a7e9938e08aca71944cb0b4179c627196b06a70a7a","src/builder.rs":"2d6e3bf66fc87e36c44b1a8c108dca70f01f6296ac0d50d0a788b69b5a3dff88","src/error.rs":"621799694111aab9d6e570290f4d1b341fe6be58d2b3425fa7891ada7575cab7","src/external.rs":"a0640bdb98de1c24fcc9851a438a5abe6f7e3acb195885c817a64fac25521b9d","src/external/arbitrary_support.rs":"7e7fbcc4b8af5878b71858a1f5fa31e85d84fc2fd159614f8d450ba1fe06ac28","src/external/borsh_support.rs":"b49d82a59653445ba26db46a1515294b1ab480c0671dbe5499dfd1fb02588b3b","src/external/serde_support.rs":"1263176b9916bf61fe3ab32140c22e2e757ea29ffff6f5459b1b720acbe2ed9d","src/external/slog_support.rs":"53c6251e424bdc9b6ba35defb4c723d4d34d44053badbf98e1c7c3e7c83fbb5e","src/fmt.rs":"3bf88d68d838bef81380a1e669a86eee46f24a8113effbd7b4e92da714ec97c7","src/lib.rs":"96d77474e7d9312cc287c5fad2fa44caf775ef0ce7da825e90c736cbcee615d4","src/macros.rs":"8fa9380a39e9952f6adc9eb858264fc09219a97215f4e54768d9e0ec7f520db7","src/md5.rs":"316d65b760ffa58beb6aa678be24359eb21a744e9e143bc99c11fe1907659245","src/parser.rs":"3f30a776ed1792107fed88c15fb393167283cc487efc647fb6504824a4fc3afb","src/rng.rs":"b9f69610560aa5b5d9b2eaa16fb2239515bd163da126cf5c392e5fb9b0296c3e","src/sha1.rs":"e1a9657e11f1ed1ede33c0655f9c2641059b7c24f17be4ac425c930cc216e019","src/timestamp.rs":"2b50321ee768cd9c3ad5bdf1e4924e8e5deaa8d1a031d24dcfaf2fec44530429","src/v1.rs":"123c4b23d0458e77e6c3ed6235e0c786436e53f53d1c8074cf80d77c2ae40611","src/v3.rs":"b7d605c4d2f56a9e57bfe779ef2488fa612c7cb28568f68252093f48ac2edef4","src/v4.rs":"4f06567460871348df0ff2825e9faad9d950a9337e409cb2b568c86118addb97","src/v5.rs":"64e3feb673c8411954f930188e756803b90b57d13ec36652cd2c669e81654333","src/v6.rs":"2dd938c5bf6d1960f4fb7d179a6163c1cfd76ab24f98aaca85d0f74940184645","src/v7.rs":"e46924c0c9c9b1ac6746180d78cd3531920fbd8379f1629abdcec7914bae36c8","src/v8.rs":"8705405fef52e4706c7cf99f7ed231dde733b7cd08ced79be9e29c949be2280f"},"package":"f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a"}
\ No newline at end of file
+{"files":{"Cargo.toml":"2a7f9c6958b513e041d63dfa988f4f0b48f6634d654f6fd3241ee1fd9e680aa3","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"436bc5a105d8e57dcd8778730f3754f7bf39c14d2f530e4cde4bd2d17a83ec3d","README.md":"4e2fe3e90f75b929f05775e599470f4987e41c0423a7bcd6a7ee526c29354e9c","src/builder.rs":"aa0aaa943858ceea12b06025556c1332f3b57996f5c000545a5ce84c25aaa0ae","src/error.rs":"6a402cde971f7d186be3a184953bc6c89e8b8039fa95cd351e1e158fbe7378cb","src/external.rs":"a0640bdb98de1c24fcc9851a438a5abe6f7e3acb195885c817a64fac25521b9d","src/external/arbitrary_support.rs":"7e7fbcc4b8af5878b71858a1f5fa31e85d84fc2fd159614f8d450ba1fe06ac28","src/external/borsh_support.rs":"b49d82a59653445ba26db46a1515294b1ab480c0671dbe5499dfd1fb02588b3b","src/external/serde_support.rs":"b818f54e784143677146666a6737597bf95523d0de73fedc90b22d9c949b0997","src/external/slog_support.rs":"4fe1cc136b1eb5e27d8b308801bcd72872c129fd20ced1f2415af6760a83751b","src/fmt.rs":"3bf88d68d838bef81380a1e669a86eee46f24a8113effbd7b4e92da714ec97c7","src/lib.rs":"50eb1bd0151dea8bdc61a75f52c9144ddf7df6064dde50d0554af33921d37ebe","src/macros.rs":"dff4a00bcbc37912d38d58edc3edfb8552ba8bb936403b8b33fe7dc3c2041908","src/md5.rs":"316d65b760ffa58beb6aa678be24359eb21a744e9e143bc99c11fe1907659245","src/parser.rs":"838e4a5af613a1d9b9dd6ca4b3c13a42e65fdea35fc02c93c34a416387dbdb7c","src/rng.rs":"d9cdd08ca225f28c103276c985db0540bb8db877a4bcb5348cb4a2648b29883e","src/sha1.rs":"e1a9657e11f1ed1ede33c0655f9c2641059b7c24f17be4ac425c930cc216e019","src/timestamp.rs":"3e282b309e0e7f93518e8ac034e9c3a8e5edda6a6bc9bf3fb6105a8c5a50ff22","src/v1.rs":"9c8254742e58a1d75b8374260108516fc914e2641f83e3a8ada75f05a62a62d1","src/v3.rs":"287860f5376e35d5292959d65948bdb0bbdb4605e3d2e463742c5400075bbe76","src/v4.rs":"c2f2844791cdb2c9e0c90bf7d9d155b96572f1f77aa9586104ddb77d44a5aeea","src/v5.rs":"70799f332c043b3d3ddf4aee791aa448296a5e05122be434945076f9cb29517c","src/v6.rs":"7bd0a52aa316e145ad55b99b0ad46ad3234b0936ab61a4935300f053f2030a56","src/v7.rs":"5e38bf0068606c797f45bb0ad72133a18fc2a879ce595b4b205e75a66266d5e3","src/v8.rs":"15a4c3b81afcca4ec406192f2099fac0ad43d734e12672b02a693ddcc38b6684"},"package":"f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a"}
\ No newline at end of file
diff --git a/crates/uuid/Android.bp b/crates/uuid/Android.bp
index b08a4a2..59d4211 100644
--- a/crates/uuid/Android.bp
+++ b/crates/uuid/Android.bp
@@ -18,7 +18,7 @@
host_supported: true,
crate_name: "uuid",
cargo_env_compat: true,
- cargo_pkg_version: "1.7.0",
+ cargo_pkg_version: "1.11.0",
crate_root: "src/lib.rs",
edition: "2018",
features: [
@@ -47,7 +47,7 @@
host_supported: true,
crate_name: "uuid",
cargo_env_compat: true,
- cargo_pkg_version: "1.7.0",
+ cargo_pkg_version: "1.11.0",
crate_root: "src/lib.rs",
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -81,7 +81,7 @@
name: "libuuid_nostd",
crate_name: "uuid",
cargo_env_compat: true,
- cargo_pkg_version: "1.7.0",
+ cargo_pkg_version: "1.11.0",
crate_root: "src/lib.rs",
edition: "2018",
apex_available: [
diff --git a/crates/uuid/Cargo.toml b/crates/uuid/Cargo.toml
index 320fd4c..4355c0b 100644
--- a/crates/uuid/Cargo.toml
+++ b/crates/uuid/Cargo.toml
@@ -13,19 +13,23 @@
edition = "2018"
rust-version = "1.60.0"
name = "uuid"
-version = "1.7.0"
+version = "1.11.0"
authors = [
"Ashley Mannix<ashleymannix@live.com.au>",
- "Christopher Armstrong",
"Dylan DPC<dylan.dpc@gmail.com>",
"Hunar Roop Kahlon<hunar.roop@gmail.com>",
]
+build = false
include = [
"src",
"README.md",
"LICENSE-APACHE",
"LICENSE-MIT",
]
+autobins = false
+autoexamples = false
+autotests = false
+autobenches = false
description = "A library to generate and parse UUIDs."
homepage = "https://github.com/uuid-rs/uuid"
documentation = "https://docs.rs/uuid"
@@ -80,12 +84,16 @@
"v8",
]
+[lib]
+name = "uuid"
+path = "src/lib.rs"
+
[dependencies.arbitrary]
version = "1.1.3"
optional = true
[dependencies.atomic]
-version = "0.5"
+version = "0.6"
optional = true
default-features = false
@@ -132,15 +140,12 @@
optional = true
[dependencies.uuid-macro-internal]
-version = "1.7.0"
-optional = true
-
-[dependencies.wasm-bindgen]
-version = "0.2"
+version = "1.11.0"
optional = true
[dependencies.zerocopy]
-version = "0.6"
+version = "0.8"
+features = ["derive"]
optional = true
[dev-dependencies.bincode]
@@ -186,16 +191,17 @@
v4 = ["rng"]
v5 = ["sha1"]
v6 = ["atomic"]
-v7 = [
- "atomic",
- "rng",
-]
+v7 = ["rng"]
v8 = []
-[target."cfg(all(target_arch = \"wasm32\", target_vendor = \"unknown\", target_os = \"unknown\"))".dev-dependencies.wasm-bindgen-test]
+[target.'cfg(all(target_arch = "wasm32", target_vendor = "unknown", target_os = "unknown"))'.dependencies.wasm-bindgen]
+version = "0.2"
+optional = true
+
+[target.'cfg(all(target_arch = "wasm32", target_vendor = "unknown", target_os = "unknown"))'.dev-dependencies.wasm-bindgen-test]
version = "0.3"
-[target."cfg(target = \"wasm32-unknown-unknown\")".dev-dependencies.wasm-bindgen]
+[target.'cfg(target = "wasm32-unknown-unknown")'.dev-dependencies.wasm-bindgen]
version = "0.2"
[badges.is-it-maintained-issue-resolution]
@@ -206,3 +212,8 @@
[badges.maintenance]
status = "actively-developed"
+
+[lints.rust.unexpected_cfgs]
+level = "allow"
+priority = 0
+check-cfg = ["cfg(uuid_unstable)"]
diff --git a/crates/uuid/METADATA b/crates/uuid/METADATA
index 51da58e..1d6b68f 100644
--- a/crates/uuid/METADATA
+++ b/crates/uuid/METADATA
@@ -1,20 +1,17 @@
-# This project was upgraded with external_updater.
-# Usage: tools/external_updater/updater.sh update external/rust/crates/uuid
-# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
-
name: "uuid"
description: "A library to generate and parse UUIDs."
third_party {
+ version: "1.11.0"
license_type: NOTICE
last_upgrade_date {
year: 2024
- month: 2
- day: 7
+ month: 11
+ day: 21
}
homepage: "https://crates.io/crates/uuid"
identifier {
type: "Archive"
- value: "https://static.crates.io/crates/uuid/uuid-1.7.0.crate"
- version: "1.7.0"
+ value: "https://static.crates.io/crates/uuid/uuid-1.11.0.crate"
+ version: "1.11.0"
}
}
diff --git a/crates/uuid/README.md b/crates/uuid/README.md
index a1aba43..feff9cf 100644
--- a/crates/uuid/README.md
+++ b/crates/uuid/README.md
@@ -28,7 +28,7 @@
```toml
[dependencies.uuid]
-version = "1.7.0"
+version = "1.11.0"
features = [
"v4", # Lets you generate random UUIDs
"fast-rng", # Use a faster (but still sufficiently random) RNG
@@ -65,13 +65,13 @@
If you'd like to parse UUIDs _really_ fast, check out the [`uuid-simd`](https://github.com/nugine/uuid-simd)
library.
-For more details on using `uuid`, [see the library documentation](https://docs.rs/uuid/1.7.0/uuid).
+For more details on using `uuid`, [see the library documentation](https://docs.rs/uuid/1.11.0/uuid).
## References
-* [`uuid` library docs](https://docs.rs/uuid/1.7.0/uuid).
+* [`uuid` library docs](https://docs.rs/uuid/1.11.0/uuid).
* [Wikipedia: Universally Unique Identifier](http://en.wikipedia.org/wiki/Universally_unique_identifier).
-* [RFC4122: A Universally Unique IDentifier (UUID) URN Namespace](http://tools.ietf.org/html/rfc4122).
+* [RFC 9562: Universally Unique IDentifiers (UUID)](https://www.ietf.org/rfc/rfc9562.html).
---
# License
diff --git a/crates/uuid/src/builder.rs b/crates/uuid/src/builder.rs
index 2dd68a2..e59cb43 100644
--- a/crates/uuid/src/builder.rs
+++ b/crates/uuid/src/builder.rs
@@ -54,7 +54,7 @@
///
/// # References
///
- /// * [Nil UUID in RFC4122](https://tools.ietf.org/html/rfc4122.html#section-4.1.7)
+ /// * [Nil UUID in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.9)
///
/// # Examples
///
@@ -80,7 +80,7 @@
///
/// # References
///
- /// * [Max UUID in Draft RFC: New UUID Formats, Version 4](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-04#section-5.4)
+ /// * [Max UUID in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.10)
///
/// # Examples
///
@@ -549,9 +549,11 @@
Builder(Uuid::from_bytes_le(b))
}
- /// Creates a `Builder` for a version 1 UUID using the supplied timestamp and node ID.
- pub const fn from_rfc4122_timestamp(ticks: u64, counter: u16, node_id: &[u8; 6]) -> Self {
- Builder(timestamp::encode_rfc4122_timestamp(ticks, counter, node_id))
+ /// Creates a `Builder` for a version 1 UUID using the supplied timestamp, counter, and node ID.
+ pub const fn from_gregorian_timestamp(ticks: u64, counter: u16, node_id: &[u8; 6]) -> Self {
+ Builder(timestamp::encode_gregorian_timestamp(
+ ticks, counter, node_id,
+ ))
}
/// Creates a `Builder` for a version 3 UUID using the supplied MD5 hashed bytes.
@@ -596,22 +598,24 @@
.with_version(Version::Sha1)
}
- /// Creates a `Builder` for a version 6 UUID using the supplied timestamp and node ID.
+ /// Creates a `Builder` for a version 6 UUID using the supplied timestamp, counter, and node ID.
///
/// This method will encode the ticks, counter, and node ID in a sortable UUID.
- pub const fn from_sorted_rfc4122_timestamp(
+ pub const fn from_sorted_gregorian_timestamp(
ticks: u64,
counter: u16,
node_id: &[u8; 6],
) -> Self {
- Builder(timestamp::encode_sorted_rfc4122_timestamp(
+ Builder(timestamp::encode_sorted_gregorian_timestamp(
ticks, counter, node_id,
))
}
- /// Creates a `Builder` for a version 7 UUID using the supplied Unix timestamp and random bytes.
+ /// Creates a `Builder` for a version 7 UUID using the supplied Unix timestamp and counter bytes.
///
- /// This method assumes the bytes are already sufficiently random.
+ /// This method will set the variant field within the counter bytes without attempting to shift
+ /// the data around it. Callers using the counter as a monotonic value should be careful not to
+ /// store significant data in the 2 least significant bits of the 3rd byte.
///
/// # Examples
///
@@ -636,10 +640,10 @@
/// # Ok(())
/// # }
/// ```
- pub const fn from_unix_timestamp_millis(millis: u64, random_bytes: &[u8; 10]) -> Self {
+ pub const fn from_unix_timestamp_millis(millis: u64, counter_random_bytes: &[u8; 10]) -> Self {
Builder(timestamp::encode_unix_timestamp_millis(
millis,
- random_bytes,
+ counter_random_bytes,
))
}
@@ -901,3 +905,26 @@
self.0
}
}
+
+#[doc(hidden)]
+impl Builder {
+ #[deprecated(
+ since = "1.11.0",
+ note = "use `Builder::from_gregorian_timestamp(ticks, counter, node_id)`"
+ )]
+ pub const fn from_rfc4122_timestamp(ticks: u64, counter: u16, node_id: &[u8; 6]) -> Self {
+ Builder::from_gregorian_timestamp(ticks, counter, node_id)
+ }
+
+ #[deprecated(
+ since = "1.11.0",
+ note = "use `Builder::from_sorted_gregorian_timestamp(ticks, counter, node_id)`"
+ )]
+ pub const fn from_sorted_rfc4122_timestamp(
+ ticks: u64,
+ counter: u16,
+ node_id: &[u8; 6],
+ ) -> Self {
+ Builder::from_sorted_gregorian_timestamp(ticks, counter, node_id)
+ }
+}
diff --git a/crates/uuid/src/error.rs b/crates/uuid/src/error.rs
index e9aecbf..30e0175 100644
--- a/crates/uuid/src/error.rs
+++ b/crates/uuid/src/error.rs
@@ -82,7 +82,7 @@
group_bounds[hyphen_count] = index;
}
hyphen_count += 1;
- } else if !matches!(byte, b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F') {
+ } else if !byte.is_ascii_hexdigit() {
// Non-hex char
return Error(ErrorKind::Char {
character: byte as char,
diff --git a/crates/uuid/src/external/serde_support.rs b/crates/uuid/src/external/serde_support.rs
index 5228daf..f389271 100644
--- a/crates/uuid/src/external/serde_support.rs
+++ b/crates/uuid/src/external/serde_support.rs
@@ -84,22 +84,22 @@
{
#[rustfmt::skip]
let bytes = [
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
- match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(16, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(0, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(1, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(2, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(3, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(4, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(5, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(6, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(7, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(8, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(9, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(10, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(11, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(12, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(13, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(14, &self)) },
+ match seq.next_element()? { Some(e) => e, None => return Err(A::Error::invalid_length(15, &self)) },
];
Ok(Uuid::from_bytes(bytes))
@@ -127,6 +127,33 @@
}
}
+enum ExpectedFormat {
+ Simple,
+ Braced,
+ Urn,
+}
+
+impl std::fmt::Display for ExpectedFormat {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let s = match self {
+ ExpectedFormat::Simple => "a simple Uuid string like 67e5504410b1426f9247bb680e5fe0c8",
+ ExpectedFormat::Braced => {
+ "a braced Uuid string like {67e55044-10b1-426f-9247-bb680e5fe0c8}"
+ }
+ ExpectedFormat::Urn => {
+ "a URN Uuid string like urn:uuid:67e55044-10b1-426f-9247-bb680e5fe0c8"
+ }
+ };
+ f.write_str(s)
+ }
+}
+
+impl de::Expected for ExpectedFormat {
+ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ <ExpectedFormat as std::fmt::Display>::fmt(self, formatter)
+ }
+}
+
pub mod compact {
//! Serialize a [`Uuid`] as a `[u8; 16]`.
//!
@@ -157,7 +184,7 @@
#[cfg(test)]
mod tests {
use serde_derive::*;
- use serde_test::{self, Configure};
+ use serde_test::Configure;
#[test]
fn test_serialize_compact() {
@@ -207,6 +234,401 @@
}
}
+/// Serialize from a [`Uuid`] as a `uuid::fmt::Simple`
+///
+/// [`Uuid`]: ../../struct.Uuid.html
+///
+/// ## Example
+///
+/// ```rust
+/// #[derive(serde_derive::Serialize, serde_derive::Deserialize)]
+/// struct StructA {
+/// // This will change both serailization and deserialization
+/// #[serde(with = "uuid::serde::simple")]
+/// id: uuid::Uuid,
+/// }
+///
+/// #[derive(serde_derive::Serialize, serde_derive::Deserialize)]
+/// struct StructB {
+/// // This will be serialized as uuid::fmt::Simple and deserialize from all valid formats
+/// #[serde(serialize_with = "uuid::serde::simple::serialize")]
+/// id: uuid::Uuid,
+/// }
+/// ```
+pub mod simple {
+ use serde::{de, Deserialize};
+
+ use crate::{parser::parse_simple, Uuid};
+
+ use super::ExpectedFormat;
+
+ /// Serialize from a [`Uuid`] as a `uuid::fmt::Simple`
+ ///
+ /// [`Uuid`]: ../../struct.Uuid.html
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// #[derive(serde_derive::Serialize)]
+ /// struct Struct {
+ /// // This will be serialize as uuid::fmt::Simple
+ /// #[serde(serialize_with = "uuid::serde::simple::serialize")]
+ /// id: uuid::Uuid,
+ /// }
+ ///
+ /// ```
+ pub fn serialize<S>(u: &crate::Uuid, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ serde::Serialize::serialize(u.as_simple(), serializer)
+ }
+
+ /// Deserialize a simple Uuid string as a [`Uuid`]
+ ///
+ /// [`Uuid`]: ../../struct.Uuid.html
+ pub fn deserialize<'de, D>(deserializer: D) -> Result<Uuid, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ let s = <&str as Deserialize>::deserialize(deserializer)?;
+ let bytes = parse_simple(s.as_bytes()).map_err(|_| {
+ de::Error::invalid_value(de::Unexpected::Str(s), &ExpectedFormat::Simple)
+ })?;
+ Ok(Uuid::from_bytes(bytes))
+ }
+
+ #[cfg(test)]
+ mod tests {
+ use serde::de::{self, Error};
+ use serde_test::{Readable, Token};
+
+ use crate::{external::serde_support::ExpectedFormat, Uuid};
+
+ const HYPHENATED_UUID_STR: &'static str = "f9168c5e-ceb2-4faa-b6bf-329bf39fa1e4";
+ const SIMPLE_UUID_STR: &'static str = "f9168c5eceb24faab6bf329bf39fa1e4";
+
+ #[test]
+ fn test_serialize_as_simple() {
+ #[derive(serde_derive::Serialize)]
+ struct Struct(#[serde(with = "super")] crate::Uuid);
+
+ let u = Struct(Uuid::parse_str(HYPHENATED_UUID_STR).unwrap());
+ serde_test::assert_ser_tokens(
+ &u,
+ &[
+ Token::NewtypeStruct { name: "Struct" },
+ Token::Str(SIMPLE_UUID_STR),
+ ],
+ );
+ }
+
+ #[test]
+ fn test_de_from_simple() {
+ #[derive(PartialEq, Debug, serde_derive::Deserialize)]
+ struct Struct(#[serde(with = "super")] crate::Uuid);
+ let s = Struct(HYPHENATED_UUID_STR.parse().unwrap());
+ serde_test::assert_de_tokens::<Struct>(
+ &s,
+ &[
+ Token::TupleStruct {
+ name: "Struct",
+ len: 1,
+ },
+ Token::BorrowedStr(SIMPLE_UUID_STR),
+ Token::TupleStructEnd,
+ ],
+ );
+ }
+
+ #[test]
+ fn test_de_reject_hypenated() {
+ #[derive(PartialEq, Debug, serde_derive::Deserialize)]
+ struct Struct(#[serde(with = "super")] crate::Uuid);
+ serde_test::assert_de_tokens_error::<Readable<Struct>>(
+ &[
+ Token::TupleStruct {
+ name: "Struct",
+ len: 1,
+ },
+ Token::BorrowedStr(HYPHENATED_UUID_STR),
+ Token::TupleStructEnd,
+ ],
+ &format!(
+ "{}",
+ de::value::Error::invalid_value(
+ de::Unexpected::Str(HYPHENATED_UUID_STR),
+ &ExpectedFormat::Simple,
+ )
+ ),
+ );
+ }
+ }
+}
+
+/// Serialize from a [`Uuid`] as a `uuid::fmt::Braced`
+///
+/// [`Uuid`]: ../../struct.Uuid.html
+///
+/// ## Example
+///
+/// ```rust
+/// #[derive(serde_derive::Serialize, serde_derive::Deserialize)]
+/// struct StructA {
+/// // This will change both serailization and deserialization
+/// #[serde(with = "uuid::serde::braced")]
+/// id: uuid::Uuid,
+/// }
+///
+/// #[derive(serde_derive::Serialize, serde_derive::Deserialize)]
+/// struct StructB {
+/// // This will be serialized as uuid::fmt::Urn and deserialize from all valid formats
+/// #[serde(serialize_with = "uuid::serde::braced::serialize")]
+/// id: uuid::Uuid,
+/// }
+/// ```
+pub mod braced {
+ use serde::{de, Deserialize};
+
+ use crate::parser::parse_braced;
+
+ use super::ExpectedFormat;
+
+ /// Serialize from a [`Uuid`] as a `uuid::fmt::Braced`
+ ///
+ /// [`Uuid`]: ../../struct.Uuid.html
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// #[derive(serde_derive::Serialize)]
+ /// struct Struct {
+ /// // This will be serialize as uuid::fmt::Braced
+ /// #[serde(serialize_with = "uuid::serde::braced::serialize")]
+ /// id: uuid::Uuid,
+ /// }
+ ///
+ /// ```
+ pub fn serialize<S>(u: &crate::Uuid, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ serde::Serialize::serialize(u.as_braced(), serializer)
+ }
+
+ /// Deserialize a braced Uuid string as a [`Uuid`]
+ ///
+ /// [`Uuid`]: ../../struct.Uuid.html
+ pub fn deserialize<'de, D>(deserializer: D) -> Result<crate::Uuid, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ let s = <&str as Deserialize>::deserialize(deserializer)?;
+ let bytes = parse_braced(s.as_bytes()).map_err(|_| {
+ de::Error::invalid_value(de::Unexpected::Str(s), &ExpectedFormat::Braced)
+ })?;
+ Ok(crate::Uuid::from_bytes(bytes))
+ }
+
+ #[cfg(test)]
+ mod tests {
+ use serde::de::{self, Error};
+ use serde_test::{Readable, Token};
+
+ use crate::{external::serde_support::ExpectedFormat, Uuid};
+
+ const HYPHENATED_UUID_STR: &'static str = "f9168c5e-ceb2-4faa-b6bf-329bf39fa1e4";
+ const BRACED_UUID_STR: &'static str = "{f9168c5e-ceb2-4faa-b6bf-329bf39fa1e4}";
+
+ #[test]
+ fn test_serialize_as_braced() {
+ #[derive(serde_derive::Serialize)]
+ struct Struct(#[serde(with = "super")] crate::Uuid);
+
+ let u = Struct(Uuid::parse_str(HYPHENATED_UUID_STR).unwrap());
+ serde_test::assert_ser_tokens(
+ &u,
+ &[
+ Token::NewtypeStruct { name: "Struct" },
+ Token::Str(BRACED_UUID_STR),
+ ],
+ );
+ }
+
+ #[test]
+ fn test_de_from_braced() {
+ #[derive(PartialEq, Debug, serde_derive::Deserialize)]
+ struct Struct(#[serde(with = "super")] crate::Uuid);
+ let s = Struct(HYPHENATED_UUID_STR.parse().unwrap());
+ serde_test::assert_de_tokens::<Struct>(
+ &s,
+ &[
+ Token::TupleStruct {
+ name: "Struct",
+ len: 1,
+ },
+ Token::BorrowedStr(BRACED_UUID_STR),
+ Token::TupleStructEnd,
+ ],
+ );
+ }
+
+ #[test]
+ fn test_de_reject_hypenated() {
+ #[derive(PartialEq, Debug, serde_derive::Deserialize)]
+ struct Struct(#[serde(with = "super")] crate::Uuid);
+ serde_test::assert_de_tokens_error::<Readable<Struct>>(
+ &[
+ Token::TupleStruct {
+ name: "Struct",
+ len: 1,
+ },
+ Token::BorrowedStr(HYPHENATED_UUID_STR),
+ Token::TupleStructEnd,
+ ],
+ &format!(
+ "{}",
+ de::value::Error::invalid_value(
+ de::Unexpected::Str(HYPHENATED_UUID_STR),
+ &ExpectedFormat::Braced,
+ )
+ ),
+ );
+ }
+ }
+}
+
+/// Serialize from a [`Uuid`] as a `uuid::fmt::Urn`
+///
+/// [`Uuid`]: ../../struct.Uuid.html
+///
+/// ## Example
+///
+/// ```rust
+/// #[derive(serde_derive::Serialize, serde_derive::Deserialize)]
+/// struct StructA {
+/// // This will change both serailization and deserialization
+/// #[serde(with = "uuid::serde::urn")]
+/// id: uuid::Uuid,
+/// }
+///
+/// #[derive(serde_derive::Serialize, serde_derive::Deserialize)]
+/// struct StructB {
+/// // This will be serialized as uuid::fmt::Urn and deserialize from all valid formats
+/// #[serde(serialize_with = "uuid::serde::urn::serialize")]
+/// id: uuid::Uuid,
+/// }
+/// ```
+pub mod urn {
+ use serde::{de, Deserialize};
+
+ use crate::parser::parse_urn;
+
+ use super::ExpectedFormat;
+
+ /// Serialize from a [`Uuid`] as a `uuid::fmt::Urn`
+ ///
+ /// [`Uuid`]: ../../struct.Uuid.html
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// #[derive(serde_derive::Serialize)]
+ /// struct Struct {
+ /// // This will be serialize as uuid::fmt::Urn
+ /// #[serde(serialize_with = "uuid::serde::urn::serialize")]
+ /// id: uuid::Uuid,
+ /// }
+ ///
+ /// ```
+ pub fn serialize<S>(u: &crate::Uuid, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ serde::Serialize::serialize(u.as_urn(), serializer)
+ }
+
+ /// Deserialize a urn Uuid string as a [`Uuid`]
+ ///
+ /// [`Uuid`]: ../../struct.Uuid.html
+ pub fn deserialize<'de, D>(deserializer: D) -> Result<crate::Uuid, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ let s = <&str as Deserialize>::deserialize(deserializer)?;
+ let bytes = parse_urn(s.as_bytes())
+ .map_err(|_| de::Error::invalid_value(de::Unexpected::Str(s), &ExpectedFormat::Urn))?;
+ Ok(crate::Uuid::from_bytes(bytes))
+ }
+
+ #[cfg(test)]
+ mod tests {
+ use serde::de::{self, Error};
+ use serde_test::{Readable, Token};
+
+ use crate::{external::serde_support::ExpectedFormat, Uuid};
+
+ const HYPHENATED_UUID_STR: &'static str = "f9168c5e-ceb2-4faa-b6bf-329bf39fa1e4";
+ const URN_UUID_STR: &'static str = "urn:uuid:f9168c5e-ceb2-4faa-b6bf-329bf39fa1e4";
+
+ #[test]
+ fn test_serialize_as_urn() {
+ #[derive(serde_derive::Serialize)]
+ struct Struct(#[serde(with = "super")] crate::Uuid);
+
+ let u = Struct(Uuid::parse_str(HYPHENATED_UUID_STR).unwrap());
+ serde_test::assert_ser_tokens(
+ &u,
+ &[
+ Token::NewtypeStruct { name: "Struct" },
+ Token::Str(URN_UUID_STR),
+ ],
+ );
+ }
+
+ #[test]
+ fn test_de_from_urn() {
+ #[derive(PartialEq, Debug, serde_derive::Deserialize)]
+ struct Struct(#[serde(with = "super")] crate::Uuid);
+ let s = Struct(HYPHENATED_UUID_STR.parse().unwrap());
+ serde_test::assert_de_tokens::<Struct>(
+ &s,
+ &[
+ Token::TupleStruct {
+ name: "Struct",
+ len: 1,
+ },
+ Token::BorrowedStr(URN_UUID_STR),
+ Token::TupleStructEnd,
+ ],
+ );
+ }
+
+ #[test]
+ fn test_de_reject_hypenated() {
+ #[derive(PartialEq, Debug, serde_derive::Deserialize)]
+ struct Struct(#[serde(with = "super")] crate::Uuid);
+ serde_test::assert_de_tokens_error::<Readable<Struct>>(
+ &[
+ Token::TupleStruct {
+ name: "Struct",
+ len: 1,
+ },
+ Token::BorrowedStr(HYPHENATED_UUID_STR),
+ Token::TupleStructEnd,
+ ],
+ &format!(
+ "{}",
+ de::value::Error::invalid_value(
+ de::Unexpected::Str(HYPHENATED_UUID_STR),
+ &ExpectedFormat::Urn,
+ )
+ ),
+ );
+ }
+ }
+}
+
#[cfg(test)]
mod serde_tests {
use super::*;
diff --git a/crates/uuid/src/external/slog_support.rs b/crates/uuid/src/external/slog_support.rs
index 2d6e817..cb06255 100644
--- a/crates/uuid/src/external/slog_support.rs
+++ b/crates/uuid/src/external/slog_support.rs
@@ -26,7 +26,7 @@
mod tests {
use crate::tests::new;
- use slog::{self, crit, Drain};
+ use slog::{crit, Drain};
#[test]
fn test_slog_kv() {
diff --git a/crates/uuid/src/lib.rs b/crates/uuid/src/lib.rs
index 4c3c9b9..1316d62 100644
--- a/crates/uuid/src/lib.rs
+++ b/crates/uuid/src/lib.rs
@@ -30,8 +30,7 @@
//! practical purposes, it can be assumed that an unintentional collision would
//! be extremely unlikely.
//!
-//! UUIDs have a number of standardized encodings that are specified in [RFC4122](http://tools.ietf.org/html/rfc4122),
-//! with recent additions [in draft](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-04).
+//! UUIDs have a number of standardized encodings that are specified in [RFC 9562](https://www.ietf.org/rfc/rfc9562.html).
//!
//! # Getting started
//!
@@ -39,7 +38,7 @@
//!
//! ```toml
//! [dependencies.uuid]
-//! version = "1.7.0"
+//! version = "1.11.0"
//! features = [
//! "v4", # Lets you generate random UUIDs
//! "fast-rng", # Use a faster (but still sufficiently random) RNG
@@ -84,8 +83,6 @@
//! * `v7` - Version 7 UUIDs using a Unix timestamp.
//! * `v8` - Version 8 UUIDs using user-defined data.
//!
-//! Versions that are in draft are also supported. See the _unstable features_ section for details.
-//!
//! This library also includes a [`Builder`] type that can be used to help construct UUIDs of any
//! version without any additional dependencies or features. It's a lower-level API than [`Uuid`]
//! that can be used when you need control over implicit requirements on things like a source
@@ -141,7 +138,7 @@
//!
//! ```toml
//! [dependencies.uuid]
-//! version = "1.7.0"
+//! version = "1.11.0"
//! features = [
//! "v4",
//! "v7",
@@ -156,7 +153,7 @@
//!
//! ```toml
//! [dependencies.uuid]
-//! version = "1.7.0"
+//! version = "1.11.0"
//! default-features = false
//! ```
//!
@@ -202,8 +199,7 @@
//! # References
//!
//! * [Wikipedia: Universally Unique Identifier](http://en.wikipedia.org/wiki/Universally_unique_identifier)
-//! * [RFC4122: A Universally Unique Identifier (UUID) URN Namespace](http://tools.ietf.org/html/rfc4122)
-//! * [Draft RFC: New UUID Formats, Version 4](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-04)
+//! * [RFC 9562: Universally Unique IDentifiers (UUID)](https://www.ietf.org/rfc/rfc9562.html).
//!
//! [`wasm-bindgen`]: https://crates.io/crates/wasm-bindgen
//! [`cargo-web`]: https://crates.io/crates/cargo-web
@@ -211,10 +207,11 @@
#![no_std]
#![deny(missing_debug_implementations, missing_docs)]
+#![allow(clippy::mixed_attributes_style)]
#![doc(
html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
- html_root_url = "https://docs.rs/uuid/1.7.0"
+ html_root_url = "https://docs.rs/uuid/1.11.0"
)]
#[cfg(any(feature = "std", test))]
@@ -226,7 +223,7 @@
extern crate core as std;
#[cfg(all(uuid_unstable, feature = "zerocopy"))]
-use zerocopy::{AsBytes, FromBytes, Unaligned};
+use zerocopy::{IntoBytes, FromBytes, Immutable, KnownLayout, Unaligned};
mod builder;
mod error;
@@ -240,6 +237,9 @@
#[cfg(any(feature = "v1", feature = "v6"))]
pub use timestamp::context::Context;
+#[cfg(feature = "v7")]
+pub use timestamp::context::ContextV7;
+
#[cfg(feature = "v1")]
#[doc(hidden)]
// Soft-deprecated (Rust doesn't support deprecating re-exports)
@@ -294,7 +294,7 @@
///
/// # References
///
-/// * [Version in RFC4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.3)
+/// * [Version Field in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-4.2)
#[derive(Clone, Copy, Debug, PartialEq)]
#[non_exhaustive]
#[repr(u8)]
@@ -325,14 +325,15 @@
///
/// # References
///
-/// * [Variant in RFC4122](http://tools.ietf.org/html/rfc4122#section-4.1.1)
+/// * [Variant Field in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-4.1)
#[derive(Clone, Copy, Debug, PartialEq)]
#[non_exhaustive]
#[repr(u8)]
pub enum Variant {
/// Reserved by the NCS for backward compatibility.
NCS = 0u8,
- /// As described in the RFC4122 Specification (default).
+ /// As described in the RFC 9562 Specification (default).
+ /// (for backward compatibility it is not yet renamed)
RFC4122,
/// Reserved by Microsoft for backward compatibility.
Microsoft,
@@ -437,7 +438,7 @@
#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[cfg_attr(
all(uuid_unstable, feature = "zerocopy"),
- derive(AsBytes, FromBytes, Unaligned)
+ derive(IntoBytes, FromBytes, KnownLayout, Immutable, Unaligned)
)]
#[cfg_attr(
feature = "borsh",
@@ -497,7 +498,7 @@
///
/// # References
///
- /// * [Variant in RFC4122](http://tools.ietf.org/html/rfc4122#section-4.1.1)
+ /// * [Variant Field in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-4.1)
pub const fn get_variant(&self) -> Variant {
match self.as_bytes()[8] {
x if x & 0x80 == 0x00 => Variant::NCS,
@@ -532,7 +533,7 @@
///
/// # References
///
- /// * [Version in RFC4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.3)
+ /// * [Version Field in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-4.2)
pub const fn get_version_num(&self) -> usize {
(self.as_bytes()[6] >> 4) as usize
}
@@ -562,7 +563,7 @@
///
/// # References
///
- /// * [Version in RFC4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.3)
+ /// * [Version Field in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-4.2)
pub const fn get_version(&self) -> Option<Version> {
match self.get_version_num() {
0 if self.is_nil() => Some(Version::Nil),
@@ -877,35 +878,25 @@
}
/// If the UUID is the correct version (v1, v6, or v7) this will return
- /// the timestamp and counter portion parsed from a V1 UUID.
- ///
- /// Returns `None` if the supplied UUID is not V1.
- ///
- /// The V1 timestamp format defined in RFC4122 specifies a 60-bit
- /// integer representing the number of 100-nanosecond intervals
- /// since 00:00:00.00, 15 Oct 1582.
- ///
- /// [`Timestamp`] offers several options for converting the raw RFC4122
- /// value into more commonly-used formats, such as a unix timestamp.
+ /// the timestamp in a version-agnostic [`Timestamp`]. For other versions
+ /// this will return `None`.
///
/// # Roundtripping
///
/// This method is unlikely to roundtrip a timestamp in a UUID due to the way
/// UUIDs encode timestamps. The timestamp returned from this method will be truncated to
/// 100ns precision for version 1 and 6 UUIDs, and to millisecond precision for version 7 UUIDs.
- ///
- /// [`Timestamp`]: v1/struct.Timestamp.html
pub const fn get_timestamp(&self) -> Option<Timestamp> {
match self.get_version() {
Some(Version::Mac) => {
- let (ticks, counter) = timestamp::decode_rfc4122_timestamp(self);
+ let (ticks, counter) = timestamp::decode_gregorian_timestamp(self);
- Some(Timestamp::from_rfc4122(ticks, counter))
+ Some(Timestamp::from_gregorian(ticks, counter))
}
Some(Version::SortMac) => {
- let (ticks, counter) = timestamp::decode_sorted_rfc4122_timestamp(self);
+ let (ticks, counter) = timestamp::decode_sorted_gregorian_timestamp(self);
- Some(Timestamp::from_rfc4122(ticks, counter))
+ Some(Timestamp::from_gregorian(ticks, counter))
}
Some(Version::SortRand) => {
let millis = timestamp::decode_unix_timestamp_millis(self);
@@ -913,12 +904,27 @@
let seconds = millis / 1000;
let nanos = ((millis % 1000) * 1_000_000) as u32;
- Some(Timestamp {
- seconds,
- nanos,
- #[cfg(any(feature = "v1", feature = "v6"))]
- counter: 0,
- })
+ Some(Timestamp::from_unix_time(seconds, nanos, 0, 0))
+ }
+ _ => None,
+ }
+ }
+
+ /// If the UUID is the correct version (v1, or v6) this will return the
+ /// node value as a 6-byte array. For other versions this will return `None`.
+ pub const fn get_node_id(&self) -> Option<[u8; 6]> {
+ match self.get_version() {
+ Some(Version::Mac) | Some(Version::SortMac) => {
+ let mut node_id = [0; 6];
+
+ node_id[0] = self.0[10];
+ node_id[1] = self.0[11];
+ node_id[2] = self.0[12];
+ node_id[3] = self.0[13];
+ node_id[4] = self.0[14];
+ node_id[5] = self.0[15];
+
+ Some(node_id)
}
_ => None,
}
@@ -932,6 +938,13 @@
}
}
+impl AsRef<Uuid> for Uuid {
+ #[inline]
+ fn as_ref(&self) -> &Uuid {
+ self
+ }
+}
+
impl AsRef<[u8]> for Uuid {
#[inline]
fn as_ref(&self) -> &[u8] {
@@ -963,7 +976,7 @@
//! to change the way a [`Uuid`](../struct.Uuid.html) is serialized
//! and deserialized.
- pub use crate::external::serde_support::compact;
+ pub use crate::external::serde_support::{braced, compact, simple, urn};
}
#[cfg(test)]
@@ -1253,6 +1266,43 @@
),
wasm_bindgen_test
)]
+ fn test_get_timestamp_unsupported_version() {
+ let uuid = new();
+
+ assert_ne!(Version::Mac, uuid.get_version().unwrap());
+ assert_ne!(Version::SortMac, uuid.get_version().unwrap());
+ assert_ne!(Version::SortRand, uuid.get_version().unwrap());
+
+ assert!(uuid.get_timestamp().is_none());
+ }
+
+ #[test]
+ #[cfg_attr(
+ all(
+ target_arch = "wasm32",
+ target_vendor = "unknown",
+ target_os = "unknown"
+ ),
+ wasm_bindgen_test
+ )]
+ fn test_get_node_id_unsupported_version() {
+ let uuid = new();
+
+ assert_ne!(Version::Mac, uuid.get_version().unwrap());
+ assert_ne!(Version::SortMac, uuid.get_version().unwrap());
+
+ assert!(uuid.get_node_id().is_none());
+ }
+
+ #[test]
+ #[cfg_attr(
+ all(
+ target_arch = "wasm32",
+ target_vendor = "unknown",
+ target_os = "unknown"
+ ),
+ wasm_bindgen_test
+ )]
fn test_get_variant() {
let uuid1 = new();
let uuid2 = Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap();
@@ -1745,7 +1795,7 @@
fn test_as_bytes() {
let u = new();
let ub = u.as_bytes();
- let ur = u.as_ref();
+ let ur: &[u8] = u.as_ref();
assert_eq!(ub.len(), 16);
assert_eq!(ur.len(), 16);
@@ -1767,7 +1817,7 @@
use crate::std::{convert::TryInto, vec::Vec};
let u = new();
- let ub = u.as_ref();
+ let ub: &[u8] = u.as_ref();
let v: Vec<u8> = u.into();
diff --git a/crates/uuid/src/macros.rs b/crates/uuid/src/macros.rs
index eb95725..865c44d 100644
--- a/crates/uuid/src/macros.rs
+++ b/crates/uuid/src/macros.rs
@@ -4,6 +4,13 @@
#[cfg(feature = "macro-diagnostics")]
#[macro_export]
macro_rules! uuid {
+ ($uuid:expr) => {{
+ const OUTPUT: $crate::Uuid = match $crate::Uuid::try_parse($uuid) {
+ $crate::__macro_support::Ok(u) => u,
+ $crate::__macro_support::Err(_) => panic!("invalid UUID"),
+ };
+ OUTPUT
+ }};
($uuid:literal) => {{
$crate::Uuid::from_bytes($crate::uuid_macro_internal::parse_lit!($uuid))
}};
@@ -13,7 +20,7 @@
#[cfg(not(feature = "macro-diagnostics"))]
#[macro_export]
macro_rules! uuid {
- ($uuid:literal) => {{
+ ($uuid:expr) => {{
const OUTPUT: $crate::Uuid = match $crate::Uuid::try_parse($uuid) {
$crate::__macro_support::Ok(u) => u,
$crate::__macro_support::Err(_) => panic!("invalid UUID"),
@@ -50,6 +57,12 @@
/// # use uuid::uuid;
/// let uuid = uuid!("urn:uuid:F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4");
/// ```
+/// Using a const variable:
+/// ```
+/// # use uuid::uuid;
+/// const UUID_STR: &str = "12345678-1234-5678-1234-567812345678";
+/// let UUID = uuid!(UUID_STR);
+/// ```
///
/// ## Compilation Failures
///
@@ -71,22 +84,5 @@
/// | ^
/// ```
///
-/// Tokens that aren't string literals are also rejected:
-///
-/// ```compile_fail
-/// # use uuid::uuid;
-/// let uuid_str: &str = "550e8400e29b41d4a716446655440000";
-/// let uuid = uuid!(uuid_str);
-/// ```
-///
-/// Provides the following compilation error:
-///
-/// ```txt
-/// error: expected string literal
-/// |
-/// | let uuid = uuid!(uuid_str);
-/// | ^^^^^^^^
-/// ```
-///
/// [uuid::Uuid]: https://docs.rs/uuid/*/uuid/struct.Uuid.html
}
diff --git a/crates/uuid/src/parser.rs b/crates/uuid/src/parser.rs
index 0eabcfe..b400a6d 100644
--- a/crates/uuid/src/parser.rs
+++ b/crates/uuid/src/parser.rs
@@ -73,7 +73,7 @@
/// This function is similar to [`parse_str`], in fact `parse_str` shares
/// the same underlying parser. The difference is that if `try_parse`
/// fails, it won't generate very useful error messages. The `parse_str`
- /// function will eventually be deprecated in favor or `try_parse`.
+ /// function will eventually be deprecated in favor of `try_parse`.
///
/// To parse a UUID from a byte stream instead of a UTF8 string, see
/// [`try_parse_ascii`].
@@ -133,7 +133,7 @@
}
const fn try_parse(input: &[u8]) -> Result<[u8; 16], InvalidUuid> {
- let result = match (input.len(), input) {
+ match (input.len(), input) {
// Inputs of 32 bytes must be a non-hyphenated UUID
(32, s) => parse_simple(s),
// Hyphenated UUIDs may be wrapped in various ways:
@@ -146,21 +146,38 @@
parse_hyphenated(s)
}
// Any other shaped input is immediately invalid
- _ => Err(()),
- };
-
- match result {
- Ok(b) => Ok(b),
- Err(()) => Err(InvalidUuid(input)),
+ _ => Err(InvalidUuid(input)),
}
}
#[inline]
-const fn parse_simple(s: &[u8]) -> Result<[u8; 16], ()> {
+#[allow(dead_code)]
+pub(crate) const fn parse_braced(input: &[u8]) -> Result<[u8; 16], InvalidUuid> {
+ if let (38, [b'{', s @ .., b'}']) = (input.len(), input) {
+ parse_hyphenated(s)
+ } else {
+ Err(InvalidUuid(input))
+ }
+}
+
+#[inline]
+#[allow(dead_code)]
+pub(crate) const fn parse_urn(input: &[u8]) -> Result<[u8; 16], InvalidUuid> {
+ if let (45, [b'u', b'r', b'n', b':', b'u', b'u', b'i', b'd', b':', s @ ..]) =
+ (input.len(), input)
+ {
+ parse_hyphenated(s)
+ } else {
+ Err(InvalidUuid(input))
+ }
+}
+
+#[inline]
+pub(crate) const fn parse_simple(s: &[u8]) -> Result<[u8; 16], InvalidUuid> {
// This length check here removes all other bounds
// checks in this function
if s.len() != 32 {
- return Err(());
+ return Err(InvalidUuid(s));
}
let mut buf: [u8; 16] = [0; 16];
@@ -175,7 +192,7 @@
// We use `0xff` as a sentinel value to indicate
// an invalid hex character sequence (like the letter `G`)
if h1 | h2 == 0xff {
- return Err(());
+ return Err(InvalidUuid(s));
}
// The upper nibble needs to be shifted into position
@@ -188,11 +205,11 @@
}
#[inline]
-const fn parse_hyphenated(s: &[u8]) -> Result<[u8; 16], ()> {
+const fn parse_hyphenated(s: &[u8]) -> Result<[u8; 16], InvalidUuid> {
// This length check here removes all other bounds
// checks in this function
if s.len() != 36 {
- return Err(());
+ return Err(InvalidUuid(s));
}
// We look at two hex-encoded values (4 chars) at a time because
@@ -207,7 +224,7 @@
// First, ensure the hyphens appear in the right places
match [s[8], s[13], s[18], s[23]] {
[b'-', b'-', b'-', b'-'] => {}
- _ => return Err(()),
+ _ => return Err(InvalidUuid(s)),
}
let positions: [u8; 8] = [0, 4, 9, 14, 19, 24, 28, 32];
@@ -225,7 +242,7 @@
let h4 = HEX_TABLE[s[(i + 3) as usize] as usize];
if h1 | h2 | h3 | h4 == 0xff {
- return Err(());
+ return Err(InvalidUuid(s));
}
buf[j * 2] = SHL4_TABLE[h1 as usize] | h2;
@@ -523,6 +540,22 @@
}
#[test]
+ fn test_roundtrip_parse_urn() {
+ let uuid_orig = new();
+ let orig_str = uuid_orig.urn().to_string();
+ let uuid_out = Uuid::from_bytes(parse_urn(orig_str.as_bytes()).unwrap());
+ assert_eq!(uuid_orig, uuid_out);
+ }
+
+ #[test]
+ fn test_roundtrip_parse_braced() {
+ let uuid_orig = new();
+ let orig_str = uuid_orig.braced().to_string();
+ let uuid_out = Uuid::from_bytes(parse_braced(orig_str.as_bytes()).unwrap());
+ assert_eq!(uuid_orig, uuid_out);
+ }
+
+ #[test]
fn test_try_parse_ascii_non_utf8() {
assert!(Uuid::try_parse_ascii(b"67e55044-10b1-426f-9247-bb680e5\0e0c8").is_err());
}
diff --git a/crates/uuid/src/rng.rs b/crates/uuid/src/rng.rs
index dcfbb8d..2ae23cd 100644
--- a/crates/uuid/src/rng.rs
+++ b/crates/uuid/src/rng.rs
@@ -1,5 +1,5 @@
#[cfg(any(feature = "v4", feature = "v7"))]
-pub(crate) fn bytes() -> [u8; 16] {
+pub(crate) fn u128() -> u128 {
#[cfg(not(feature = "fast-rng"))]
{
let mut bytes = [0u8; 16];
@@ -9,7 +9,7 @@
panic!("could not retrieve random bytes for uuid: {}", err)
});
- bytes
+ u128::from_ne_bytes(bytes)
}
#[cfg(feature = "fast-rng")]
@@ -29,7 +29,27 @@
panic!("could not retrieve random bytes for uuid: {}", err)
});
- ((bytes[0] as u16) << 8) | (bytes[1] as u16)
+ u16::from_ne_bytes(bytes)
+ }
+
+ #[cfg(feature = "fast-rng")]
+ {
+ rand::random()
+ }
+}
+
+#[cfg(feature = "v7")]
+pub(crate) fn u64() -> u64 {
+ #[cfg(not(feature = "fast-rng"))]
+ {
+ let mut bytes = [0u8; 8];
+
+ getrandom::getrandom(&mut bytes).unwrap_or_else(|err| {
+ // NB: getrandom::Error has no source; this is adequate display
+ panic!("could not retrieve random bytes for uuid: {}", err)
+ });
+
+ u64::from_ne_bytes(bytes)
}
#[cfg(feature = "fast-rng")]
diff --git a/crates/uuid/src/timestamp.rs b/crates/uuid/src/timestamp.rs
index 27112d1..d147199 100644
--- a/crates/uuid/src/timestamp.rs
+++ b/crates/uuid/src/timestamp.rs
@@ -17,12 +17,15 @@
//!
//! # References
//!
-//! * [Timestamp in RFC4122](https://www.rfc-editor.org/rfc/rfc4122#section-4.1.4)
-//! * [Timestamp in Draft RFC: New UUID Formats, Version 4](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-04#section-6.1)
+//! * [UUID Version 1 in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.1)
+//! * [UUID Version 7 in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.7)
+//! * [Timestamp Considerations in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-6.1)
+
+use core::cmp;
use crate::Uuid;
-/// The number of 100 nanosecond ticks between the RFC4122 epoch
+/// The number of 100 nanosecond ticks between the RFC 9562 epoch
/// (`1582-10-15 00:00:00`) and the Unix epoch (`1970-01-01 00:00:00`).
pub const UUID_TICKS_BETWEEN_EPOCHS: u64 = 0x01B2_1DD2_1381_4000;
@@ -34,141 +37,155 @@
///
/// # References
///
-/// * [Timestamp in RFC4122](https://www.rfc-editor.org/rfc/rfc4122#section-4.1.4)
-/// * [Timestamp in Draft RFC: New UUID Formats, Version 4](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-04#section-6.1)
-/// * [Clock Sequence in RFC4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.5)
+/// * [Timestamp Considerations in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-6.1)
+/// * [UUID Generator States in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-6.3)
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Timestamp {
- pub(crate) seconds: u64,
- pub(crate) nanos: u32,
- #[cfg(any(feature = "v1", feature = "v6"))]
- pub(crate) counter: u16,
+ seconds: u64,
+ subsec_nanos: u32,
+ counter: u128,
+ usable_counter_bits: u8,
}
impl Timestamp {
- /// Get a timestamp representing the current system time.
+ /// Get a timestamp representing the current system time and up to a 128-bit counter.
///
/// This method defers to the standard library's `SystemTime` type.
- ///
- /// # Panics
- ///
- /// This method will panic if calculating the elapsed time since the Unix epoch fails.
#[cfg(feature = "std")]
- pub fn now(context: impl ClockSequence<Output = u16>) -> Self {
- #[cfg(not(any(feature = "v1", feature = "v6")))]
- {
- let _ = context;
- }
+ pub fn now(context: impl ClockSequence<Output = impl Into<u128>>) -> Self {
+ let (seconds, subsec_nanos) = now();
- let (seconds, nanos) = now();
+ let (counter, seconds, subsec_nanos) =
+ context.generate_timestamp_sequence(seconds, subsec_nanos);
+ let counter = counter.into();
+ let usable_counter_bits = context.usable_bits() as u8;
Timestamp {
seconds,
- nanos,
- #[cfg(any(feature = "v1", feature = "v6"))]
- counter: context.generate_sequence(seconds, nanos),
- }
- }
-
- /// Construct a `Timestamp` from an RFC4122 timestamp and counter, as used
- /// in versions 1 and 6 UUIDs.
- ///
- /// # Overflow
- ///
- /// If conversion from RFC4122 ticks to the internal timestamp format would overflow
- /// it will wrap.
- pub const fn from_rfc4122(ticks: u64, counter: u16) -> Self {
- #[cfg(not(any(feature = "v1", feature = "v6")))]
- {
- let _ = counter;
- }
-
- let (seconds, nanos) = Self::rfc4122_to_unix(ticks);
-
- Timestamp {
- seconds,
- nanos,
- #[cfg(any(feature = "v1", feature = "v6"))]
+ subsec_nanos,
counter,
+ usable_counter_bits,
}
}
- /// Construct a `Timestamp` from a Unix timestamp, as used in version 7 UUIDs.
+ /// Construct a `Timestamp` from the number of 100 nanosecond ticks since 00:00:00.00,
+ /// 15 October 1582 (the date of Gregorian reform to the Christian calendar) and a 14-bit
+ /// counter, as used in versions 1 and 6 UUIDs.
///
/// # Overflow
///
- /// If conversion from RFC4122 ticks to the internal timestamp format would overflow
+ /// If conversion from RFC 9562 ticks to the internal timestamp format would overflow
/// it will wrap.
- pub fn from_unix(context: impl ClockSequence<Output = u16>, seconds: u64, nanos: u32) -> Self {
- #[cfg(not(any(feature = "v1", feature = "v6")))]
- {
- let _ = context;
+ pub const fn from_gregorian(ticks: u64, counter: u16) -> Self {
+ let (seconds, subsec_nanos) = Self::gregorian_to_unix(ticks);
- Timestamp { seconds, nanos }
- }
- #[cfg(any(feature = "v1", feature = "v6"))]
- {
- let counter = context.generate_sequence(seconds, nanos);
-
- Timestamp {
- seconds,
- nanos,
- counter,
- }
+ Timestamp {
+ seconds,
+ subsec_nanos,
+ counter: counter as u128,
+ usable_counter_bits: 14,
}
}
- /// Get the value of the timestamp as an RFC4122 timestamp and counter,
- /// as used in versions 1 and 6 UUIDs.
+ /// Construct a `Timestamp` from a Unix timestamp and up to a 128-bit counter, as used in version 7 UUIDs.
+ pub const fn from_unix_time(
+ seconds: u64,
+ subsec_nanos: u32,
+ counter: u128,
+ usable_counter_bits: u8,
+ ) -> Self {
+ Timestamp {
+ seconds,
+ subsec_nanos,
+ counter,
+ usable_counter_bits,
+ }
+ }
+
+ /// Construct a `Timestamp` from a Unix timestamp and up to a 128-bit counter, as used in version 7 UUIDs.
+ pub fn from_unix(
+ context: impl ClockSequence<Output = impl Into<u128>>,
+ seconds: u64,
+ subsec_nanos: u32,
+ ) -> Self {
+ let (counter, seconds, subsec_nanos) =
+ context.generate_timestamp_sequence(seconds, subsec_nanos);
+ let counter = counter.into();
+ let usable_counter_bits = context.usable_bits() as u8;
+
+ Timestamp {
+ seconds,
+ subsec_nanos,
+ counter,
+ usable_counter_bits,
+ }
+ }
+
+ /// Get the value of the timestamp as the number of 100 nanosecond ticks since 00:00:00.00,
+ /// 15 October 1582 and a 14-bit counter, as used in versions 1 and 6 UUIDs.
///
/// # Overflow
///
- /// If conversion from RFC4122 ticks to the internal timestamp format would overflow
- /// it will wrap.
- #[cfg(any(feature = "v1", feature = "v6"))]
- pub const fn to_rfc4122(&self) -> (u64, u16) {
+ /// If conversion from the internal timestamp format to ticks would overflow
+ /// then it will wrap.
+ ///
+ /// If the internal counter is wider than 14 bits then it will be truncated to 14 bits.
+ pub const fn to_gregorian(&self) -> (u64, u16) {
(
- Self::unix_to_rfc4122_ticks(self.seconds, self.nanos),
- self.counter,
+ Self::unix_to_gregorian_ticks(self.seconds, self.subsec_nanos),
+ (self.counter as u16) & 0x3FFF,
)
}
- /// Get the value of the timestamp as a Unix timestamp, as used in version 7 UUIDs.
- ///
- /// # Overflow
- ///
- /// If conversion from RFC4122 ticks to the internal timestamp format would overflow
- /// it will wrap.
- pub const fn to_unix(&self) -> (u64, u32) {
- (self.seconds, self.nanos)
+ // NOTE: This method is not public; the usable counter bits are lost in a version 7 UUID
+ // so can't be reliably recovered.
+ #[cfg(feature = "v7")]
+ pub(crate) const fn counter(&self) -> (u128, u8) {
+ (self.counter, self.usable_counter_bits)
}
- #[cfg(any(feature = "v1", feature = "v6"))]
- const fn unix_to_rfc4122_ticks(seconds: u64, nanos: u32) -> u64 {
+ /// Get the value of the timestamp as a Unix timestamp, as used in version 7 UUIDs.
+ pub const fn to_unix(&self) -> (u64, u32) {
+ (self.seconds, self.subsec_nanos)
+ }
+
+ const fn unix_to_gregorian_ticks(seconds: u64, nanos: u32) -> u64 {
UUID_TICKS_BETWEEN_EPOCHS
.wrapping_add(seconds.wrapping_mul(10_000_000))
.wrapping_add(nanos as u64 / 100)
}
- const fn rfc4122_to_unix(ticks: u64) -> (u64, u32) {
+ const fn gregorian_to_unix(ticks: u64) -> (u64, u32) {
(
ticks.wrapping_sub(UUID_TICKS_BETWEEN_EPOCHS) / 10_000_000,
(ticks.wrapping_sub(UUID_TICKS_BETWEEN_EPOCHS) % 10_000_000) as u32 * 100,
)
}
+}
- #[deprecated(note = "use `to_unix` instead; this method will be removed in a future release")]
- /// Get the number of fractional nanoseconds in the Unix timestamp.
- ///
- /// This method is deprecated and probably doesn't do what you're expecting it to.
- /// It doesn't return the timestamp as nanoseconds since the Unix epoch, it returns
- /// the fractional seconds of the timestamp.
+#[doc(hidden)]
+impl Timestamp {
+ #[deprecated(since = "1.11.0", note = "use `Timestamp::from_gregorian(ticks, counter)`")]
+ pub const fn from_rfc4122(ticks: u64, counter: u16) -> Self {
+ Timestamp::from_gregorian(ticks, counter)
+ }
+
+ #[deprecated(since = "1.11.0", note = "use `Timestamp::to_gregorian()`")]
+ pub const fn to_rfc4122(&self) -> (u64, u16) {
+ self.to_gregorian()
+ }
+
+ #[deprecated(since = "1.2.0", note = "`Timestamp::to_unix_nanos()` is deprecated and will be removed: use `Timestamp::to_unix()`")]
pub const fn to_unix_nanos(&self) -> u32 {
- panic!("`Timestamp::to_unix_nanos` is deprecated and will be removed: use `Timestamp::to_unix` instead")
+ panic!("`Timestamp::to_unix_nanos()` is deprecated and will be removed: use `Timestamp::to_unix()`")
}
}
-pub(crate) const fn encode_rfc4122_timestamp(ticks: u64, counter: u16, node_id: &[u8; 6]) -> Uuid {
+pub(crate) const fn encode_gregorian_timestamp(
+ ticks: u64,
+ counter: u16,
+ node_id: &[u8; 6],
+) -> Uuid {
let time_low = (ticks & 0xFFFF_FFFF) as u32;
let time_mid = ((ticks >> 32) & 0xFFFF) as u16;
let time_high_and_version = (((ticks >> 48) & 0x0FFF) as u16) | (1 << 12);
@@ -187,7 +204,7 @@
Uuid::from_fields(time_low, time_mid, time_high_and_version, &d4)
}
-pub(crate) const fn decode_rfc4122_timestamp(uuid: &Uuid) -> (u64, u16) {
+pub(crate) const fn decode_gregorian_timestamp(uuid: &Uuid) -> (u64, u16) {
let bytes = uuid.as_bytes();
let ticks: u64 = ((bytes[6] & 0x0F) as u64) << 56
@@ -204,7 +221,7 @@
(ticks, counter)
}
-pub(crate) const fn encode_sorted_rfc4122_timestamp(
+pub(crate) const fn encode_sorted_gregorian_timestamp(
ticks: u64,
counter: u16,
node_id: &[u8; 6],
@@ -227,7 +244,7 @@
Uuid::from_fields(time_high, time_mid, time_low_and_version, &d4)
}
-pub(crate) const fn decode_sorted_rfc4122_timestamp(uuid: &Uuid) -> (u64, u16) {
+pub(crate) const fn decode_sorted_gregorian_timestamp(uuid: &Uuid) -> (u64, u16) {
let bytes = uuid.as_bytes();
let ticks: u64 = ((bytes[0]) as u64) << 52
@@ -244,25 +261,29 @@
(ticks, counter)
}
-pub(crate) const fn encode_unix_timestamp_millis(millis: u64, random_bytes: &[u8; 10]) -> Uuid {
+pub(crate) const fn encode_unix_timestamp_millis(
+ millis: u64,
+ counter_random_bytes: &[u8; 10],
+) -> Uuid {
let millis_high = ((millis >> 16) & 0xFFFF_FFFF) as u32;
let millis_low = (millis & 0xFFFF) as u16;
- let random_and_version =
- (random_bytes[1] as u16 | ((random_bytes[0] as u16) << 8) & 0x0FFF) | (0x7 << 12);
+ let counter_random_version = (counter_random_bytes[1] as u16
+ | ((counter_random_bytes[0] as u16) << 8) & 0x0FFF)
+ | (0x7 << 12);
let mut d4 = [0; 8];
- d4[0] = (random_bytes[2] & 0x3F) | 0x80;
- d4[1] = random_bytes[3];
- d4[2] = random_bytes[4];
- d4[3] = random_bytes[5];
- d4[4] = random_bytes[6];
- d4[5] = random_bytes[7];
- d4[6] = random_bytes[8];
- d4[7] = random_bytes[9];
+ d4[0] = (counter_random_bytes[2] & 0x3F) | 0x80;
+ d4[1] = counter_random_bytes[3];
+ d4[2] = counter_random_bytes[4];
+ d4[3] = counter_random_bytes[5];
+ d4[4] = counter_random_bytes[6];
+ d4[5] = counter_random_bytes[7];
+ d4[6] = counter_random_bytes[8];
+ d4[7] = counter_random_bytes[9];
- Uuid::from_fields(millis_high, millis_low, random_and_version, &d4)
+ Uuid::from_fields(millis_high, millis_low, counter_random_version, &d4)
}
pub(crate) const fn decode_unix_timestamp_millis(uuid: &Uuid) -> u64 {
@@ -307,6 +328,7 @@
#[cfg(all(
feature = "std",
+ not(miri),
any(
not(feature = "js"),
not(all(
@@ -324,12 +346,29 @@
(dur.as_secs(), dur.subsec_nanos())
}
-/// A counter that can be used by version 1 and version 6 UUIDs to support
+#[cfg(all(feature = "std", miri))]
+fn now() -> (u64, u32) {
+ use std::{sync::Mutex, time::Duration};
+
+ static TS: Mutex<u64> = Mutex::new(0);
+
+ let ts = Duration::from_nanos({
+ let mut ts = TS.lock().unwrap();
+ *ts += 1;
+ *ts
+ });
+
+ (ts.as_secs(), ts.subsec_nanos())
+}
+
+/// A counter that can be used by versions 1 and 6 UUIDs to support
/// the uniqueness of timestamps.
///
/// # References
///
-/// * [Clock Sequence in RFC4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.5)
+/// * [UUID Version 1 in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.1)
+/// * [UUID Version 6 in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.6)
+/// * [UUID Generator States in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-6.3)
pub trait ClockSequence {
/// The type of sequence returned by this counter.
type Output;
@@ -337,14 +376,63 @@
/// Get the next value in the sequence to feed into a timestamp.
///
/// This method will be called each time a [`Timestamp`] is constructed.
+ ///
+ /// Any bits beyond [`ClockSequence::usable_bits`] in the output must be unset.
fn generate_sequence(&self, seconds: u64, subsec_nanos: u32) -> Self::Output;
+
+ /// Get the next value in the sequence, potentially also adjusting the timestamp.
+ ///
+ /// This method should be preferred over `generate_sequence`.
+ ///
+ /// Any bits beyond [`ClockSequence::usable_bits`] in the output must be unset.
+ fn generate_timestamp_sequence(
+ &self,
+ seconds: u64,
+ subsec_nanos: u32,
+ ) -> (Self::Output, u64, u32) {
+ (
+ self.generate_sequence(seconds, subsec_nanos),
+ seconds,
+ subsec_nanos,
+ )
+ }
+
+ /// The number of usable bits from the least significant bit in the result of [`ClockSequence::generate_sequence`]
+ /// or [`ClockSequence::generate_timestamp_sequence`].
+ ///
+ /// The number of usable bits must not exceed 128.
+ ///
+ /// The number of usable bits is not expected to change between calls. An implementation of `ClockSequence` should
+ /// always return the same value from this method.
+ fn usable_bits(&self) -> usize
+ where
+ Self::Output: Sized,
+ {
+ cmp::min(128, core::mem::size_of::<Self::Output>() * 8)
+ }
}
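A minimal sketch of a custom `ClockSequence` honoring the contract above (all bits beyond `usable_bits()` left unset); the `FixedCounter` type is illustrative only and not part of this crate:

```rust
use uuid::{ClockSequence, Timestamp};

// Toy 8-bit counter: returns a constant, and reports exactly 8 usable bits.
struct FixedCounter(u8);

impl ClockSequence for FixedCounter {
    type Output = u8;

    fn generate_sequence(&self, _seconds: u64, _subsec_nanos: u32) -> Self::Output {
        self.0
    }

    fn usable_bits(&self) -> usize {
        8
    }
}

fn main() {
    // The default generate_timestamp_sequence passes the timestamp through unchanged.
    let ts = Timestamp::from_unix(&FixedCounter(42), 1_496_854_535, 812_946_000);
    assert_eq!((1_496_854_535, 812_946_000), ts.to_unix());
}
```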
impl<'a, T: ClockSequence + ?Sized> ClockSequence for &'a T {
type Output = T::Output;
+
fn generate_sequence(&self, seconds: u64, subsec_nanos: u32) -> Self::Output {
(**self).generate_sequence(seconds, subsec_nanos)
}
+
+ fn generate_timestamp_sequence(
+ &self,
+ seconds: u64,
+ subsec_nanos: u32,
+ ) -> (Self::Output, u64, u32) {
+ (**self).generate_timestamp_sequence(seconds, subsec_nanos)
+ }
+
+ fn usable_bits(&self) -> usize
+ where
+ Self::Output: Sized,
+ {
+ (**self).usable_bits()
+ }
}
/// Default implementations for the [`ClockSequence`] trait.
@@ -352,12 +440,476 @@
use super::ClockSequence;
#[cfg(any(feature = "v1", feature = "v6"))]
- use atomic::{Atomic, Ordering};
+ mod v1_support {
+ use super::*;
+
+ use atomic::{Atomic, Ordering};
+
+ #[cfg(all(feature = "std", feature = "rng"))]
+ static CONTEXT: Context = Context {
+ count: Atomic::new(0),
+ };
+
+ #[cfg(all(feature = "std", feature = "rng"))]
+ static CONTEXT_INITIALIZED: Atomic<bool> = Atomic::new(false);
+
+ #[cfg(all(feature = "std", feature = "rng"))]
+ pub(crate) fn shared_context() -> &'static Context {
+ // If the context is in its initial state then assign it to a random value
+ // It doesn't matter if multiple threads observe `false` here and initialize the context
+ if CONTEXT_INITIALIZED
+ .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
+ .is_ok()
+ {
+ CONTEXT.count.store(crate::rng::u16(), Ordering::Release);
+ }
+
+ &CONTEXT
+ }
+
+ /// A thread-safe, wrapping counter that produces 14-bit values.
+ ///
+ /// This type works by:
+ ///
+ /// 1. Atomically incrementing the counter value for each timestamp.
+ /// 2. Wrapping the counter back to zero if it overflows its 14-bit storage.
+ ///
+ /// This type should be used when constructing versions 1 and 6 UUIDs.
+ ///
+ /// This type should not be used when constructing version 7 UUIDs. When used to
+ /// construct a version 7 UUID, the 14-bit counter will be padded with random data.
+ /// Counter overflows are more likely with a 14-bit counter than they are with a
+ /// 42-bit counter when working at millisecond precision. This type doesn't attempt
+ /// to adjust the timestamp on overflow.
+ #[derive(Debug)]
+ pub struct Context {
+ count: Atomic<u16>,
+ }
+
+ impl Context {
+ /// Construct a new context that's initialized with the given value.
+ ///
+ /// The starting value should be a random number, so that UUIDs from
+ /// different systems with the same timestamps are less likely to collide.
+ /// When the `rng` feature is enabled, prefer the [`Context::new_random`] method.
+ pub const fn new(count: u16) -> Self {
+ Self {
+ count: Atomic::<u16>::new(count),
+ }
+ }
+
+ /// Construct a new context that's initialized with a random value.
+ #[cfg(feature = "rng")]
+ pub fn new_random() -> Self {
+ Self {
+ count: Atomic::<u16>::new(crate::rng::u16()),
+ }
+ }
+ }
+
+ impl ClockSequence for Context {
+ type Output = u16;
+
+ fn generate_sequence(&self, _seconds: u64, _nanos: u32) -> Self::Output {
+ // RFC 9562 reserves 2 bits of the clock sequence so the actual
+ // maximum value is smaller than `u16::MAX`. Since we unconditionally
+ // increment the clock sequence we want to wrap once it becomes larger
+ // than what we can represent in a "u14". Otherwise there'd be patches
+ // where the clock sequence doesn't change regardless of the timestamp
+ self.count.fetch_add(1, Ordering::AcqRel) & (u16::MAX >> 2)
+ }
+
+ fn usable_bits(&self) -> usize {
+ 14
+ }
+ }
+
+ #[cfg(test)]
+ mod tests {
+ use crate::Timestamp;
+
+ use super::*;
+
+ #[test]
+ fn context() {
+ let seconds = 1_496_854_535;
+ let subsec_nanos = 812_946_000;
+
+ let context = Context::new(u16::MAX >> 2);
+
+ let ts = Timestamp::from_unix(&context, seconds, subsec_nanos);
+ assert_eq!(16383, ts.counter);
+ assert_eq!(14, ts.usable_counter_bits);
+
+ let seconds = 1_496_854_536;
+
+ let ts = Timestamp::from_unix(&context, seconds, subsec_nanos);
+ assert_eq!(0, ts.counter);
+
+ let seconds = 1_496_854_535;
+
+ let ts = Timestamp::from_unix(&context, seconds, subsec_nanos);
+ assert_eq!(1, ts.counter);
+ }
+ }
+ }
+
+ #[cfg(any(feature = "v1", feature = "v6"))]
+ pub use v1_support::*;
+
+ #[cfg(feature = "std")]
+ mod std_support {
+ use super::*;
+
+ use core::panic::{AssertUnwindSafe, RefUnwindSafe};
+ use std::{sync::Mutex, thread::LocalKey};
+
+ /// A wrapper for a context that uses thread-local storage.
+ pub struct ThreadLocalContext<C: 'static>(&'static LocalKey<C>);
+
+ impl<C> std::fmt::Debug for ThreadLocalContext<C> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("ThreadLocalContext").finish_non_exhaustive()
+ }
+ }
+
+ impl<C: 'static> ThreadLocalContext<C> {
+ /// Wrap a thread-local container with a context.
+ pub const fn new(local_key: &'static LocalKey<C>) -> Self {
+ ThreadLocalContext(local_key)
+ }
+ }
+
+ impl<C: ClockSequence + 'static> ClockSequence for ThreadLocalContext<C> {
+ type Output = C::Output;
+
+ fn generate_sequence(&self, seconds: u64, subsec_nanos: u32) -> Self::Output {
+ self.0
+ .with(|ctxt| ctxt.generate_sequence(seconds, subsec_nanos))
+ }
+
+ fn generate_timestamp_sequence(
+ &self,
+ seconds: u64,
+ subsec_nanos: u32,
+ ) -> (Self::Output, u64, u32) {
+ self.0
+ .with(|ctxt| ctxt.generate_timestamp_sequence(seconds, subsec_nanos))
+ }
+
+ fn usable_bits(&self) -> usize {
+ self.0.with(|ctxt| ctxt.usable_bits())
+ }
+ }
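A rough usage sketch of the thread-local wrapper above; the `uuid::timestamp::context::ThreadLocalContext` path and the `std` + `v7` feature set are assumptions based on the module layout in this diff:

```rust
use uuid::timestamp::context::ThreadLocalContext;
use uuid::{ContextV7, Timestamp, Uuid};

thread_local! {
    // ContextV7 is unsynchronized (it uses Cell), so each thread gets its own.
    static CONTEXT: ContextV7 = ContextV7::new();
}

fn main() {
    let ctx = ThreadLocalContext::new(&CONTEXT);

    let a = Uuid::new_v7(Timestamp::now(&ctx));
    let b = Uuid::new_v7(Timestamp::now(&ctx));

    // Within one thread the underlying ContextV7 keeps the pair ordered.
    assert!(a < b);
}
```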
+
+ impl<C: ClockSequence> ClockSequence for AssertUnwindSafe<C> {
+ type Output = C::Output;
+
+ fn generate_sequence(&self, seconds: u64, subsec_nanos: u32) -> Self::Output {
+ self.0.generate_sequence(seconds, subsec_nanos)
+ }
+
+ fn generate_timestamp_sequence(
+ &self,
+ seconds: u64,
+ subsec_nanos: u32,
+ ) -> (Self::Output, u64, u32) {
+ self.0.generate_timestamp_sequence(seconds, subsec_nanos)
+ }
+
+ fn usable_bits(&self) -> usize
+ where
+ Self::Output: Sized,
+ {
+ self.0.usable_bits()
+ }
+ }
+
+ impl<C: ClockSequence + RefUnwindSafe> ClockSequence for Mutex<C> {
+ type Output = C::Output;
+
+ fn generate_sequence(&self, seconds: u64, subsec_nanos: u32) -> Self::Output {
+ self.lock()
+ .unwrap_or_else(|err| err.into_inner())
+ .generate_sequence(seconds, subsec_nanos)
+ }
+
+ fn generate_timestamp_sequence(
+ &self,
+ seconds: u64,
+ subsec_nanos: u32,
+ ) -> (Self::Output, u64, u32) {
+ self.lock()
+ .unwrap_or_else(|err| err.into_inner())
+ .generate_timestamp_sequence(seconds, subsec_nanos)
+ }
+
+ fn usable_bits(&self) -> usize
+ where
+ Self::Output: Sized,
+ {
+ self.lock()
+ .unwrap_or_else(|err| err.into_inner())
+ .usable_bits()
+ }
+ }
+ }
+
+ #[cfg(feature = "std")]
+ pub use std_support::*;
+
+ #[cfg(feature = "v7")]
+ mod v7_support {
+ use super::*;
+
+ use core::{cell::Cell, panic::RefUnwindSafe};
+
+ #[cfg(feature = "std")]
+ static CONTEXT_V7: SharedContextV7 =
+ SharedContextV7(std::sync::Mutex::new(ContextV7::new()));
+
+ #[cfg(feature = "std")]
+ pub(crate) fn shared_context_v7() -> &'static SharedContextV7 {
+ &CONTEXT_V7
+ }
+
+ const USABLE_BITS: usize = 42;
+
+ // Leave the most significant bit unset
+ // This guarantees the counter has at least 2,199,023,255,552
+ // values before it will overflow, which is exceptionally unlikely
+ // even in the worst case
+ const RESEED_MASK: u64 = u64::MAX >> 23;
+ const MAX_COUNTER: u64 = u64::MAX >> 22;
+
+ /// An unsynchronized, reseeding counter that produces 42-bit values.
+ ///
+ /// This type works by:
+ ///
+ /// 1. Reseeding the counter each millisecond with a random 41-bit value. The 42nd bit
+ /// is left unset so the counter can safely increment over the millisecond.
+ /// 2. Wrapping the counter back to zero if it overflows its 42-bit storage and adding a
+ /// millisecond to the timestamp.
+ ///
+ /// This type can be used when constructing version 7 UUIDs. When used to construct a
+ /// version 7 UUID, the 42-bit counter will be padded with random data. This type can
+ /// be used to maintain ordering of UUIDs within the same millisecond.
+ ///
+ /// This type should not be used when constructing version 1 or version 6 UUIDs.
+ /// When used to construct a version 1 or version 6 UUID, only the 14 least significant
+ /// bits of the counter will be used.
+ #[derive(Debug)]
+ pub struct ContextV7 {
+ last_reseed: Cell<LastReseed>,
+ counter: Cell<u64>,
+ }
+
+ #[derive(Debug, Default, Clone, Copy)]
+ struct LastReseed {
+ millis: u64,
+ ts_seconds: u64,
+ ts_subsec_nanos: u32,
+ }
+
+ impl LastReseed {
+ fn from_millis(millis: u64) -> Self {
+ LastReseed {
+ millis,
+ ts_seconds: millis / 1_000,
+ ts_subsec_nanos: (millis % 1_000) as u32 * 1_000_000,
+ }
+ }
+ }
+
+ impl RefUnwindSafe for ContextV7 {}
+
+ impl ContextV7 {
+ /// Construct a new context that will reseed its counter on the first
+ /// non-zero timestamp it receives.
+ pub const fn new() -> Self {
+ ContextV7 {
+ last_reseed: Cell::new(LastReseed {
+ millis: 0,
+ ts_seconds: 0,
+ ts_subsec_nanos: 0,
+ }),
+ counter: Cell::new(0),
+ }
+ }
+ }
+
+ impl ClockSequence for ContextV7 {
+ type Output = u64;
+
+ fn generate_sequence(&self, seconds: u64, subsec_nanos: u32) -> Self::Output {
+ self.generate_timestamp_sequence(seconds, subsec_nanos).0
+ }
+
+ fn generate_timestamp_sequence(
+ &self,
+ seconds: u64,
+ subsec_nanos: u32,
+ ) -> (Self::Output, u64, u32) {
+ let millis = (seconds * 1_000).saturating_add(subsec_nanos as u64 / 1_000_000);
+
+ let last_reseed = self.last_reseed.get();
+
+ // If the observed system time has shifted forwards then regenerate the counter
+ if millis > last_reseed.millis {
+ let last_reseed = LastReseed::from_millis(millis);
+ self.last_reseed.set(last_reseed);
+
+ let counter = crate::rng::u64() & RESEED_MASK;
+ self.counter.set(counter);
+
+ (counter, last_reseed.ts_seconds, last_reseed.ts_subsec_nanos)
+ }
+ // If the observed system time has not shifted forwards then increment the counter
+ else {
+ // If the incoming timestamp is earlier than the last observed one then
+ // use it instead. This may happen if the system clock jitters, or if the counter
+ // has wrapped and the timestamp is artificially incremented
+ let millis = ();
+ let _ = millis;
+
+ // Guaranteed to never overflow u64
+ let counter = self.counter.get() + 1;
+
+ // If the counter has not overflowed its 42-bit storage then return it
+ if counter <= MAX_COUNTER {
+ self.counter.set(counter);
+
+ (counter, last_reseed.ts_seconds, last_reseed.ts_subsec_nanos)
+ }
+ // Unlikely: If the counter has overflowed its 42-bit storage then wrap it
+ // and increment the timestamp. Until the observed system time shifts past
+ // this incremented value, all timestamps will use it to maintain monotonicity
+ else {
+ // Increment the timestamp by 1 milli
+ let last_reseed = LastReseed::from_millis(last_reseed.millis + 1);
+ self.last_reseed.set(last_reseed);
+
+ // Reseed the counter
+ let counter = crate::rng::u64() & RESEED_MASK;
+ self.counter.set(counter);
+
+ (counter, last_reseed.ts_seconds, last_reseed.ts_subsec_nanos)
+ }
+ }
+ }
+
+ fn usable_bits(&self) -> usize {
+ USABLE_BITS
+ }
+ }
+
+ #[cfg(feature = "std")]
+ pub(crate) struct SharedContextV7(std::sync::Mutex<ContextV7>);
+
+ #[cfg(feature = "std")]
+ impl ClockSequence for SharedContextV7 {
+ type Output = u64;
+
+ fn generate_sequence(&self, seconds: u64, subsec_nanos: u32) -> Self::Output {
+ self.0.generate_sequence(seconds, subsec_nanos)
+ }
+
+ fn generate_timestamp_sequence(
+ &self,
+ seconds: u64,
+ subsec_nanos: u32,
+ ) -> (Self::Output, u64, u32) {
+ self.0.generate_timestamp_sequence(seconds, subsec_nanos)
+ }
+
+ fn usable_bits(&self) -> usize
+ where
+ Self::Output: Sized,
+ {
+ USABLE_BITS
+ }
+ }
+
+ #[cfg(test)]
+ mod tests {
+ use core::time::Duration;
+
+ use super::*;
+
+ use crate::Timestamp;
+
+ #[test]
+ fn context() {
+ let seconds = 1_496_854_535;
+ let subsec_nanos = 812_946_000;
+
+ let context = ContextV7::new();
+
+ let ts1 = Timestamp::from_unix(&context, seconds, subsec_nanos);
+ assert_eq!(42, ts1.usable_counter_bits);
+
+ // Backwards second
+ let seconds = 1_496_854_534;
+
+ let ts2 = Timestamp::from_unix(&context, seconds, subsec_nanos);
+
+ // The backwards time should be ignored
+ // The counter should still increment
+ assert_eq!(ts1.seconds, ts2.seconds);
+ assert_eq!(ts1.subsec_nanos, ts2.subsec_nanos);
+ assert_eq!(ts1.counter + 1, ts2.counter);
+
+ // Forwards second
+ let seconds = 1_496_854_536;
+
+ let ts3 = Timestamp::from_unix(&context, seconds, subsec_nanos);
+
+ // The counter should have reseeded
+ assert_ne!(ts2.counter + 1, ts3.counter);
+ assert_ne!(0, ts3.counter);
+ }
+
+ #[test]
+ fn context_wrap() {
+ let seconds = 1_496_854_535u64;
+ let subsec_nanos = 812_946_000u32;
+
+ let millis = (seconds * 1000).saturating_add(subsec_nanos as u64 / 1_000_000);
+
+ // This context will wrap
+ let context = ContextV7 {
+ last_reseed: Cell::new(LastReseed::from_millis(millis)),
+ counter: Cell::new(u64::MAX >> 22),
+ };
+
+ let ts = Timestamp::from_unix(&context, seconds, subsec_nanos);
+
+ // The timestamp should be incremented by 1ms
+ let expected_ts = Duration::new(seconds, subsec_nanos / 1_000_000 * 1_000_000)
+ + Duration::from_millis(1);
+ assert_eq!(expected_ts.as_secs(), ts.seconds);
+ assert_eq!(expected_ts.subsec_nanos(), ts.subsec_nanos);
+
+ // The counter should have reseeded
+ assert!(ts.counter < (u64::MAX >> 22) as u128);
+ assert_ne!(0, ts.counter);
+ }
+ }
+ }
+
+ #[cfg(feature = "v7")]
+ pub use v7_support::*;
/// An empty counter that will always return the value `0`.
///
- /// This type should be used when constructing timestamps for version 7 UUIDs,
- /// since they don't need a counter for uniqueness.
+ /// This type can be used when constructing version 7 UUIDs. When used to
+ /// construct a version 7 UUID, the entire counter segment of the UUID will be
+ /// filled with a random value. This type does not maintain ordering of UUIDs
+ /// within a millisecond but is efficient.
+ ///
+ /// This type should not be used when constructing version 1 or version 6 UUIDs.
+ /// When used to construct a version 1 or version 6 UUID, the counter
+ /// segment will remain zero.
#[derive(Debug, Clone, Copy, Default)]
pub struct NoContext;
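A short sketch of the trade-off described above, assuming the `v7` feature: `NoContext` is fine for one-off v7 UUIDs, but two built in the same millisecond carry unrelated random counter bits.

```rust
use uuid::{NoContext, Timestamp, Uuid, Variant, Version};

fn main() {
    let ts = Timestamp::from_unix(NoContext, 1_496_854_535, 812_946_000);
    let uuid = Uuid::new_v7(ts);

    assert_eq!(Some(Version::SortRand), uuid.get_version());
    assert_eq!(Variant::RFC4122, uuid.get_variant());
    // Ordering against another v7 UUID from the same millisecond is left to chance.
}
```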
@@ -367,72 +919,9 @@
fn generate_sequence(&self, _seconds: u64, _nanos: u32) -> Self::Output {
0
}
- }
- #[cfg(all(any(feature = "v1", feature = "v6"), feature = "std", feature = "rng"))]
- static CONTEXT: Context = Context {
- count: Atomic::new(0),
- };
-
- #[cfg(all(any(feature = "v1", feature = "v6"), feature = "std", feature = "rng"))]
- static CONTEXT_INITIALIZED: Atomic<bool> = Atomic::new(false);
-
- #[cfg(all(any(feature = "v1", feature = "v6"), feature = "std", feature = "rng"))]
- pub(crate) fn shared_context() -> &'static Context {
- // If the context is in its initial state then assign it to a random value
- // It doesn't matter if multiple threads observe `false` here and initialize the context
- if CONTEXT_INITIALIZED
- .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
- .is_ok()
- {
- CONTEXT.count.store(crate::rng::u16(), Ordering::Release);
- }
-
- &CONTEXT
- }
-
- /// A thread-safe, wrapping counter that produces 14-bit numbers.
- ///
- /// This type should be used when constructing version 1 and version 6 UUIDs.
- #[derive(Debug)]
- #[cfg(any(feature = "v1", feature = "v6"))]
- pub struct Context {
- count: Atomic<u16>,
- }
-
- #[cfg(any(feature = "v1", feature = "v6"))]
- impl Context {
- /// Construct a new context that's initialized with the given value.
- ///
- /// The starting value should be a random number, so that UUIDs from
- /// different systems with the same timestamps are less likely to collide.
- /// When the `rng` feature is enabled, prefer the [`Context::new_random`] method.
- pub const fn new(count: u16) -> Self {
- Self {
- count: Atomic::<u16>::new(count),
- }
- }
-
- /// Construct a new context that's initialized with a random value.
- #[cfg(feature = "rng")]
- pub fn new_random() -> Self {
- Self {
- count: Atomic::<u16>::new(crate::rng::u16()),
- }
- }
- }
-
- #[cfg(any(feature = "v1", feature = "v6"))]
- impl ClockSequence for Context {
- type Output = u16;
-
- fn generate_sequence(&self, _seconds: u64, _nanos: u32) -> Self::Output {
- // RFC4122 reserves 2 bits of the clock sequence so the actual
- // maximum value is smaller than `u16::MAX`. Since we unconditionally
- // increment the clock sequence we want to wrap once it becomes larger
- // than what we can represent in a "u14". Otherwise there'd be patches
- // where the clock sequence doesn't change regardless of the timestamp
- self.count.fetch_add(1, Ordering::AcqRel) & (u16::MAX >> 2)
+ fn usable_bits(&self) -> usize {
+ 0
}
}
}
@@ -457,12 +946,27 @@
),
wasm_bindgen_test
)]
- fn rfc4122_unix_does_not_panic() {
+ fn gregorian_unix_does_not_panic() {
// Ensure timestamp conversions never panic
- Timestamp::unix_to_rfc4122_ticks(u64::MAX, 0);
- Timestamp::unix_to_rfc4122_ticks(0, u32::MAX);
- Timestamp::unix_to_rfc4122_ticks(u64::MAX, u32::MAX);
+ Timestamp::unix_to_gregorian_ticks(u64::MAX, 0);
+ Timestamp::unix_to_gregorian_ticks(0, u32::MAX);
+ Timestamp::unix_to_gregorian_ticks(u64::MAX, u32::MAX);
- Timestamp::rfc4122_to_unix(u64::MAX);
+ Timestamp::gregorian_to_unix(u64::MAX);
+ }
+
+ #[test]
+ #[cfg_attr(
+ all(
+ target_arch = "wasm32",
+ target_vendor = "unknown",
+ target_os = "unknown"
+ ),
+ wasm_bindgen_test
+ )]
+ fn to_gregorian_truncates_to_usable_bits() {
+ let ts = Timestamp::from_gregorian(123, u16::MAX);
+
+ assert_eq!((123, u16::MAX >> 2), ts.to_gregorian());
}
}
diff --git a/crates/uuid/src/v1.rs b/crates/uuid/src/v1.rs
index 41febab..cdd6333 100644
--- a/crates/uuid/src/v1.rs
+++ b/crates/uuid/src/v1.rs
@@ -48,7 +48,7 @@
/// # Examples
///
/// A UUID can be created from a unix [`Timestamp`] with a
- /// [`ClockSequence`]. RFC4122 requires the clock sequence
+ /// [`ClockSequence`]. RFC 9562 requires the clock sequence
/// is seeded with a random value:
///
/// ```
@@ -66,12 +66,12 @@
/// );
/// ```
///
- /// The timestamp can also be created manually as per RFC4122:
+ /// The timestamp can also be created manually as per RFC 9562:
///
/// ```
/// # use uuid::{Uuid, Timestamp, Context, ClockSequence};
/// let context = Context::new(42);
- /// let ts = Timestamp::from_rfc4122(14976234442241191232, context.generate_sequence(0, 0));
+ /// let ts = Timestamp::from_gregorian(14976234442241191232, context.generate_sequence(0, 0));
///
/// let uuid = Uuid::new_v1(ts, &[1, 2, 3, 4, 5, 6]);
///
@@ -83,15 +83,15 @@
///
/// # References
///
- /// * [Version 1 UUIDs in RFC4122](https://www.rfc-editor.org/rfc/rfc4122#section-4.2)
+ /// * [UUID Version 1 in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.1)
///
/// [`Timestamp`]: v1/struct.Timestamp.html
/// [`ClockSequence`]: v1/trait.ClockSequence.html
/// [`Context`]: v1/struct.Context.html
pub fn new_v1(ts: Timestamp, node_id: &[u8; 6]) -> Self {
- let (ticks, counter) = ts.to_rfc4122();
+ let (ticks, counter) = ts.to_gregorian();
- Builder::from_rfc4122_timestamp(ticks, counter, node_id).into_uuid()
+ Builder::from_gregorian_timestamp(ticks, counter, node_id).into_uuid()
}
}
@@ -131,10 +131,12 @@
"20616934-4ba2-11e7-8000-010203040506"
);
- let ts = uuid.get_timestamp().unwrap().to_rfc4122();
+ let ts = uuid.get_timestamp().unwrap().to_gregorian();
assert_eq!(ts.0 - 0x01B2_1DD2_1381_4000, 14_968_545_358_129_460);
+ assert_eq!(Some(node), uuid.get_node_id(),);
+
// Ensure parsing the same UUID produces the same timestamp
let parsed = Uuid::parse_str("20616934-4ba2-11e7-8000-010203040506").unwrap();
@@ -142,6 +144,8 @@
uuid.get_timestamp().unwrap(),
parsed.get_timestamp().unwrap()
);
+
+ assert_eq!(uuid.get_node_id().unwrap(), parsed.get_node_id().unwrap(),);
}
#[test]
@@ -162,39 +166,4 @@
assert_eq!(uuid.get_version(), Some(Version::Mac));
assert_eq!(uuid.get_variant(), Variant::RFC4122);
}
-
- #[test]
- #[cfg_attr(
- all(
- target_arch = "wasm32",
- target_vendor = "unknown",
- target_os = "unknown"
- ),
- wasm_bindgen_test
- )]
- fn test_new_context() {
- let time: u64 = 1_496_854_535;
- let time_fraction: u32 = 812_946_000;
- let node = [1, 2, 3, 4, 5, 6];
-
- // This context will wrap
- let context = Context::new(u16::MAX >> 2);
-
- let uuid1 = Uuid::new_v1(Timestamp::from_unix(&context, time, time_fraction), &node);
-
- let time: u64 = 1_496_854_536;
-
- let uuid2 = Uuid::new_v1(Timestamp::from_unix(&context, time, time_fraction), &node);
-
- assert_eq!(uuid1.get_timestamp().unwrap().to_rfc4122().1, 16383);
- assert_eq!(uuid2.get_timestamp().unwrap().to_rfc4122().1, 0);
-
- let time = 1_496_854_535;
-
- let uuid3 = Uuid::new_v1(Timestamp::from_unix(&context, time, time_fraction), &node);
- let uuid4 = Uuid::new_v1(Timestamp::from_unix(&context, time, time_fraction), &node);
-
- assert_eq!(uuid3.get_timestamp().unwrap().to_rfc4122().1, 1);
- assert_eq!(uuid4.get_timestamp().unwrap().to_rfc4122().1, 2);
- }
}
diff --git a/crates/uuid/src/v3.rs b/crates/uuid/src/v3.rs
index ed356d4..84a1e26 100644
--- a/crates/uuid/src/v3.rs
+++ b/crates/uuid/src/v3.rs
@@ -27,7 +27,8 @@
///
/// # References
///
- /// * [Version 3 and 5 UUIDs in RFC4122](https://www.rfc-editor.org/rfc/rfc4122#section-4.3)
+ /// * [UUID Version 3 in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.3)
+ /// * [Name-Based UUID Generation in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-6.5)
///
/// [`NAMESPACE_DNS`]: #associatedconstant.NAMESPACE_DNS
/// [`NAMESPACE_OID`]: #associatedconstant.NAMESPACE_OID
diff --git a/crates/uuid/src/v4.rs b/crates/uuid/src/v4.rs
index 3c42473..14d755e 100644
--- a/crates/uuid/src/v4.rs
+++ b/crates/uuid/src/v4.rs
@@ -26,12 +26,16 @@
///
/// # References
///
- /// * [Version 4 UUIDs in RFC4122](https://www.rfc-editor.org/rfc/rfc4122#section-4.4)
+ /// * [UUID Version 4 in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.4)
///
/// [`getrandom`]: https://crates.io/crates/getrandom
/// [from_random_bytes]: struct.Builder.html#method.from_random_bytes
pub fn new_v4() -> Uuid {
- crate::Builder::from_random_bytes(crate::rng::bytes()).into_uuid()
+ // This is an optimized method for generating random UUIDs that just masks
+ // out the bits for the version and variant and sets them both together
+ Uuid::from_u128(
+ crate::rng::u128() & 0xFFFFFFFFFFFF4FFFBFFFFFFFFFFFFFFF | 0x40008000000000000000,
+ )
}
}
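A quick check of the claim in the comment above, assuming the `v4` feature: the mask-and-or always yields a version-4, RFC 4122-variant UUID regardless of the random input.

```rust
use uuid::{Uuid, Variant, Version};

fn main() {
    for _ in 0..1_000 {
        let uuid = Uuid::new_v4();
        assert_eq!(Some(Version::Random), uuid.get_version());
        assert_eq!(Variant::RFC4122, uuid.get_variant());
    }
}
```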
diff --git a/crates/uuid/src/v5.rs b/crates/uuid/src/v5.rs
index 265aa1a..f29ce73 100644
--- a/crates/uuid/src/v5.rs
+++ b/crates/uuid/src/v5.rs
@@ -26,7 +26,8 @@
///
/// # References
///
- /// * [Version 3 and 5 UUIDs in RFC4122](https://www.rfc-editor.org/rfc/rfc4122#section-4.3)
+ /// * [UUID Version 5 in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.5)
+ /// * [Name-Based UUID Generation in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-6.5)
///
/// [`NAMESPACE_DNS`]: struct.Uuid.html#associatedconst.NAMESPACE_DNS
/// [`NAMESPACE_OID`]: struct.Uuid.html#associatedconst.NAMESPACE_OID
diff --git a/crates/uuid/src/v6.rs b/crates/uuid/src/v6.rs
index 09c156f..05f27db 100644
--- a/crates/uuid/src/v6.rs
+++ b/crates/uuid/src/v6.rs
@@ -49,7 +49,7 @@
/// # Examples
///
/// A UUID can be created from a unix [`Timestamp`] with a
- /// [`ClockSequence`]. RFC4122 requires the clock sequence
+ /// [`ClockSequence`]. RFC 9562 requires the clock sequence
/// is seeded with a random value:
///
/// ```rust
@@ -66,13 +66,13 @@
/// );
/// ```
///
- /// The timestamp can also be created manually as per RFC4122:
+ /// The timestamp can also be created manually as per RFC 9562:
///
/// ```
/// # use uuid::{Uuid, Timestamp, Context, ClockSequence};
/// # fn random_seed() -> u16 { 42 }
/// let context = Context::new(random_seed());
- /// let ts = Timestamp::from_rfc4122(14976241191231231313, context.generate_sequence(0, 0) );
+ /// let ts = Timestamp::from_gregorian(14976241191231231313, context.generate_sequence(0, 0));
///
/// let uuid = Uuid::new_v6(ts, &[1, 2, 3, 4, 5, 6]);
///
@@ -84,15 +84,15 @@
///
/// # References
///
- /// * [Version 6 UUIDs in Draft RFC: New UUID Formats, Version 4](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-04#section-5.1)
+ /// * [UUID Version 6 in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.6)
///
/// [`Timestamp`]: timestamp/struct.Timestamp.html
/// [`ClockSequence`]: timestamp/trait.ClockSequence.html
/// [`Context`]: timestamp/context/struct.Context.html
pub fn new_v6(ts: Timestamp, node_id: &[u8; 6]) -> Self {
- let (ticks, counter) = ts.to_rfc4122();
+ let (ticks, counter) = ts.to_gregorian();
- Builder::from_sorted_rfc4122_timestamp(ticks, counter, node_id).into_uuid()
+ Builder::from_sorted_gregorian_timestamp(ticks, counter, node_id).into_uuid()
}
}
@@ -133,10 +133,12 @@
"1e74ba22-0616-6934-8000-010203040506"
);
- let ts = uuid.get_timestamp().unwrap().to_rfc4122();
+ let ts = uuid.get_timestamp().unwrap().to_gregorian();
assert_eq!(ts.0 - 0x01B2_1DD2_1381_4000, 14_968_545_358_129_460);
+ assert_eq!(Some(node), uuid.get_node_id(),);
+
// Ensure parsing the same UUID produces the same timestamp
let parsed = Uuid::parse_str("1e74ba22-0616-6934-8000-010203040506").unwrap();
@@ -144,6 +146,8 @@
uuid.get_timestamp().unwrap(),
parsed.get_timestamp().unwrap()
);
+
+ assert_eq!(uuid.get_node_id().unwrap(), parsed.get_node_id().unwrap(),);
}
#[test]
@@ -164,39 +168,4 @@
assert_eq!(uuid.get_version(), Some(Version::SortMac));
assert_eq!(uuid.get_variant(), Variant::RFC4122);
}
-
- #[test]
- #[cfg_attr(
- all(
- target_arch = "wasm32",
- target_vendor = "unknown",
- target_os = "unknown"
- ),
- wasm_bindgen_test
- )]
- fn test_new_context() {
- let time: u64 = 1_496_854_535;
- let time_fraction: u32 = 812_946_000;
- let node = [1, 2, 3, 4, 5, 6];
-
- // This context will wrap
- let context = Context::new(u16::MAX >> 2);
-
- let uuid1 = Uuid::new_v6(Timestamp::from_unix(&context, time, time_fraction), &node);
-
- let time: u64 = 1_496_854_536;
-
- let uuid2 = Uuid::new_v6(Timestamp::from_unix(&context, time, time_fraction), &node);
-
- assert_eq!(uuid1.get_timestamp().unwrap().to_rfc4122().1, 16383);
- assert_eq!(uuid2.get_timestamp().unwrap().to_rfc4122().1, 0);
-
- let time = 1_496_854_535;
-
- let uuid3 = Uuid::new_v6(Timestamp::from_unix(&context, time, time_fraction), &node);
- let uuid4 = Uuid::new_v6(Timestamp::from_unix(&context, time, time_fraction), &node);
-
- assert_eq!(uuid3.get_timestamp().unwrap().counter, 1);
- assert_eq!(uuid4.get_timestamp().unwrap().counter, 2);
- }
}
diff --git a/crates/uuid/src/v7.rs b/crates/uuid/src/v7.rs
index ea8f474..44227b5 100644
--- a/crates/uuid/src/v7.rs
+++ b/crates/uuid/src/v7.rs
@@ -6,13 +6,16 @@
use crate::{rng, std::convert::TryInto, timestamp::Timestamp, Builder, Uuid};
impl Uuid {
- /// Create a new version 7 UUID using the current time value and random bytes.
+ /// Create a new version 7 UUID using the current time value.
///
/// This method is a convenient alternative to [`Uuid::new_v7`] that uses the current system time
- /// as the source timestamp.
+ /// as the source timestamp. All UUIDs generated through this method by the same process are
+ /// guaranteed to be ordered by their creation.
#[cfg(feature = "std")]
pub fn now_v7() -> Self {
- Self::new_v7(Timestamp::now(crate::NoContext))
+ Self::new_v7(Timestamp::now(
+ crate::timestamp::context::shared_context_v7(),
+ ))
}
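The ordering guarantee comes from routing `now_v7` through the shared `ContextV7` rather than `NoContext`; a minimal illustration, assuming the `std` and `v7` features:

```rust
use uuid::Uuid;

fn main() {
    // Both calls go through the process-wide ContextV7, so even UUIDs created
    // in the same millisecond compare in creation order.
    let first = Uuid::now_v7();
    let second = Uuid::now_v7();
    assert!(first < second);
}
```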
/// Create a new version 7 UUID using a time value and random bytes.
@@ -41,22 +44,63 @@
/// );
/// ```
///
+ /// A v7 UUID can also be created with a counter to ensure batches of
+ /// UUIDs created together remain sortable:
+ ///
+ /// ```rust
+ /// # use uuid::{Uuid, Timestamp, ContextV7};
+ /// let context = ContextV7::new();
+ /// let uuid1 = Uuid::new_v7(Timestamp::from_unix(&context, 1497624119, 1234));
+ /// let uuid2 = Uuid::new_v7(Timestamp::from_unix(&context, 1497624119, 1234));
+ ///
+ /// assert!(uuid1 < uuid2);
+ /// ```
+ ///
/// # References
///
- /// * [Version 7 UUIDs in Draft RFC: New UUID Formats, Version 4](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-04#section-5.2)
+ /// * [UUID Version 7 in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.7)
pub fn new_v7(ts: Timestamp) -> Self {
let (secs, nanos) = ts.to_unix();
let millis = (secs * 1000).saturating_add(nanos as u64 / 1_000_000);
- Builder::from_unix_timestamp_millis(millis, &rng::bytes()[..10].try_into().unwrap())
- .into_uuid()
+ let mut counter_and_random = rng::u128();
+
+ let (mut counter, counter_bits) = ts.counter();
+
+ debug_assert!(counter_bits <= 128);
+
+ let mut counter_bits = counter_bits as u32;
+
+ // If the counter intersects the variant field then shift around it.
+ // This ensures that any bits set in the counter that would intersect
+ // the variant are still preserved
+ if counter_bits > 12 {
+ let mask = u128::MAX << (counter_bits - 12);
+
+ counter = (counter & !mask) | ((counter & mask) << 2);
+
+ counter_bits += 2;
+ }
+
+ counter_and_random &= u128::MAX.overflowing_shr(counter_bits).0;
+ counter_and_random |= counter
+ .overflowing_shl(128u32.saturating_sub(counter_bits))
+ .0;
+
+ Builder::from_unix_timestamp_millis(
+ millis,
+ &counter_and_random.to_be_bytes()[..10].try_into().unwrap(),
+ )
+ .into_uuid()
}
}
#[cfg(test)]
mod tests {
use super::*;
- use crate::{std::string::ToString, NoContext, Variant, Version};
+
+ use crate::{std::string::ToString, ClockSequence, NoContext, Variant, Version};
+
#[cfg(all(
target_arch = "wasm32",
target_vendor = "unknown",
@@ -153,4 +197,45 @@
assert_eq!(ts.to_unix(), decoded_ts.to_unix());
}
+
+ #[test]
+ #[cfg_attr(
+ all(
+ target_arch = "wasm32",
+ target_vendor = "unknown",
+ target_os = "unknown"
+ ),
+ wasm_bindgen_test
+ )]
+ fn test_new_max_context() {
+ struct MaxContext;
+
+ #[cfg(test)]
+ impl ClockSequence for MaxContext {
+ type Output = u128;
+
+ fn generate_sequence(&self, _seconds: u64, _nanos: u32) -> Self::Output {
+ u128::MAX
+ }
+
+ fn usable_bits(&self) -> usize {
+ 128
+ }
+ }
+
+ let time: u64 = 1_496_854_535;
+ let time_fraction: u32 = 812_000_000;
+
+ // Ensure we don't overflow here
+ let ts = Timestamp::from_unix(MaxContext, time, time_fraction);
+
+ let uuid = Uuid::new_v7(ts);
+
+ assert_eq!(uuid.get_version(), Some(Version::SortRand));
+ assert_eq!(uuid.get_variant(), Variant::RFC4122);
+
+ let decoded_ts = uuid.get_timestamp().unwrap();
+
+ assert_eq!(ts.to_unix(), decoded_ts.to_unix());
+ }
}
diff --git a/crates/uuid/src/v8.rs b/crates/uuid/src/v8.rs
index b853ac7..dc3d19e 100644
--- a/crates/uuid/src/v8.rs
+++ b/crates/uuid/src/v8.rs
@@ -24,7 +24,7 @@
///
/// # References
///
- /// * [Version 8 UUIDs in Draft RFC: New UUID Formats, Version 4](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-04#section-5.3)
+ /// * [UUID Version 8 in RFC 9562](https://www.ietf.org/rfc/rfc9562.html#section-5.8)
pub fn new_v8(buf: [u8; 16]) -> Uuid {
Builder::from_custom_bytes(buf).into_uuid()
}
diff --git a/crates/vhost-user-backend/.cargo-checksum.json b/crates/vhost-user-backend/.cargo-checksum.json
index 04f91b7..dd631ac 100644
--- a/crates/vhost-user-backend/.cargo-checksum.json
+++ b/crates/vhost-user-backend/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"5339375d04fcee70bcbc374d82768bb1188314f4ca0f40fd67333fe923645390","Cargo.toml":"cb0dd18bec73dea0fbab55127e21312d0d450c8d4518aac4927c4950dc5ac3a4","README.md":"c8bb590dfea33f6865fa13415806740ddd4bcd5be62b27abdc26f868be95a18f","src/backend.rs":"aab2bdc646c01d35da9952dee18a7e6aada8bab11c23548935d1bee08fd50295","src/event_loop.rs":"b92509a8f39b6151a0c04f92c4e1c44119effcdf459eb203572033eb9a95daf7","src/handler.rs":"8702af825bc915950549dad72c4894e685590a3ad0b5a58b7b684bb441444be4","src/lib.rs":"49963cd0e1dc2c88b9c1af6bab1c8622027a543a6db7baa50837cfaec8680ca9","src/vring.rs":"d3136045c5d18351cd8a62f4baf86dae1f625788c088767e5c61950d68e9b041","tests/vhost-user-server.rs":"6775b6a8e1a5bb5997db84729a6099a975e607aad630c24cc0c727f7aaafb559"},"package":"ab069cdedaf18a0673766eb0a07a0f4ee3ed1b8e17fbfe4aafe5b988e2de1d01"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"e07e9b19298055d205a11f699001f7df0c693b858541ba13500bcdcdfd530761","Cargo.toml":"41095fea2428c486c2984195bf8e5082510e70340d346a0bb94a412ae6dceb4c","README.md":"8433f54ad84c328d39efeb6c525292e95439a537b3ce1e677cd22b1a018e4843","src/backend.rs":"d5df0355fd594da5890940bfa437df8da2fcab71d32f5309d4eac8348f80053b","src/bitmap.rs":"472a5f27cbbd225a2fa19e1a35feae1ba94fadbac1da4e484f91c7ff00ffd588","src/event_loop.rs":"792773de7a5192aef61558de310f466eac2e861c989d0efdb099e208ac288ba8","src/handler.rs":"4971aa35ea16f5dd0d3fec5b8233e8d3f5020faf7f7b2ae9f454c342a3d167e1","src/lib.rs":"d289603d79044227dd9b9d2c6302372126498b1c5c149ac0e808ffa96b1244b0","src/vring.rs":"7de7d7e5933904847a91f962d5648ac8a8420f79a0d7b5f7b8f312c842421058","tests/vhost-user-server.rs":"de9a530f6f2d4e930e54f2d70962b4cabba6d58720a16d75d33b9fbf8bbf1072"},"package":"daa03d476437d005abd2dec0970c468ed2a692e6a0604b834699680e171de942"}
\ No newline at end of file
diff --git a/crates/vhost-user-backend/Android.bp b/crates/vhost-user-backend/Android.bp
index 17da1db..d566488 100644
--- a/crates/vhost-user-backend/Android.bp
+++ b/crates/vhost-user-backend/Android.bp
@@ -17,9 +17,9 @@
name: "libvhost_user_backend",
crate_name: "vhost_user_backend",
cargo_env_compat: true,
- cargo_pkg_version: "0.10.1",
+ cargo_pkg_version: "0.17.0",
crate_root: "src/lib.rs",
- edition: "2018",
+ edition: "2021",
rustlibs: [
"liblibc",
"liblog_rust",
@@ -29,4 +29,5 @@
"libvm_memory_android",
"libvmm_sys_util",
],
+ compile_multilib: "first",
}
diff --git a/crates/vhost-user-backend/CHANGELOG.md b/crates/vhost-user-backend/CHANGELOG.md
index 4ae24f3..71b0d79 100644
--- a/crates/vhost-user-backend/CHANGELOG.md
+++ b/crates/vhost-user-backend/CHANGELOG.md
@@ -5,9 +5,86 @@
### Changed
+### Deprecated
+
### Fixed
-### Deprecated
+## v0.17.0
+
+### Added
+- [[#266]](https://github.com/rust-vmm/vhost/pull/266) Add support for `VHOST_USER_RESET_DEVICE`
+
+### Changed
+- [[#269]](https://github.com/rust-vmm/vhost/pull/269) Update vm-memory to 0.16.0 and virtio-queue to 0.13.0
+
+## v0.16.1
+
+### Fixed
+- [[#267](https://github.com/rust-vmm/vhost/pull/267)] Fix feature unification issues with gpu-socket feature.
+
+## v0.16.0 - yanked
+
+This version got yanked because the `gpu_socket` feature introduced in this
+release was causing problems
+(see [#265](https://github.com/rust-vmm/vhost/issues/265)).
+Starting with the next version (v0.16.1), the `gpu_socket` feature was removed.
+
+### Added
+- [[#241]](https://github.com/rust-vmm/vhost/pull/241) Add shared objects support
+- [[#239]](https://github.com/rust-vmm/vhost/pull/239) Add support for `VHOST_USER_GPU_SET_SOCKET`
+
+### Changed
+- [[#257]](https://github.com/rust-vmm/vhost/pull/257) Update virtio-queue version from 0.12.0 to 0.13.0 and vm-memory from 0.14.0 to 0.15.0.
+- [[#240]](https://github.com/rust-vmm/vhost/pull/240) Move the set of event_idx property from set_vring_base callback to set_features one
+
+## v0.15.0
+
+### Changed
+- [[#237]](https://github.com/rust-vmm/vhost/pull/237) Update virtio-queue dependency to 0.12.0
+
+## v0.14.0
+
+### Added
+- [[#203]](https://github.com/rust-vmm/vhost/pull/203) Add back-end's internal state migration support
+- [[#218]](https://github.com/rust-vmm/vhost/pull/218) Adding POSTCOPY support
+- [[#206]](https://github.com/rust-vmm/vhost/pull/206) Add bitmap support for tracking dirty pages during migration
+
+## v0.13.1
+
+### Fixed
+
+- [[#227]](https://github.com/rust-vmm/vhost/pull/227) vhost-user-backend: Fix SET_VRING_KICK should not disable the vring
+
+## v0.13.0
+
+### Changed
+- [[#224]](https://github.com/rust-vmm/vhost/pull/224) vhost-user-backend: bump up MAX_MEM_SLOTS to 509
+
+## v0.12.0
+
+### Fixed
+- [[#210](https://github.com/rust-vmm/vhost/pull/210)] Enable all vrings upon receipt of `VHOST_USER_SET_FEATURES`
+ message.
+- [[#212](https://github.com/rust-vmm/vhost/pull/212)] Validate queue index in `VhostUserHandler::set_vring_base`
+ to avoid potential out-of-bounds panic.
+
+### Changed
+- [[#214](https://github.com/rust-vmm/vhost/pull/214)] Avoid indexing the same Vec multiple times by locally caching the
+ result of `Vec:get`.
+- [[#219]](https://github.com/rust-vmm/vhost/pull/219) Update vmm-sys-util dependency to 0.12.1 and vm-memory dependency to 0.14.0.
+
+## v0.11.0
+
+### Added
+- [[#173]](https://github.com/rust-vmm/vhost/pull/173) vhost-user-backend: Added convenience function `serve`
+
+### Changed
+- [[#187]](https://github.com/rust-vmm/vhost/pull/187) Clean master slave
+ - Replaced master/slave with frontend/backend in the codebase and public API.
+- [[#192]](https://github.com/rust-vmm/vhost/pull/192) vhost-user-backend: remove return value from handle_event
+- [[#155]](https://github.com/rust-vmm/vhost/pull/155) Converted generic type
+ parameters of VhostUserBackend into associated types.
+- [[#116]](https://github.com/rust-vmm/vhost/pull/116) Upgrade to 2021 edition
## v0.10.1
diff --git a/crates/vhost-user-backend/Cargo.toml b/crates/vhost-user-backend/Cargo.toml
index e8709f7..af55313 100644
--- a/crates/vhost-user-backend/Cargo.toml
+++ b/crates/vhost-user-backend/Cargo.toml
@@ -10,10 +10,15 @@
# See Cargo.toml.orig for the original contents.
[package]
-edition = "2018"
+edition = "2021"
name = "vhost-user-backend"
-version = "0.10.1"
+version = "0.17.0"
authors = ["The Cloud Hypervisor Authors"]
+build = false
+autobins = false
+autoexamples = false
+autotests = false
+autobenches = false
description = "A framework to build vhost-user backend service daemon"
readme = "README.md"
keywords = [
@@ -23,55 +28,72 @@
license = "Apache-2.0"
repository = "https://github.com/rust-vmm/vhost"
+[lib]
+name = "vhost_user_backend"
+path = "src/lib.rs"
+
+[[test]]
+name = "vhost-user-server"
+path = "tests/vhost-user-server.rs"
+
[dependencies.libc]
version = "0.2.39"
[dependencies.log]
version = "0.4.17"
+[dependencies.userfaultfd]
+version = "0.8.1"
+optional = true
+
[dependencies.vhost]
-version = "0.8"
-features = ["vhost-user-slave"]
+version = "0.13.0"
+features = ["vhost-user-backend"]
[dependencies.virtio-bindings]
version = "0.2.1"
[dependencies.virtio-queue]
-version = "0.9.0"
+version = "0.14.0"
[dependencies.vm-memory]
-version = "0.12.0"
-features = [
- "backend-mmap",
- "backend-atomic",
-]
-
-[dependencies.vmm-sys-util]
-version = "0.11.0"
-
-[dev-dependencies.nix]
-version = "0.26"
-
-[dev-dependencies.tempfile]
-version = "3.2.0"
-
-[dev-dependencies.vhost]
-version = "0.8"
-features = [
- "test-utils",
- "vhost-user-master",
- "vhost-user-slave",
-]
-
-[dev-dependencies.vm-memory]
-version = "0.12.0"
+version = "0.16.0"
features = [
"backend-mmap",
"backend-atomic",
"backend-bitmap",
]
+[dependencies.vmm-sys-util]
+version = "0.12.1"
+
+[dev-dependencies.nix]
+version = "0.29"
+features = ["fs"]
+
+[dev-dependencies.tempfile]
+version = "3.2.0"
+
+[dev-dependencies.vhost]
+version = "0.13.0"
+features = [
+ "test-utils",
+ "vhost-user-frontend",
+ "vhost-user-backend",
+]
+
+[dev-dependencies.vm-memory]
+version = "0.16.0"
+features = [
+ "backend-mmap",
+ "backend-atomic",
+]
+
[features]
+postcopy = [
+ "vhost/postcopy",
+ "userfaultfd",
+]
xen = [
"vm-memory/xen",
"vhost/xen",
diff --git a/crates/vhost-user-backend/METADATA b/crates/vhost-user-backend/METADATA
index 4615151..36c42e5 100644
--- a/crates/vhost-user-backend/METADATA
+++ b/crates/vhost-user-backend/METADATA
@@ -1,17 +1,17 @@
name: "vhost-user-backend"
description: "A framework to build vhost-user backend service daemon"
third_party {
- version: "0.10.1"
+ version: "0.17.0"
license_type: NOTICE
last_upgrade_date {
- year: 2023
- month: 8
- day: 23
+ year: 2024
+ month: 11
+ day: 21
}
homepage: "https://crates.io/crates/vhost-user-backend"
identifier {
type: "Archive"
- value: "https://static.crates.io/crates/vhost-user-backend/vhost-user-backend-0.10.1.crate"
- version: "0.10.1"
+ value: "https://static.crates.io/crates/vhost-user-backend/vhost-user-backend-0.17.0.crate"
+ version: "0.17.0"
}
}
diff --git a/crates/vhost-user-backend/README.md b/crates/vhost-user-backend/README.md
index a526ab1..46b771c 100644
--- a/crates/vhost-user-backend/README.md
+++ b/crates/vhost-user-backend/README.md
@@ -29,14 +29,14 @@
### Create a `VhostUserDaemon` Instance
The `VhostUserDaemon::new()` creates an instance of `VhostUserDaemon` object. The client needs to
pass in an `VhostUserBackend` object, which will be used to configure the `VhostUserDaemon`
-instance, handle control messages from the vhost-user master and handle virtio requests from
+instance, handle control messages from the vhost-user frontend and handle virtio requests from
virtio queues. A group of working threads will be created to handle virtio requests from configured
virtio queues.
### Start the `VhostUserDaemon`
-The `VhostUserDaemon::start()` method waits for an incoming connection from the vhost-user masters
+The `VhostUserDaemon::start()` method waits for an incoming connection from the vhost-user frontends
on the `listener`. Once a connection is ready, a main thread will be created to handle vhost-user
-messages from the vhost-user master.
+messages from the vhost-user frontend.
### Stop the `VhostUserDaemon`
The `VhostUserDaemon::stop()` method waits for the main thread to exit. An exit event must be sent
@@ -98,6 +98,16 @@
}
```
+## Postcopy support
+
+To enable POSTCOPY_* message support there is a `postcopy` feature.
+Due to how Xen handles memory mappings, the `postcopy` feature is not compatible
+with the `xen` feature. Enabling both at the same time will result in a compilation error.
+
+The `postcopy` feature enables the optional `userfaultfd` dependency in order to create
+and interact with a `userfaultfd` object. This requires that the backend has access
+permission to the `/dev/userfaultfd` file.
+
## Xen support
Supporting Xen requires special handling while mapping the guest memory. The
diff --git a/crates/vhost-user-backend/cargo_embargo.json b/crates/vhost-user-backend/cargo_embargo.json
index 2a9a0a7..1e76fc1 100644
--- a/crates/vhost-user-backend/cargo_embargo.json
+++ b/crates/vhost-user-backend/cargo_embargo.json
@@ -5,7 +5,8 @@
},
"package": {
"vhost-user-backend": {
- "device_supported": false
+ "device_supported": false,
+ "compile_multilib": "first"
}
},
"run_cargo": false
diff --git a/crates/vhost-user-backend/src/backend.rs b/crates/vhost-user-backend/src/backend.rs
index 43ab7b9..a9a0ea0 100644
--- a/crates/vhost-user-backend/src/backend.rs
+++ b/crates/vhost-user-backend/src/backend.rs
@@ -18,27 +18,31 @@
//! [VhostUserBackend]: trait.VhostUserBackend.html
//! [VhostUserBackendMut]: trait.VhostUserBackendMut.html
+use std::fs::File;
use std::io::Result;
use std::ops::Deref;
use std::sync::{Arc, Mutex, RwLock};
-use vhost::vhost_user::message::VhostUserProtocolFeatures;
-use vhost::vhost_user::Slave;
+use vhost::vhost_user::message::{
+ VhostTransferStateDirection, VhostTransferStatePhase, VhostUserProtocolFeatures,
+};
+use vhost::vhost_user::Backend;
use vm_memory::bitmap::Bitmap;
use vmm_sys_util::epoll::EventSet;
use vmm_sys_util::eventfd::EventFd;
+use vhost::vhost_user::GpuBackend;
+
use super::vring::VringT;
use super::GM;
/// Trait with interior mutability for vhost user backend servers to implement concrete services.
///
/// To support multi-threading and asynchronous IO, we enforce `Send + Sync` bound.
-pub trait VhostUserBackend<V, B = ()>: Send + Sync
-where
- V: VringT<GM<B>>,
- B: Bitmap + 'static,
-{
+pub trait VhostUserBackend: Send + Sync {
+ type Bitmap: Bitmap + 'static;
+ type Vring: VringT<GM<Self::Bitmap>>;
+
/// Get number of queues supported.
fn num_queues(&self) -> usize;
@@ -54,6 +58,12 @@
/// Get available vhost protocol features.
fn protocol_features(&self) -> VhostUserProtocolFeatures;
+ /// Reset the emulated device state.
+ ///
+ /// A default implementation is provided as we cannot expect all backends to implement this
+ /// function.
+ fn reset_device(&self) {}
+
/// Enable or disable the virtio EVENT_IDX feature
fn set_event_idx(&self, enabled: bool);
@@ -74,13 +84,25 @@
}
/// Update guest memory regions.
- fn update_memory(&self, mem: GM<B>) -> Result<()>;
+ fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()>;
- /// Set handler for communicating with the master by the slave communication channel.
+ /// Set handler for communicating with the frontend by the backend communication channel.
///
/// A default implementation is provided as we cannot expect all backends to implement this
/// function.
- fn set_slave_req_fd(&self, _slave: Slave) {}
+ fn set_backend_req_fd(&self, _backend: Backend) {}
+
+ /// Set handler for communicating with the frontend by the gpu specific backend communication
+ /// channel.
+ ///
+ /// This function returns a `Result`, returning an error if the backend does not implement this
+ /// function.
+ fn set_gpu_socket(&self, _gpu_backend: GpuBackend) -> Result<()> {
+ Err(std::io::Error::new(
+ std::io::ErrorKind::Unsupported,
+ "backend does not support set_gpu_socket() / VHOST_USER_GPU_SET_SOCKET",
+ ))
+ }
/// Get the map to map queue index to worker thread index.
///
@@ -93,9 +115,8 @@
/// Provide an optional exit EventFd for the specified worker thread.
///
- /// If an (`EventFd`, `token`) pair is returned, the returned `EventFd` will be monitored for IO
- /// events by using epoll with the specified `token`. When the returned EventFd is written to,
- /// the worker thread will exit.
+ /// The returned `EventFd` will be monitored for IO events. When the
+ /// returned EventFd is written to, the worker thread will exit.
fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
None
}
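For reference, a minimal sketch of the per-thread exit-event pattern this doc describes; the `ExitEvents` type and its methods are illustrative, not part of this crate:

```rust
use vmm_sys_util::eventfd::EventFd;

// The backend owns one EventFd per worker thread and hands out clones;
// writing to the owned fd asks the corresponding worker to exit.
struct ExitEvents {
    per_thread: Vec<EventFd>,
}

impl ExitEvents {
    fn new(num_threads: usize) -> std::io::Result<Self> {
        let per_thread = (0..num_threads)
            .map(|_| EventFd::new(libc::EFD_NONBLOCK))
            .collect::<std::io::Result<Vec<_>>>()?;
        Ok(ExitEvents { per_thread })
    }

    // What a backend's `exit_event` implementation would return.
    fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
        self.per_thread.get(thread_index)?.try_clone().ok()
    }

    // Called by the device to stop worker `thread_index`.
    fn signal_exit(&self, thread_index: usize) -> std::io::Result<()> {
        self.per_thread[thread_index].write(1)
    }
}
```

The updated `MockVhostBackend` later in this diff applies the same idea: it creates the per-thread descriptors once and clones them in `exit_event`, instead of handing out a fresh `EventFd` on every call.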
@@ -109,17 +130,47 @@
&self,
device_event: u16,
evset: EventSet,
- vrings: &[V],
+ vrings: &[Self::Vring],
thread_id: usize,
- ) -> Result<bool>;
+ ) -> Result<()>;
+
+ /// Initiate transfer of internal state for the purpose of migration to/from the back-end.
+ ///
+ /// Depending on `direction`, the state should either be saved (i.e. serialized and written to
+ /// `file`) or loaded (i.e. read from `file` and deserialized). The back-end can choose to use
+ /// a different channel than file. If so, it must return a File that the front-end can use.
+ /// Note that this function must not block during transfer, i.e. I/O to/from `file` must be
+ /// done outside of this function.
+ fn set_device_state_fd(
+ &self,
+ _direction: VhostTransferStateDirection,
+ _phase: VhostTransferStatePhase,
+ _file: File,
+ ) -> Result<Option<File>> {
+ Err(std::io::Error::new(
+ std::io::ErrorKind::Unsupported,
+ "back end does not support state transfer",
+ ))
+ }
+
+ /// After transferring internal state, check for any resulting errors, including potential
+ /// deserialization errors when loading state.
+ ///
+ /// Although this function returns a `Result`, the front-end will not receive any details about
+ /// this error.
+ fn check_device_state(&self) -> Result<()> {
+ Err(std::io::Error::new(
+ std::io::ErrorKind::Unsupported,
+ "back end does not support state transfer",
+ ))
+ }
}
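A rough sketch of how a backend could satisfy the non-blocking state-transfer contract documented above, by pushing the actual I/O onto a worker thread; the `DeviceState` type, its field, and the save-only behaviour are assumptions for illustration:

```rust
use std::fs::File;
use std::io::{Result, Write};
use std::thread;

use vhost::vhost_user::message::{VhostTransferStateDirection, VhostTransferStatePhase};

struct DeviceState {
    serialized: Vec<u8>,
}

impl DeviceState {
    // Shape of a `set_device_state_fd` implementation for the save direction:
    // the actual write happens on a worker thread so this call never blocks.
    fn set_device_state_fd(
        &self,
        _direction: VhostTransferStateDirection,
        _phase: VhostTransferStatePhase,
        mut file: File,
    ) -> Result<Option<File>> {
        let bytes = self.serialized.clone();
        thread::spawn(move || {
            // Errors here would be surfaced later through `check_device_state`.
            let _ = file.write_all(&bytes);
        });
        // Returning `None` means the front-end keeps using the fd it sent.
        Ok(None)
    }
}
```

Returning `Ok(None)` keeps the front-end's descriptor in use; a backend that prefers its own pipe or socket would return `Ok(Some(file))` instead, as the doc comment above notes.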
/// Trait without interior mutability for vhost user backend servers to implement concrete services.
-pub trait VhostUserBackendMut<V, B = ()>: Send + Sync
-where
- V: VringT<GM<B>>,
- B: Bitmap + 'static,
-{
+pub trait VhostUserBackendMut: Send + Sync {
+ type Bitmap: Bitmap + 'static;
+ type Vring: VringT<GM<Self::Bitmap>>;
+
/// Get number of queues supported.
fn num_queues(&self) -> usize;
@@ -135,6 +186,12 @@
/// Get available vhost protocol features.
fn protocol_features(&self) -> VhostUserProtocolFeatures;
+ /// Reset the emulated device state.
+ ///
+ /// A default implementation is provided as we cannot expect all backends to implement this
+ /// function.
+ fn reset_device(&mut self) {}
+
/// Enable or disable the virtio EVENT_IDX feature
fn set_event_idx(&mut self, enabled: bool);
@@ -155,13 +212,25 @@
}
/// Update guest memory regions.
- fn update_memory(&mut self, mem: GM<B>) -> Result<()>;
+ fn update_memory(&mut self, mem: GM<Self::Bitmap>) -> Result<()>;
- /// Set handler for communicating with the master by the slave communication channel.
+ /// Set handler for communicating with the frontend by the backend communication channel.
///
/// A default implementation is provided as we cannot expect all backends to implement this
/// function.
- fn set_slave_req_fd(&mut self, _slave: Slave) {}
+ fn set_backend_req_fd(&mut self, _backend: Backend) {}
+
+ /// Set handler for communicating with the frontend by the gpu specific backend communication
+ /// channel.
+ ///
+ /// This function returns a `Result`, returning an error if the backend does not implement this
+ /// function.
+ fn set_gpu_socket(&mut self, _gpu_backend: GpuBackend) -> Result<()> {
+ Err(std::io::Error::new(
+ std::io::ErrorKind::Unsupported,
+ "backend does not support set_gpu_socket() / VHOST_USER_GPU_SET_SOCKET",
+ ))
+ }
/// Get the map to map queue index to worker thread index.
///
@@ -190,16 +259,44 @@
&mut self,
device_event: u16,
evset: EventSet,
- vrings: &[V],
+ vrings: &[Self::Vring],
thread_id: usize,
- ) -> Result<bool>;
+ ) -> Result<()>;
+
+ /// Initiate transfer of internal state for the purpose of migration to/from the back-end.
+ ///
+ /// Depending on `direction`, the state should either be saved (i.e. serialized and written to
+ /// `file`) or loaded (i.e. read from `file` and deserialized). Note that this function must
+ /// not block during transfer, i.e. I/O to/from `file` must be done outside of this function.
+ fn set_device_state_fd(
+ &mut self,
+ _direction: VhostTransferStateDirection,
+ _phase: VhostTransferStatePhase,
+ _file: File,
+ ) -> Result<Option<File>> {
+ Err(std::io::Error::new(
+ std::io::ErrorKind::Unsupported,
+ "back end does not support state transfer",
+ ))
+ }
+
+ /// After transferring internal state, check for any resulting errors, including potential
+ /// deserialization errors when loading state.
+ ///
+ /// Although this function returns a `Result`, the front-end will not receive any details about
+ /// this error.
+ fn check_device_state(&self) -> Result<()> {
+ Err(std::io::Error::new(
+ std::io::ErrorKind::Unsupported,
+ "back end does not support state transfer",
+ ))
+ }
}
-impl<T: VhostUserBackend<V, B>, V, B> VhostUserBackend<V, B> for Arc<T>
-where
- V: VringT<GM<B>>,
- B: Bitmap + 'static,
-{
+impl<T: VhostUserBackend> VhostUserBackend for Arc<T> {
+ type Bitmap = T::Bitmap;
+ type Vring = T::Vring;
+
fn num_queues(&self) -> usize {
self.deref().num_queues()
}
@@ -220,6 +317,10 @@
self.deref().protocol_features()
}
+ fn reset_device(&self) {
+ self.deref().reset_device()
+ }
+
fn set_event_idx(&self, enabled: bool) {
self.deref().set_event_idx(enabled)
}
@@ -232,12 +333,16 @@
self.deref().set_config(offset, buf)
}
- fn update_memory(&self, mem: GM<B>) -> Result<()> {
+ fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()> {
self.deref().update_memory(mem)
}
- fn set_slave_req_fd(&self, slave: Slave) {
- self.deref().set_slave_req_fd(slave)
+ fn set_backend_req_fd(&self, backend: Backend) {
+ self.deref().set_backend_req_fd(backend)
+ }
+
+ fn set_gpu_socket(&self, gpu_backend: GpuBackend) -> Result<()> {
+ self.deref().set_gpu_socket(gpu_backend)
}
fn queues_per_thread(&self) -> Vec<u64> {
@@ -252,19 +357,31 @@
&self,
device_event: u16,
evset: EventSet,
- vrings: &[V],
+ vrings: &[Self::Vring],
thread_id: usize,
- ) -> Result<bool> {
+ ) -> Result<()> {
self.deref()
.handle_event(device_event, evset, vrings, thread_id)
}
+
+ fn set_device_state_fd(
+ &self,
+ direction: VhostTransferStateDirection,
+ phase: VhostTransferStatePhase,
+ file: File,
+ ) -> Result<Option<File>> {
+ self.deref().set_device_state_fd(direction, phase, file)
+ }
+
+ fn check_device_state(&self) -> Result<()> {
+ self.deref().check_device_state()
+ }
}
-impl<T: VhostUserBackendMut<V, B>, V, B> VhostUserBackend<V, B> for Mutex<T>
-where
- V: VringT<GM<B>>,
- B: Bitmap + 'static,
-{
+impl<T: VhostUserBackendMut> VhostUserBackend for Mutex<T> {
+ type Bitmap = T::Bitmap;
+ type Vring = T::Vring;
+
fn num_queues(&self) -> usize {
self.lock().unwrap().num_queues()
}
@@ -285,6 +402,10 @@
self.lock().unwrap().protocol_features()
}
+ fn reset_device(&self) {
+ self.lock().unwrap().reset_device()
+ }
+
fn set_event_idx(&self, enabled: bool) {
self.lock().unwrap().set_event_idx(enabled)
}
@@ -297,12 +418,16 @@
self.lock().unwrap().set_config(offset, buf)
}
- fn update_memory(&self, mem: GM<B>) -> Result<()> {
+ fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()> {
self.lock().unwrap().update_memory(mem)
}
- fn set_slave_req_fd(&self, slave: Slave) {
- self.lock().unwrap().set_slave_req_fd(slave)
+ fn set_backend_req_fd(&self, backend: Backend) {
+ self.lock().unwrap().set_backend_req_fd(backend)
+ }
+
+ fn set_gpu_socket(&self, gpu_backend: GpuBackend) -> Result<()> {
+ self.lock().unwrap().set_gpu_socket(gpu_backend)
}
fn queues_per_thread(&self) -> Vec<u64> {
@@ -317,20 +442,34 @@
&self,
device_event: u16,
evset: EventSet,
- vrings: &[V],
+ vrings: &[Self::Vring],
thread_id: usize,
- ) -> Result<bool> {
+ ) -> Result<()> {
self.lock()
.unwrap()
.handle_event(device_event, evset, vrings, thread_id)
}
+
+ fn set_device_state_fd(
+ &self,
+ direction: VhostTransferStateDirection,
+ phase: VhostTransferStatePhase,
+ file: File,
+ ) -> Result<Option<File>> {
+ self.lock()
+ .unwrap()
+ .set_device_state_fd(direction, phase, file)
+ }
+
+ fn check_device_state(&self) -> Result<()> {
+ self.lock().unwrap().check_device_state()
+ }
}
-impl<T: VhostUserBackendMut<V, B>, V, B> VhostUserBackend<V, B> for RwLock<T>
-where
- V: VringT<GM<B>>,
- B: Bitmap + 'static,
-{
+impl<T: VhostUserBackendMut> VhostUserBackend for RwLock<T> {
+ type Bitmap = T::Bitmap;
+ type Vring = T::Vring;
+
fn num_queues(&self) -> usize {
self.read().unwrap().num_queues()
}
@@ -351,6 +490,10 @@
self.read().unwrap().protocol_features()
}
+ fn reset_device(&self) {
+ self.write().unwrap().reset_device()
+ }
+
fn set_event_idx(&self, enabled: bool) {
self.write().unwrap().set_event_idx(enabled)
}
@@ -363,12 +506,16 @@
self.write().unwrap().set_config(offset, buf)
}
- fn update_memory(&self, mem: GM<B>) -> Result<()> {
+ fn update_memory(&self, mem: GM<Self::Bitmap>) -> Result<()> {
self.write().unwrap().update_memory(mem)
}
- fn set_slave_req_fd(&self, slave: Slave) {
- self.write().unwrap().set_slave_req_fd(slave)
+ fn set_backend_req_fd(&self, backend: Backend) {
+ self.write().unwrap().set_backend_req_fd(backend)
+ }
+
+ fn set_gpu_socket(&self, gpu_backend: GpuBackend) -> Result<()> {
+ self.write().unwrap().set_gpu_socket(gpu_backend)
}
fn queues_per_thread(&self) -> Vec<u64> {
@@ -383,19 +530,35 @@
&self,
device_event: u16,
evset: EventSet,
- vrings: &[V],
+ vrings: &[Self::Vring],
thread_id: usize,
- ) -> Result<bool> {
+ ) -> Result<()> {
self.write()
.unwrap()
.handle_event(device_event, evset, vrings, thread_id)
}
+
+ fn set_device_state_fd(
+ &self,
+ direction: VhostTransferStateDirection,
+ phase: VhostTransferStatePhase,
+ file: File,
+ ) -> Result<Option<File>> {
+ self.write()
+ .unwrap()
+ .set_device_state_fd(direction, phase, file)
+ }
+
+ fn check_device_state(&self) -> Result<()> {
+ self.read().unwrap().check_device_state()
+ }
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::VringRwLock;
+ use libc::EFD_NONBLOCK;
use std::sync::Mutex;
use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
@@ -403,19 +566,33 @@
events: u64,
event_idx: bool,
acked_features: u64,
+ exit_event_fds: Vec<EventFd>,
}
impl MockVhostBackend {
pub fn new() -> Self {
- MockVhostBackend {
+ let mut backend = MockVhostBackend {
events: 0,
event_idx: false,
acked_features: 0,
- }
+ exit_event_fds: vec![],
+ };
+
+ // Create an event_fd for each thread. We make it non-blocking (EFD_NONBLOCK) in
+ // order to allow tests maximum flexibility in checking whether
+ // signals arrived or not.
+ backend.exit_event_fds = (0..backend.queues_per_thread().len())
+ .map(|_| EventFd::new(EFD_NONBLOCK).unwrap())
+ .collect();
+
+ backend
}
}
- impl VhostUserBackendMut<VringRwLock, ()> for MockVhostBackend {
+ impl VhostUserBackendMut for MockVhostBackend {
+ type Bitmap = ();
+ type Vring = VringRwLock;
+
fn num_queues(&self) -> usize {
2
}
@@ -436,6 +613,12 @@
VhostUserProtocolFeatures::all()
}
+ fn reset_device(&mut self) {
+ self.event_idx = false;
+ self.events = 0;
+ self.acked_features = 0;
+ }
+
fn set_event_idx(&mut self, enabled: bool) {
self.event_idx = enabled;
}
@@ -459,16 +642,19 @@
Ok(())
}
- fn set_slave_req_fd(&mut self, _slave: Slave) {}
+ fn set_backend_req_fd(&mut self, _backend: Backend) {}
fn queues_per_thread(&self) -> Vec<u64> {
vec![1, 1]
}
- fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
- let event_fd = EventFd::new(0).unwrap();
-
- Some(event_fd)
+ fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
+ Some(
+ self.exit_event_fds
+ .get(thread_index)?
+ .try_clone()
+ .expect("Could not clone exit eventfd"),
+ )
}
fn handle_event(
@@ -477,10 +663,10 @@
_evset: EventSet,
_vrings: &[VringRwLock],
_thread_id: usize,
- ) -> Result<bool> {
+ ) -> Result<()> {
self.events += 1;
- Ok(false)
+ Ok(())
}
}
@@ -512,6 +698,11 @@
GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
);
backend.update_memory(mem).unwrap();
+
+ backend.reset_device();
+ assert!(backend.lock().unwrap().events == 0);
+ assert!(!backend.lock().unwrap().event_idx);
+ assert!(backend.lock().unwrap().acked_features == 0);
}
#[test]
@@ -547,5 +738,10 @@
backend
.handle_event(0x1, EventSet::IN, &[vring], 0)
.unwrap();
+
+ backend.reset_device();
+ assert!(backend.read().unwrap().events == 0);
+ assert!(!backend.read().unwrap().event_idx);
+ assert!(backend.read().unwrap().acked_features == 0);
}
}
diff --git a/crates/vhost-user-backend/src/bitmap.rs b/crates/vhost-user-backend/src/bitmap.rs
new file mode 100644
index 0000000..a9864b1
--- /dev/null
+++ b/crates/vhost-user-backend/src/bitmap.rs
@@ -0,0 +1,632 @@
+// Copyright (C) 2024 Red Hat, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+use std::ops::Index;
+use std::os::fd::{AsRawFd, BorrowedFd};
+use std::sync::atomic::{AtomicU8, Ordering};
+use std::sync::{Arc, RwLock};
+use std::{io, ptr};
+use vm_memory::bitmap::{Bitmap, BitmapSlice, WithBitmapSlice};
+use vm_memory::mmap::NewBitmap;
+use vm_memory::{Address, GuestMemoryRegion};
+
+// Size in bytes of the `VHOST_LOG_PAGE`
+const LOG_PAGE_SIZE: usize = 0x1000;
+// Number of bits grouped together as a basic storage unit ("word") in the bitmap
+// (i.e., in this case one byte tracks 8 pages, one bit per page).
+const LOG_WORD_SIZE: usize = u8::BITS as usize;
+
+/// A `Bitmap` with an internal `Bitmap` that can be replaced at runtime
+pub trait BitmapReplace: Bitmap {
+ type InnerBitmap: MemRegionBitmap;
+
+ /// Replace the internal `Bitmap`
+ fn replace(&self, bitmap: Self::InnerBitmap);
+}
+
+/// A bitmap relative to a memory region
+pub trait MemRegionBitmap: Sized {
+ /// Creates a new bitmap relative to `region`, using the `logmem` as
+ /// backing memory for the bitmap
+ fn new<R: GuestMemoryRegion>(region: &R, logmem: Arc<MmapLogReg>) -> io::Result<Self>;
+}
+
+// TODO: This impl is a quick and dirty hack to allow the tests to continue using
+// `GuestMemoryMmap<()>`. Sadly this is exposed in the public API, but it should
+// be moved to an internal mock library.
+impl BitmapReplace for () {
+ type InnerBitmap = ();
+
+ // this implementation must not be used if the backend sets `VHOST_USER_PROTOCOL_F_LOG_SHMFD`
+ fn replace(&self, _bitmap: ()) {
+ panic!("The unit bitmap () must not be used if VHOST_USER_PROTOCOL_F_LOG_SHMFD is set");
+ }
+}
+
+impl MemRegionBitmap for () {
+ fn new<R: GuestMemoryRegion>(_region: &R, _logmem: Arc<MmapLogReg>) -> io::Result<Self> {
+ Err(io::Error::from(io::ErrorKind::Unsupported))
+ }
+}
+
+/// `BitmapMmapRegion` implements a bitmap that can be replaced at runtime.
+/// The main use case is to support live migration on vhost-user backends
+/// (see `VHOST_USER_PROTOCOL_F_LOG_SHMFD` and `VHOST_USER_SET_LOG_BASE` in the vhost-user protocol
+/// specification). It uses a fixed memory page size of `VHOST_LOG_PAGE` bytes (i.e., `4096` bytes),
+/// so it converts addresses to page numbers before setting or clearing the bits.
+///
+/// To use this bitmap you need to define the memory as `GuestMemoryMmap<BitmapMmapRegion>`.
+///
+/// Note:
+/// This implementation uses `std::sync::RwLock`. The priority policy of the lock depends on
+/// the underlying operating system's implementation and does not guarantee any particular
+/// policy; on systems other than Linux, a thread trying to acquire the lock may starve.
+#[derive(Default, Debug, Clone)]
+pub struct BitmapMmapRegion {
+ // TODO: To avoid both reader and writer starvation we can replace the `std::sync::RwLock` with
+ // `parking_lot::RwLock`.
+ inner: Arc<RwLock<Option<AtomicBitmapMmap>>>,
+ base_address: usize, // The slice's base address
+}
+
+impl Bitmap for BitmapMmapRegion {
+ fn mark_dirty(&self, offset: usize, len: usize) {
+ let inner = self.inner.read().unwrap();
+ if let Some(bitmap) = inner.as_ref() {
+ if let Some(absolute_offset) = self.base_address.checked_add(offset) {
+ bitmap.mark_dirty(absolute_offset, len);
+ }
+ }
+ }
+
+ fn dirty_at(&self, offset: usize) -> bool {
+ let inner = self.inner.read().unwrap();
+ inner
+ .as_ref()
+ .is_some_and(|bitmap| bitmap.dirty_at(self.base_address.saturating_add(offset)))
+ }
+
+ fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice>::S {
+ Self {
+ inner: Arc::clone(&self.inner),
+ base_address: self.base_address.saturating_add(offset),
+ }
+ }
+}
+
+impl BitmapReplace for BitmapMmapRegion {
+ type InnerBitmap = AtomicBitmapMmap;
+
+ fn replace(&self, bitmap: AtomicBitmapMmap) {
+ let mut inner = self.inner.write().unwrap();
+ inner.replace(bitmap);
+ }
+}
+
+impl BitmapSlice for BitmapMmapRegion {}
+
+impl<'a> WithBitmapSlice<'a> for BitmapMmapRegion {
+ type S = Self;
+}
+
+impl NewBitmap for BitmapMmapRegion {
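+ // The log area is not known when the guest memory is created, so `with_len` ignores the
+ // requested length and returns an empty bitmap; the real mmap-backed bitmap is installed
+ // later through `BitmapReplace::replace` while handling `VHOST_USER_SET_LOG_BASE`.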
+ fn with_len(_len: usize) -> Self {
+ Self::default()
+ }
+}
+
+/// `AtomicBitmapMmap` implements a simple memory-mapped bitmap on the page level with test
+/// and set operations. The main use case is to support live migration on vhost-user backends
+/// (see `VHOST_USER_PROTOCOL_F_LOG_SHMFD` and `VHOST_USER_SET_LOG_BASE` in the vhost-user protocol
+/// specification). It uses a fixed memory page size of `LOG_PAGE_SIZE` bytes, so it converts
+/// addresses to page numbers before setting or clearing the bits.
+#[derive(Debug)]
+pub struct AtomicBitmapMmap {
+ logmem: Arc<MmapLogReg>,
+ pages_before_region: usize, // Number of pages to ignore from the start of the bitmap
+ number_of_pages: usize, // Number of total pages indexed in the bitmap for this region
+}
+
+// `AtomicBitmapMmap` implements a simple bitmap that is page-size aware and relative
+// to a memory region. It handles the `log` memory-mapped area. Each page is indexed
+// inside a word of `LOG_WORD_SIZE` bits, so even if the bitmap starts at the beginning of
+// the mapped area, the memory region does not necessarily have to start at the beginning of
+// that word.
+// Note: we don't implement `Bitmap` because we cannot implement `slice_at()`
+impl MemRegionBitmap for AtomicBitmapMmap {
+ // Creates a new memory-mapped bitmap for the memory region. This bitmap must fit within the
+ // log mapped memory.
+ fn new<R: GuestMemoryRegion>(region: &R, logmem: Arc<MmapLogReg>) -> io::Result<Self> {
+ let region_start_addr: usize = region.start_addr().raw_value().io_try_into()?;
+ let region_len: usize = region.len().io_try_into()?;
+ if region_len == 0 {
+ return Err(io::Error::from(io::ErrorKind::InvalidData));
+ }
+
+ // The size of the log should be large enough to cover all known guest addresses.
+ let region_end_addr = region_start_addr
+ .checked_add(region_len - 1)
+ .ok_or(io::Error::from(io::ErrorKind::InvalidData))?;
+ let region_end_log_word = page_word(page_number(region_end_addr));
+ if region_end_log_word >= logmem.len() {
+ return Err(io::Error::from(io::ErrorKind::InvalidData));
+ }
+
+ // The frontend sends a single bitmap (i.e., the log memory to be mapped using `fd`,
+ // `mmap_offset` and `mmap_size`) that covers the entire guest memory.
+ // However, since each memory region requires a bitmap relative to it, we have to
+ // adjust the offset and size, in number of pages, of this region.
+ let offset_pages = page_number(region_start_addr);
+ let size_page = page_number(region_len);
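+ // For example, a region starting at guest address 0x8000 with a length of 0x4000 spans
+ // pages 8..12, so `offset_pages` is 8 and `size_page` is 4.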
+
+ Ok(Self {
+ logmem,
+ pages_before_region: offset_pages,
+ number_of_pages: size_page,
+ })
+ }
+}
+
+impl AtomicBitmapMmap {
+ // Sets the memory range as dirty. The `offset` is relative to the memory region,
+ // so an offset of `0` references the start of the memory region. Any attempt to
+ // access beyond the end of the bitmap is simply ignored.
+ fn mark_dirty(&self, offset: usize, len: usize) {
+ if len == 0 {
+ return;
+ }
+
+ let first_page = page_number(offset);
+ let last_page = page_number(offset.saturating_add(len - 1));
+ for page in first_page..=last_page {
+ if page >= self.number_of_pages {
+ break; // ignore out of bound access
+ }
+
+ // get the absolute page number
+ let page = self.pages_before_region + page;
+ self.logmem[page_word(page)].fetch_or(1 << page_bit(page), Ordering::Relaxed);
+ }
+ }
+
+ // Check whether the specified offset is marked as dirty. The `offset` is relative
+ // to the memory region, so a `0` offset references the start of the memory region.
+ // Any attempt to access beyond the end of the bitmap is simply ignored.
+ fn dirty_at(&self, offset: usize) -> bool {
+ let page = page_number(offset);
+ if page >= self.number_of_pages {
+ return false; // ignore out of bound access
+ }
+
+ // get the absolute page number
+ let page = self.pages_before_region + page;
+ let page_bit = self.logmem[page_word(page)].load(Ordering::Relaxed) & (1 << page_bit(page));
+ page_bit != 0
+ }
+}
+
+/// `MmapLogReg` mmaps the frontend's bitmap backing memory into the current process.
+#[derive(Debug)]
+pub struct MmapLogReg {
+ addr: *const AtomicU8,
+ len: usize,
+}
+
+// SAFETY: Send is not automatically implemented because of the raw pointer.
+// No one besides `MmapLogReg` has the raw pointer, so we can safely transfer it to another thread.
+unsafe impl Send for MmapLogReg {}
+
+// SAFETY: Sync is not automatically implemented because of the raw pointer.
+// `MmapLogReg` doesn't have any interior mutability, and all accesses to `&AtomicU8`
+// are done through atomic operations.
+unsafe impl Sync for MmapLogReg {}
+
+impl MmapLogReg {
+ // Note: We could try to adjust the mapping area to only cover the memory region, but
+ // the region's starting address is not guaranteed to be LOG_WORD_SIZE-page aligned,
+ // which makes the implementation needlessly cumbersome.
+ // Note: The specification does not define whether the offset must be page-aligned or not.
+ // But, since we receive the offset from the frontend to be used in the mmap call,
+ // we assume it is properly aligned (currently, QEMU always sends an offset of 0).
+ pub(crate) fn from_file(fd: BorrowedFd, offset: u64, len: u64) -> io::Result<Self> {
+ let offset: isize = offset.io_try_into()?;
+ let len: usize = len.io_try_into()?;
+
+ // Let's uphold the safety contract for `std::ptr::offset()`.
+ if len > isize::MAX as usize {
+ return Err(io::Error::from(io::ErrorKind::InvalidData));
+ }
+
+ // SAFETY: `fd` is a valid file descriptor and we are not using `libc::MAP_FIXED`.
+ let addr = unsafe {
+ libc::mmap(
+ ptr::null_mut(),
+ len as libc::size_t,
+ libc::PROT_READ | libc::PROT_WRITE,
+ libc::MAP_SHARED,
+ fd.as_raw_fd(),
+ offset as libc::off_t,
+ )
+ };
+
+ if addr == libc::MAP_FAILED {
+ return Err(io::Error::last_os_error());
+ }
+
+ Ok(Self {
+ addr: addr as *const AtomicU8,
+ len,
+ })
+ }
+
+ fn len(&self) -> usize {
+ self.len
+ }
+}
+
+impl Index<usize> for MmapLogReg {
+ type Output = AtomicU8;
+
+ // It's ok to get a reference to an atomic value.
+ fn index(&self, index: usize) -> &Self::Output {
+ assert!(index < self.len);
+ // Note: Instead of `&*` we can use `AtomicU8::from_ptr()` as soon as it gets stabilized.
+ // SAFETY: `self.addr` is a valid and properly aligned pointer. Also, `self.addr` + `index`
+ // doesn't wrap around and is contained within the mapped memory region.
+ unsafe { &*self.addr.add(index) }
+ }
+}
+
+impl Drop for MmapLogReg {
+ fn drop(&mut self) {
+ // SAFETY: `addr` is properly aligned; we are also sure that this is the
+ // last reference alive and/or that we have exclusive access to this object.
+ unsafe {
+ libc::munmap(self.addr as *mut libc::c_void, self.len as libc::size_t);
+ }
+ }
+}
+
+trait IoTryInto<T: TryFrom<Self>>: Sized {
+ fn io_try_into(self) -> io::Result<T>;
+}
+
+impl<TySrc, TyDst> IoTryInto<TyDst> for TySrc
+where
+ TyDst: TryFrom<TySrc>,
+ <TyDst as TryFrom<TySrc>>::Error: Send + Sync + std::error::Error + 'static,
+{
+ fn io_try_into(self) -> io::Result<TyDst> {
+ self.try_into()
+ .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
+ }
+}
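+
+// For instance, `AtomicBitmapMmap::new()` above converts the region's start address with
+// `region.start_addr().raw_value().io_try_into()?`, turning any value that does not fit in
+// `usize` into an `io::Error` of kind `InvalidData` instead of panicking.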
+
+#[inline]
+// Get the page number corresponding to the address `addr`
+fn page_number(addr: usize) -> usize {
+ addr / LOG_PAGE_SIZE
+}
+
+#[inline]
+// Get the bitmap word that tracks the given page.
+// Each page is indexed inside a word of `LOG_WORD_SIZE` bits.
+fn page_word(page: usize) -> usize {
+ page / LOG_WORD_SIZE
+}
+
+#[inline]
+// Get the bit index inside a word of `LOG_WORD_SIZE` bits
+fn page_bit(page: usize) -> usize {
+ page % LOG_WORD_SIZE
+}
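+
+// Worked example: with `LOG_PAGE_SIZE` = 0x1000 and `LOG_WORD_SIZE` = 8, address 0x6800 lies
+// in page 6 (0x6800 / 0x1000), which is tracked by log byte 0 (6 / 8) at bit 6 (6 % 8).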
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::fs::File;
+ use std::io::Write;
+ use std::os::fd::AsFd;
+ use vm_memory::{GuestAddress, GuestRegionMmap};
+ use vmm_sys_util::tempfile::TempFile;
+
+ // Helper method to check whether a specified range is clean.
+ pub fn range_is_clean<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
+ (start..start + len).all(|offset| !b.dirty_at(offset))
+ }
+
+ // Helper method to check whether a specified range is dirty.
+ pub fn range_is_dirty<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
+ (start..start + len).all(|offset| b.dirty_at(offset))
+ }
+
+ fn tmp_file(len: usize) -> File {
+ let mut f = TempFile::new().unwrap().into_file();
+ let buf = vec![0; len];
+ f.write_all(buf.as_ref()).unwrap();
+ f
+ }
+
+ fn test_all(b: &BitmapMmapRegion, len: usize) {
+ assert!(range_is_clean(b, 0, len), "The bitmap should be clean");
+
+ b.mark_dirty(0, len);
+ assert!(range_is_dirty(b, 0, len), "The bitmap should be dirty");
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Miri cannot mmap files
+ fn test_bitmap_region_bigger_than_log() {
+ // Let's create a log memory area to track 8 pages,
+ // since 1 bit corresponds to 1 page, we need a 1-byte log memory area.
+ let mmap_offset: u64 = 0;
+ let mmap_size = 1; // 1 byte = 8 bits/pages
+ let f = tmp_file(mmap_size);
+
+ // A guest memory region of 16 pages
+ let region_start_addr = GuestAddress(mmap_offset);
+ let region_len = LOG_PAGE_SIZE * 16;
+ let region: GuestRegionMmap<()> =
+ GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();
+
+ let logmem =
+ Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());
+
+ let log = AtomicBitmapMmap::new(®ion, logmem);
+
+ assert!(log.is_err());
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Miri cannot mmap files
+ fn test_bitmap_log_and_region_same_size() {
+ // A log memory area able to track 32 pages
+ let mmap_offset: u64 = 0;
+ let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
+ let f = tmp_file(mmap_size);
+
+ // A 32-page guest memory region
+ let region_start_addr = GuestAddress::new(mmap_offset);
+ let region_len = LOG_PAGE_SIZE * 32;
+ let region: GuestRegionMmap<()> =
+ GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();
+
+ let logmem =
+ Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());
+
+ let log = AtomicBitmapMmap::new(®ion, logmem);
+ assert!(log.is_ok());
+ let log = log.unwrap();
+
+ let bitmap = BitmapMmapRegion::default();
+ bitmap.replace(log);
+
+ test_all(&bitmap, region_len);
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Miri cannot mmap files
+ fn test_bitmap_region_smaller_than_log() {
+ // A log memory area able to track 32 pages
+ let mmap_offset: u64 = 0;
+ let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
+ let f = tmp_file(mmap_size);
+
+ // A 16-page guest memory region
+ let region_start_addr = GuestAddress::new(mmap_offset);
+ let region_len = LOG_PAGE_SIZE * 16;
+ let region: GuestRegionMmap<()> =
+ GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();
+
+ let logmem =
+ Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());
+
+ let log = AtomicBitmapMmap::new(®ion, logmem);
+ assert!(log.is_ok());
+ let log = log.unwrap();
+
+ let bitmap = BitmapMmapRegion::default();
+
+ bitmap.replace(log);
+
+ test_all(&bitmap, region_len);
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Miri cannot mmap files
+ fn test_bitmap_region_smaller_than_one_word() {
+ // A log memory area able to track 32 pages
+ let mmap_offset: u64 = 0;
+ let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
+ let f = tmp_file(mmap_size);
+
+ // A 6-page guest memory region
+ let region_start_addr = GuestAddress::new(mmap_offset);
+ let region_len = LOG_PAGE_SIZE * 6;
+ let region: GuestRegionMmap<()> =
+ GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();
+
+ let logmem =
+ Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());
+
+ let log = AtomicBitmapMmap::new(®ion, logmem);
+ assert!(log.is_ok());
+ let log = log.unwrap();
+
+ let bitmap = BitmapMmapRegion::default();
+ bitmap.replace(log);
+
+ test_all(&bitmap, region_len);
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Miri cannot mmap files
+ fn test_bitmap_two_regions_overlapping_word_first_dirty() {
+ // A log memory area able to track 32 pages
+ let mmap_offset: u64 = 0;
+ let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
+ let f = tmp_file(mmap_size);
+
+ let logmem =
+ Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());
+
+ // An 11-page guest memory region
+ let region0_start_addr = GuestAddress::new(mmap_offset);
+ let region0_len = LOG_PAGE_SIZE * 11;
+ let region0: GuestRegionMmap<()> =
+ GuestRegionMmap::from_range(region0_start_addr, region0_len, None).unwrap();
+
+ let log0 = AtomicBitmapMmap::new(®ion0, Arc::clone(&logmem));
+ assert!(log0.is_ok());
+ let log0 = log0.unwrap();
+ let bitmap0 = BitmapMmapRegion::default();
+ bitmap0.replace(log0);
+
+ // A 1-page guest memory region
+ let region1_start_addr = GuestAddress::new(mmap_offset + LOG_PAGE_SIZE as u64 * 14);
+ let region1_len = LOG_PAGE_SIZE;
+ let region1: GuestRegionMmap<()> =
+ GuestRegionMmap::from_range(region1_start_addr, region1_len, None).unwrap();
+
+ let log1 = AtomicBitmapMmap::new(®ion1, Arc::clone(&logmem));
+ assert!(log1.is_ok());
+ let log1 = log1.unwrap();
+
+ let bitmap1 = BitmapMmapRegion::default();
+ bitmap1.replace(log1);
+
+ // Both regions should be clean
+ assert!(
+ range_is_clean(&bitmap0, 0, region0_len),
+ "The bitmap0 should be clean"
+ );
+ assert!(
+ range_is_clean(&bitmap1, 0, region1_len),
+ "The bitmap1 should be clean"
+ );
+
+ // After marking region 0, region 1 should remain clean
+ bitmap0.mark_dirty(0, region0_len);
+
+ assert!(
+ range_is_dirty(&bitmap0, 0, region0_len),
+ "The bitmap0 should be dirty"
+ );
+ assert!(
+ range_is_clean(&bitmap1, 0, region1_len),
+ "The bitmap1 should be clean"
+ );
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Miri cannot mmap files
+ fn test_bitmap_two_regions_overlapping_word_second_dirty() {
+ // A log memory area able to track 32 pages
+ let mmap_offset: u64 = 0;
+ let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
+ let f = tmp_file(mmap_size);
+
+ let logmem =
+ Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());
+
+ // An 11-page guest memory region
+ let region0_start_addr = GuestAddress::new(mmap_offset);
+ let region0_len = LOG_PAGE_SIZE * 11;
+ let region0: GuestRegionMmap<()> =
+ GuestRegionMmap::from_range(region0_start_addr, region0_len, None).unwrap();
+
+ let log0 = AtomicBitmapMmap::new(®ion0, Arc::clone(&logmem));
+ assert!(log0.is_ok());
+ let log0 = log0.unwrap();
+
+ let bitmap0 = BitmapMmapRegion::default();
+ bitmap0.replace(log0);
+
+ // A 1-page guest memory region
+ let region1_start_addr = GuestAddress::new(mmap_offset + LOG_PAGE_SIZE as u64 * 14);
+ let region1_len = LOG_PAGE_SIZE;
+ let region1: GuestRegionMmap<()> =
+ GuestRegionMmap::from_range(region1_start_addr, region1_len, None).unwrap();
+
+ let log1 = AtomicBitmapMmap::new(®ion1, Arc::clone(&logmem));
+ assert!(log1.is_ok());
+ let log1 = log1.unwrap();
+
+ let bitmap1 = BitmapMmapRegion::default();
+ bitmap1.replace(log1);
+
+ // Both regions should be clean
+ assert!(
+ range_is_clean(&bitmap0, 0, region0_len),
+ "The bitmap0 should be clean"
+ );
+ assert!(
+ range_is_clean(&bitmap1, 0, region1_len),
+ "The bitmap1 should be clean"
+ );
+
+ // After marking region 1, region 0 should remain clean
+ bitmap1.mark_dirty(0, region1_len);
+
+ assert!(
+ range_is_dirty(&bitmap1, 0, region1_len),
+ "The bitmap0 should be dirty"
+ );
+ assert!(
+ range_is_clean(&bitmap0, 0, region0_len),
+ "The bitmap1 should be clean"
+ );
+ }
+
+ #[test]
+ #[cfg(not(miri))] // Miri cannot mmap files
+ fn test_bitmap_region_slice() {
+ // A log memory area able to track 32 pages
+ let mmap_offset: u64 = 0;
+ let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
+ let f = tmp_file(mmap_size);
+
+ // A 32-page guest memory region
+ let region_start_addr = GuestAddress::new(mmap_offset);
+ let region_len = LOG_PAGE_SIZE * 32;
+ let region: GuestRegionMmap<()> =
+ GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();
+
+ let logmem =
+ Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());
+
+ let log = AtomicBitmapMmap::new(®ion, logmem);
+ assert!(log.is_ok());
+ let log = log.unwrap();
+
+ let bitmap = BitmapMmapRegion::default();
+ bitmap.replace(log);
+
+ assert!(
+ range_is_clean(&bitmap, 0, region_len),
+ "The bitmap should be clean"
+ );
+
+ // Let's get a slice of half the bitmap
+ let slice_len = region_len / 2;
+ let slice = bitmap.slice_at(slice_len);
+ assert!(
+ range_is_clean(&slice, 0, slice_len),
+ "The slice should be clean"
+ );
+
+ slice.mark_dirty(0, slice_len);
+ assert!(
+ range_is_dirty(&slice, 0, slice_len),
+ "The slice should be dirty"
+ );
+ assert!(
+ range_is_clean(&bitmap, 0, slice_len),
+ "The first half of the bitmap should be clean"
+ );
+ assert!(
+ range_is_dirty(&bitmap, slice_len, region_len - slice_len),
+ "The last half of the bitmap should be dirty"
+ );
+ }
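+
+ // A minimal sketch of the pre-`replace()` behaviour: until an inner `AtomicBitmapMmap`
+ // is installed, a default `BitmapMmapRegion` is inert, i.e. marking pages is a no-op and
+ // every offset reads back as clean. This is the state the backend's memory bitmaps are in
+ // before `VHOST_USER_SET_LOG_BASE` is received.
+ #[test]
+ fn test_bitmap_without_inner_bitmap_is_inert() {
+ let bitmap = BitmapMmapRegion::default();
+
+ bitmap.mark_dirty(0, LOG_PAGE_SIZE * 4);
+ assert!(
+ range_is_clean(&bitmap, 0, LOG_PAGE_SIZE * 4),
+ "Without an inner bitmap nothing should be marked dirty"
+ );
+
+ // Slices share the same (empty) inner bitmap, so they are inert as well.
+ let slice = bitmap.slice_at(LOG_PAGE_SIZE);
+ slice.mark_dirty(0, LOG_PAGE_SIZE);
+ assert!(range_is_clean(&slice, 0, LOG_PAGE_SIZE));
+ }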
+}
diff --git a/crates/vhost-user-backend/src/event_loop.rs b/crates/vhost-user-backend/src/event_loop.rs
index f10aad3..6cc1c2c 100644
--- a/crates/vhost-user-backend/src/event_loop.rs
+++ b/crates/vhost-user-backend/src/event_loop.rs
@@ -8,13 +8,11 @@
use std::marker::PhantomData;
use std::os::unix::io::{AsRawFd, RawFd};
-use vm_memory::bitmap::Bitmap;
use vmm_sys_util::epoll::{ControlOperation, Epoll, EpollEvent, EventSet};
use vmm_sys_util::eventfd::EventFd;
use super::backend::VhostUserBackend;
use super::vring::VringT;
-use super::GM;
/// Errors related to vring epoll event handling.
#[derive(Debug)]
@@ -58,16 +56,16 @@
/// - add file descriptors to be monitored by the epoll fd
/// - remove registered file descriptors from the epoll fd
/// - run the event loop to handle pending events on the epoll fd
-pub struct VringEpollHandler<S, V, B> {
+pub struct VringEpollHandler<T: VhostUserBackend> {
epoll: Epoll,
- backend: S,
- vrings: Vec<V>,
+ backend: T,
+ vrings: Vec<T::Vring>,
thread_id: usize,
exit_event_fd: Option<EventFd>,
- phantom: PhantomData<B>,
+ phantom: PhantomData<T::Bitmap>,
}
-impl<S, V, B> VringEpollHandler<S, V, B> {
+impl<T: VhostUserBackend> VringEpollHandler<T> {
/// Send `exit event` to break the event loop.
pub fn send_exit_event(&self) {
if let Some(eventfd) = self.exit_event_fd.as_ref() {
@@ -76,14 +74,16 @@
}
}
-impl<S, V, B> VringEpollHandler<S, V, B>
+impl<T> VringEpollHandler<T>
where
- S: VhostUserBackend<V, B>,
- V: VringT<GM<B>>,
- B: Bitmap + 'static,
+ T: VhostUserBackend,
{
/// Create a `VringEpollHandler` instance.
- pub(crate) fn new(backend: S, vrings: Vec<V>, thread_id: usize) -> VringEpollResult<Self> {
+ pub(crate) fn new(
+ backend: T,
+ vrings: Vec<T::Vring>,
+ thread_id: usize,
+ ) -> VringEpollResult<Self> {
let epoll = Epoll::new().map_err(VringEpollError::EpollCreateFd)?;
let exit_event_fd = backend.exit_event(thread_id);
@@ -211,11 +211,13 @@
self.backend
.handle_event(device_event, evset, &self.vrings, self.thread_id)
- .map_err(VringEpollError::HandleEventBackendHandling)
+ .map_err(VringEpollError::HandleEventBackendHandling)?;
+
+ Ok(false)
}
}
-impl<S, V, B> AsRawFd for VringEpollHandler<S, V, B> {
+impl<T: VhostUserBackend> AsRawFd for VringEpollHandler<T> {
fn as_raw_fd(&self) -> RawFd {
self.epoll.as_raw_fd()
}
diff --git a/crates/vhost-user-backend/src/handler.rs b/crates/vhost-user-backend/src/handler.rs
index 262bf6c..9217870 100644
--- a/crates/vhost-user-backend/src/handler.rs
+++ b/crates/vhost-user-backend/src/handler.rs
@@ -6,23 +6,33 @@
use std::error;
use std::fs::File;
use std::io;
+use std::os::fd::AsFd;
+#[cfg(feature = "postcopy")]
+use std::os::fd::FromRawFd;
use std::os::unix::io::AsRawFd;
use std::sync::Arc;
use std::thread;
+use crate::bitmap::{BitmapReplace, MemRegionBitmap, MmapLogReg};
+#[cfg(feature = "postcopy")]
+use userfaultfd::{Uffd, UffdBuilder};
use vhost::vhost_user::message::{
- VhostUserConfigFlags, VhostUserMemoryRegion, VhostUserProtocolFeatures,
- VhostUserSingleMemoryRegion, VhostUserVirtioFeatures, VhostUserVringAddrFlags,
- VhostUserVringState,
+ VhostTransferStateDirection, VhostTransferStatePhase, VhostUserConfigFlags, VhostUserLog,
+ VhostUserMemoryRegion, VhostUserProtocolFeatures, VhostUserSingleMemoryRegion,
+ VhostUserVirtioFeatures, VhostUserVringAddrFlags, VhostUserVringState,
};
+use vhost::vhost_user::GpuBackend;
use vhost::vhost_user::{
- Error as VhostUserError, Result as VhostUserResult, Slave, VhostUserSlaveReqHandlerMut,
+ Backend, Error as VhostUserError, Result as VhostUserResult, VhostUserBackendReqHandlerMut,
};
+
use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use virtio_queue::{Error as VirtQueError, QueueT};
-use vm_memory::bitmap::Bitmap;
use vm_memory::mmap::NewBitmap;
-use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemoryMmap, GuestRegionMmap};
+use vm_memory::{
+ GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryMmap, GuestMemoryRegion,
+ GuestRegionMmap,
+};
use vmm_sys_util::epoll::EventSet;
use super::backend::VhostUserBackend;
@@ -31,7 +41,10 @@
use super::vring::VringT;
use super::GM;
-const MAX_MEM_SLOTS: u64 = 32;
+// vhost in the kernel usually supports 509 mem slots.
+// The 509 comes from the old KVM limit: KVM supported 512 slots, but 3 were reserved
+// for internal purposes (nowadays, it supports more than that).
+const MAX_MEM_SLOTS: u64 = 509;
#[derive(Debug)]
/// Errors related to vhost-user handler.
@@ -68,15 +81,18 @@
/// Result of vhost-user handler operations.
pub type VhostUserHandlerResult<T> = std::result::Result<T, VhostUserHandlerError>;
+#[derive(Debug)]
struct AddrMapping {
+ #[cfg(feature = "postcopy")]
+ local_addr: u64,
vmm_addr: u64,
size: u64,
gpa_base: u64,
}
-pub struct VhostUserHandler<S, V, B: Bitmap + 'static> {
- backend: S,
- handlers: Vec<Arc<VringEpollHandler<S, V, B>>>,
+pub struct VhostUserHandler<T: VhostUserBackend> {
+ backend: T,
+ handlers: Vec<Arc<VringEpollHandler<T>>>,
owned: bool,
features_acked: bool,
acked_features: u64,
@@ -85,26 +101,28 @@
max_queue_size: usize,
queues_per_thread: Vec<u64>,
mappings: Vec<AddrMapping>,
- atomic_mem: GM<B>,
- vrings: Vec<V>,
+ atomic_mem: GM<T::Bitmap>,
+ vrings: Vec<T::Vring>,
+ #[cfg(feature = "postcopy")]
+ uffd: Option<Uffd>,
worker_threads: Vec<thread::JoinHandle<VringEpollResult<()>>>,
}
// Ensure VhostUserHandler: Clone + Send + Sync + 'static.
-impl<S, V, B> VhostUserHandler<S, V, B>
+impl<T> VhostUserHandler<T>
where
- S: VhostUserBackend<V, B> + Clone + 'static,
- V: VringT<GM<B>> + Clone + Send + Sync + 'static,
- B: Bitmap + Clone + Send + Sync + 'static,
+ T: VhostUserBackend + Clone + 'static,
+ T::Vring: Clone + Send + Sync + 'static,
+ T::Bitmap: Clone + Send + Sync + 'static,
{
- pub(crate) fn new(backend: S, atomic_mem: GM<B>) -> VhostUserHandlerResult<Self> {
+ pub(crate) fn new(backend: T, atomic_mem: GM<T::Bitmap>) -> VhostUserHandlerResult<Self> {
let num_queues = backend.num_queues();
let max_queue_size = backend.max_queue_size();
let queues_per_thread = backend.queues_per_thread();
let mut vrings = Vec::new();
for _ in 0..num_queues {
- let vring = V::new(atomic_mem.clone(), max_queue_size as u16)
+ let vring = T::Vring::new(atomic_mem.clone(), max_queue_size as u16)
.map_err(VhostUserHandlerError::CreateVring)?;
vrings.push(vring);
}
@@ -146,12 +164,14 @@
mappings: Vec::new(),
atomic_mem,
vrings,
+ #[cfg(feature = "postcopy")]
+ uffd: None,
worker_threads,
})
}
}
-impl<S, V, B: Bitmap> VhostUserHandler<S, V, B> {
+impl<T: VhostUserBackend> VhostUserHandler<T> {
pub(crate) fn send_exit_event(&self) {
for handler in self.handlers.iter() {
handler.send_exit_event();
@@ -169,17 +189,15 @@
}
}
-impl<S, V, B> VhostUserHandler<S, V, B>
+impl<T> VhostUserHandler<T>
where
- S: VhostUserBackend<V, B>,
- V: VringT<GM<B>>,
- B: Bitmap,
+ T: VhostUserBackend,
{
- pub(crate) fn get_epoll_handlers(&self) -> Vec<Arc<VringEpollHandler<S, V, B>>> {
+ pub(crate) fn get_epoll_handlers(&self) -> Vec<Arc<VringEpollHandler<T>>> {
self.handlers.clone()
}
- fn vring_needs_init(&self, vring: &V) -> bool {
+ fn vring_needs_init(&self, vring: &T::Vring) -> bool {
let vring_state = vring.get_ref();
// If the vring wasn't initialized and we already have an EventFd for
@@ -187,7 +205,7 @@
!vring_state.get_queue().ready() && vring_state.get_kick().is_some()
}
- fn initialize_vring(&self, vring: &V, index: u8) -> VhostUserResult<()> {
+ fn initialize_vring(&self, vring: &T::Vring, index: u8) -> VhostUserResult<()> {
assert!(vring.get_ref().get_kick().is_some());
if let Some(fd) = vring.get_ref().get_kick() {
@@ -203,7 +221,7 @@
}
}
- self.vrings[index as usize].set_queue_ready(true);
+ vring.set_queue_ready(true);
Ok(())
}
@@ -218,11 +236,9 @@
}
}
-impl<S, V, B> VhostUserSlaveReqHandlerMut for VhostUserHandler<S, V, B>
+impl<T: VhostUserBackend> VhostUserBackendReqHandlerMut for VhostUserHandler<T>
where
- S: VhostUserBackend<V, B>,
- V: VringT<GM<B>>,
- B: NewBitmap + Clone,
+ T::Bitmap: BitmapReplace + NewBitmap + Clone,
{
fn set_owner(&mut self) -> VhostUserResult<()> {
if self.owned {
@@ -240,6 +256,19 @@
Ok(())
}
+ fn reset_device(&mut self) -> VhostUserResult<()> {
+ // Disable all vrings
+ for vring in self.vrings.iter_mut() {
+ vring.set_enabled(false);
+ }
+
+ // Reset device state, retain protocol state
+ self.features_acked = false;
+ self.acked_features = 0;
+ self.backend.reset_device();
+ Ok(())
+ }
+
fn get_features(&mut self) -> VhostUserResult<u64> {
Ok(self.backend.features())
}
@@ -252,19 +281,25 @@
self.acked_features = features;
self.features_acked = true;
- // If VHOST_USER_F_PROTOCOL_FEATURES has not been negotiated,
- // the ring is initialized in an enabled state.
- // If VHOST_USER_F_PROTOCOL_FEATURES has been negotiated,
- // the ring is initialized in a disabled state. Client must not
- // pass data to/from the backend until ring is enabled by
- // VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has
- // been disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0.
- let vring_enabled =
- self.acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0;
- for vring in self.vrings.iter_mut() {
- vring.set_enabled(vring_enabled);
+ // Upon receiving a `VHOST_USER_SET_FEATURES` message from the front-end without
+ // `VHOST_USER_F_PROTOCOL_FEATURES` set, the back-end must enable all rings immediately.
+ // While processing the rings (whether they are enabled or not), the back-end must support
+ // changing some configuration aspects on the fly.
+ // (see https://qemu-project.gitlab.io/qemu/interop/vhost-user.html#ring-states)
+ //
+ // Note: If `VHOST_USER_F_PROTOCOL_FEATURES` has been negotiated we must leave
+ // the vrings in their current state.
+ if self.acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0 {
+ for vring in self.vrings.iter_mut() {
+ vring.set_enabled(true);
+ }
}
+ let event_idx: bool = (self.acked_features & (1 << VIRTIO_RING_F_EVENT_IDX)) != 0;
+ for vring in self.vrings.iter_mut() {
+ vring.set_queue_event_idx(event_idx);
+ }
+ self.backend.set_event_idx(event_idx);
self.backend.acked_features(self.acked_features);
Ok(())
@@ -281,20 +316,21 @@
let mut mappings: Vec<AddrMapping> = Vec::new();
for (region, file) in ctx.iter().zip(files) {
- regions.push(
- GuestRegionMmap::new(
- region.mmap_region(file)?,
- GuestAddress(region.guest_phys_addr),
- )
- .map_err(|e| {
- VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
- })?,
- );
+ let guest_region = GuestRegionMmap::new(
+ region.mmap_region(file)?,
+ GuestAddress(region.guest_phys_addr),
+ )
+ .map_err(|e| {
+ VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
+ })?;
mappings.push(AddrMapping {
+ #[cfg(feature = "postcopy")]
+ local_addr: guest_region.as_ptr() as u64,
vmm_addr: region.user_addr,
size: region.memory_size,
gpa_base: region.guest_phys_addr,
});
+ regions.push(guest_region);
}
let mem = GuestMemoryMmap::from_regions(regions).map_err(|e| {
@@ -316,10 +352,15 @@
}
fn set_vring_num(&mut self, index: u32, num: u32) -> VhostUserResult<()> {
- if index as usize >= self.num_queues || num == 0 || num as usize > self.max_queue_size {
+ let vring = self
+ .vrings
+ .get(index as usize)
+ .ok_or_else(|| VhostUserError::InvalidParam)?;
+
+ if num == 0 || num as usize > self.max_queue_size {
return Err(VhostUserError::InvalidParam);
}
- self.vrings[index as usize].set_queue_size(num as u16);
+ vring.set_queue_size(num as u16);
Ok(())
}
@@ -332,9 +373,10 @@
available: u64,
_log: u64,
) -> VhostUserResult<()> {
- if index as usize >= self.num_queues {
- return Err(VhostUserError::InvalidParam);
- }
+ let vring = self
+ .vrings
+ .get(index as usize)
+ .ok_or_else(|| VhostUserError::InvalidParam)?;
if !self.mappings.is_empty() {
let desc_table = self.vmm_va_to_gpa(descriptor).map_err(|e| {
@@ -346,7 +388,7 @@
let used_ring = self.vmm_va_to_gpa(used).map_err(|e| {
VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
})?;
- self.vrings[index as usize]
+ vring
.set_queue_info(desc_table, avail_ring, used_ring)
.map_err(|_| VhostUserError::InvalidParam)?;
@@ -360,10 +402,10 @@
// Note: I'm not sure why QEMU's vhost-user library sets the 'user' index here,
// _probably_ to make sure that the VQ is already configured. A better solution would
// be to receive the 'used' index in SET_VRING_BASE, as is done when using packed VQs.
- let idx = self.vrings[index as usize]
+ let idx = vring
.queue_used_idx()
- .map_err(|_| VhostUserError::SlaveInternalError)?;
- self.vrings[index as usize].set_queue_next_used(idx);
+ .map_err(|_| VhostUserError::BackendInternalError)?;
+ vring.set_queue_next_used(idx);
Ok(())
} else {
@@ -372,28 +414,30 @@
}
fn set_vring_base(&mut self, index: u32, base: u32) -> VhostUserResult<()> {
- let event_idx: bool = (self.acked_features & (1 << VIRTIO_RING_F_EVENT_IDX)) != 0;
+ let vring = self
+ .vrings
+ .get(index as usize)
+ .ok_or_else(|| VhostUserError::InvalidParam)?;
- self.vrings[index as usize].set_queue_next_avail(base as u16);
- self.vrings[index as usize].set_queue_event_idx(event_idx);
- self.backend.set_event_idx(event_idx);
+ vring.set_queue_next_avail(base as u16);
Ok(())
}
fn get_vring_base(&mut self, index: u32) -> VhostUserResult<VhostUserVringState> {
- if index as usize >= self.num_queues {
- return Err(VhostUserError::InvalidParam);
- }
+ let vring = self
+ .vrings
+ .get(index as usize)
+ .ok_or_else(|| VhostUserError::InvalidParam)?;
// Quote from vhost-user specification:
// Client must start ring upon receiving a kick (that is, detecting
// that file descriptor is readable) on the descriptor specified by
// VHOST_USER_SET_VRING_KICK, and stop ring upon receiving
// VHOST_USER_GET_VRING_BASE.
- self.vrings[index as usize].set_queue_ready(false);
+ vring.set_queue_ready(false);
- if let Some(fd) = self.vrings[index as usize].get_ref().get_kick() {
+ if let Some(fd) = vring.get_ref().get_kick() {
for (thread_index, queues_mask) in self.queues_per_thread.iter().enumerate() {
let shifted_queues_mask = queues_mask >> index;
if shifted_queues_mask & 1u64 == 1u64 {
@@ -406,52 +450,55 @@
}
}
- let next_avail = self.vrings[index as usize].queue_next_avail();
+ let next_avail = vring.queue_next_avail();
- self.vrings[index as usize].set_kick(None);
- self.vrings[index as usize].set_call(None);
+ vring.set_kick(None);
+ vring.set_call(None);
Ok(VhostUserVringState::new(index, u32::from(next_avail)))
}
fn set_vring_kick(&mut self, index: u8, file: Option<File>) -> VhostUserResult<()> {
- if index as usize >= self.num_queues {
- return Err(VhostUserError::InvalidParam);
- }
+ let vring = self
+ .vrings
+ .get(index as usize)
+ .ok_or_else(|| VhostUserError::InvalidParam)?;
// SAFETY: EventFd requires that it has sole ownership of its fd. So
// does File, so this is safe.
// Ideally, we'd have a generic way to refer to a uniquely-owned fd,
// such as that proposed by Rust RFC #3128.
- self.vrings[index as usize].set_kick(file);
+ vring.set_kick(file);
- if self.vring_needs_init(&self.vrings[index as usize]) {
- self.initialize_vring(&self.vrings[index as usize], index)?;
+ if self.vring_needs_init(vring) {
+ self.initialize_vring(vring, index)?;
}
Ok(())
}
fn set_vring_call(&mut self, index: u8, file: Option<File>) -> VhostUserResult<()> {
- if index as usize >= self.num_queues {
- return Err(VhostUserError::InvalidParam);
- }
+ let vring = self
+ .vrings
+ .get(index as usize)
+ .ok_or_else(|| VhostUserError::InvalidParam)?;
- self.vrings[index as usize].set_call(file);
+ vring.set_call(file);
- if self.vring_needs_init(&self.vrings[index as usize]) {
- self.initialize_vring(&self.vrings[index as usize], index)?;
+ if self.vring_needs_init(vring) {
+ self.initialize_vring(vring, index)?;
}
Ok(())
}
fn set_vring_err(&mut self, index: u8, file: Option<File>) -> VhostUserResult<()> {
- if index as usize >= self.num_queues {
- return Err(VhostUserError::InvalidParam);
- }
+ let vring = self
+ .vrings
+ .get(index as usize)
+ .ok_or_else(|| VhostUserError::InvalidParam)?;
- self.vrings[index as usize].set_err(file);
+ vring.set_err(file);
Ok(())
}
@@ -461,7 +508,7 @@
}
fn set_protocol_features(&mut self, features: u64) -> VhostUserResult<()> {
- // Note: slave that reported VHOST_USER_F_PROTOCOL_FEATURES must
+ // Note: a backend that reported VHOST_USER_F_PROTOCOL_FEATURES must
// support this message even before VHOST_USER_SET_FEATURES was
// called.
self.acked_protocol_features = features;
@@ -477,15 +524,16 @@
// has been negotiated.
self.check_feature(VhostUserVirtioFeatures::PROTOCOL_FEATURES)?;
- if index as usize >= self.num_queues {
- return Err(VhostUserError::InvalidParam);
- }
+ let vring = self
+ .vrings
+ .get(index as usize)
+ .ok_or_else(|| VhostUserError::InvalidParam)?;
- // Slave must not pass data to/from the backend until ring is
+ // Backend must not pass data to/from the backend until ring is
// enabled by VHOST_USER_SET_VRING_ENABLE with parameter 1,
// or after it has been disabled by VHOST_USER_SET_VRING_ENABLE
// with parameter 0.
- self.vrings[index as usize].set_enabled(enable);
+ vring.set_enabled(enable);
Ok(())
}
@@ -510,12 +558,20 @@
.map_err(VhostUserError::ReqHandlerError)
}
- fn set_slave_req_fd(&mut self, slave: Slave) {
+ fn set_backend_req_fd(&mut self, backend: Backend) {
if self.acked_protocol_features & VhostUserProtocolFeatures::REPLY_ACK.bits() != 0 {
- slave.set_reply_ack_flag(true);
+ backend.set_reply_ack_flag(true);
}
+ if self.acked_protocol_features & VhostUserProtocolFeatures::SHARED_OBJECT.bits() != 0 {
+ backend.set_shared_object_flag(true);
+ }
+ self.backend.set_backend_req_fd(backend);
+ }
- self.backend.set_slave_req_fd(slave);
+ fn set_gpu_socket(&mut self, gpu_backend: GpuBackend) -> VhostUserResult<()> {
+ self.backend
+ .set_gpu_socket(gpu_backend)
+ .map_err(VhostUserError::ReqHandlerError)
}
fn get_inflight_fd(
@@ -555,6 +611,14 @@
})?,
);
+ let addr_mapping = AddrMapping {
+ #[cfg(feature = "postcopy")]
+ local_addr: guest_region.as_ptr() as u64,
+ vmm_addr: region.user_addr,
+ size: region.memory_size,
+ gpa_base: region.guest_phys_addr,
+ };
+
let mem = self
.atomic_mem
.memory()
@@ -571,11 +635,7 @@
VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
})?;
- self.mappings.push(AddrMapping {
- vmm_addr: region.user_addr,
- size: region.memory_size,
- gpa_base: region.guest_phys_addr,
- });
+ self.mappings.push(addr_mapping);
Ok(())
}
@@ -602,9 +662,129 @@
Ok(())
}
+
+ fn set_device_state_fd(
+ &mut self,
+ direction: VhostTransferStateDirection,
+ phase: VhostTransferStatePhase,
+ file: File,
+ ) -> VhostUserResult<Option<File>> {
+ self.backend
+ .set_device_state_fd(direction, phase, file)
+ .map_err(VhostUserError::ReqHandlerError)
+ }
+
+ fn check_device_state(&mut self) -> VhostUserResult<()> {
+ self.backend
+ .check_device_state()
+ .map_err(VhostUserError::ReqHandlerError)
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_advice(&mut self) -> VhostUserResult<File> {
+ let mut uffd_builder = UffdBuilder::new();
+
+ let uffd = uffd_builder
+ .close_on_exec(true)
+ .non_blocking(true)
+ .user_mode_only(false)
+ .create()
+ .map_err(|e| {
+ VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
+ })?;
+
+ // We need to duplicate the uffd fd because we have to both return a `File`
+ // wrapping the fd and keep the fd stored inside `uffd`.
+ //
+ // SAFETY:
+ // We know that uffd is correctly created.
+ // This means fd inside uffd is also a valid fd.
+ // Duplicating a valid fd is safe.
+ let uffd_dup = unsafe { libc::dup(uffd.as_raw_fd()) };
+ if uffd_dup < 0 {
+ return Err(VhostUserError::ReqHandlerError(io::Error::last_os_error()));
+ }
+
+ // SAFETY:
+ // We know that uffd_dup is a valid fd.
+ let uffd_file = unsafe { File::from_raw_fd(uffd_dup) };
+
+ self.uffd = Some(uffd);
+
+ Ok(uffd_file)
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_listen(&mut self) -> VhostUserResult<()> {
+ let Some(ref uffd) = self.uffd else {
+ return Err(VhostUserError::ReqHandlerError(io::Error::new(
+ io::ErrorKind::Other,
+ "No registered UFFD handler",
+ )));
+ };
+
+ for mapping in self.mappings.iter() {
+ uffd.register(
+ mapping.local_addr as *mut libc::c_void,
+ mapping.size as usize,
+ )
+ .map_err(|e| {
+ VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
+ })?;
+ }
+
+ Ok(())
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_end(&mut self) -> VhostUserResult<()> {
+ self.uffd = None;
+ Ok(())
+ }
+
+ // Sets logging (i.e., bitmap) shared memory space.
+ //
+ // During live migration, the front-end may need to track the modifications the back-end
+ // makes to the memory-mapped regions. The back-end should mark the dirty pages in a log.
+ // Once it complies with this logging, it may declare the `VHOST_F_LOG_ALL` vhost feature.
+ //
+ // If the backend has the `VHOST_USER_PROTOCOL_F_LOG_SHMFD` protocol feature it may receive
+ // the `VHOST_USER_SET_LOG_BASE` message. The log memory file descriptor is provided in `file`,
+ // the size and offset of the shared memory area are provided in the `VhostUserLog` message.
+ //
+ // See https://qemu-project.gitlab.io/qemu/interop/vhost-user.html#migration.
+ // TODO: We ignore the `LOG_ALL` flag on `SET_FEATURES`, so we will continue marking pages as
+ // dirty even if the migration fails. We need to disable the logging after receiving a
+ // `SET_FEATURES` without the `LOG_ALL` flag.
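+ //
+ // Sizing example: with one bit per 4 KiB (`VHOST_LOG_PAGE`) page, the log must provide
+ // guest_memory_size / 4096 / 8 bytes, so e.g. a 16 GiB guest needs a 512 KiB log area.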
+ fn set_log_base(&mut self, log: &VhostUserLog, file: File) -> VhostUserResult<()> {
+ let mem = self.atomic_mem.memory();
+
+ let logmem = Arc::new(
+ MmapLogReg::from_file(file.as_fd(), log.mmap_offset, log.mmap_size)
+ .map_err(VhostUserError::ReqHandlerError)?,
+ );
+
+ // Let's create all bitmaps first before replacing them, in case any of them fails
+ let mut bitmaps = Vec::new();
+ for region in mem.iter() {
+ let bitmap = <<T as VhostUserBackend>::Bitmap as BitmapReplace>::InnerBitmap::new(
+ region,
+ Arc::clone(&logmem),
+ )
+ .map_err(VhostUserError::ReqHandlerError)?;
+
+ bitmaps.push((region, bitmap));
+ }
+
+ for (region, bitmap) in bitmaps {
+ region.bitmap().replace(bitmap);
+ }
+
+ Ok(())
+ }
}
-impl<S, V, B: Bitmap> Drop for VhostUserHandler<S, V, B> {
+impl<T: VhostUserBackend> Drop for VhostUserHandler<T> {
fn drop(&mut self) {
// Signal all working threads to exit.
self.send_exit_event();
diff --git a/crates/vhost-user-backend/src/lib.rs b/crates/vhost-user-backend/src/lib.rs
index c65a19e..d6cfbf9 100644
--- a/crates/vhost-user-backend/src/lib.rs
+++ b/crates/vhost-user-backend/src/lib.rs
@@ -9,11 +9,11 @@
extern crate log;
use std::fmt::{Display, Formatter};
+use std::path::Path;
use std::sync::{Arc, Mutex};
use std::thread;
-use vhost::vhost_user::{Error as VhostUserError, Listener, SlaveListener, SlaveReqHandler};
-use vm_memory::bitmap::Bitmap;
+use vhost::vhost_user::{BackendListener, BackendReqHandler, Error as VhostUserError, Listener};
use vm_memory::mmap::NewBitmap;
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};
@@ -28,11 +28,20 @@
mod handler;
pub use self::handler::VhostUserHandlerError;
+pub mod bitmap;
+use crate::bitmap::BitmapReplace;
+
mod vring;
pub use self::vring::{
VringMutex, VringRwLock, VringState, VringStateGuard, VringStateMutGuard, VringT,
};
+/// Due to the way `xen` handles memory mappings, we cannot combine it with the
+/// `postcopy` feature, which relies on persistent memory mappings. Thus we
+/// disallow enabling both features at the same time.
+#[cfg(all(feature = "postcopy", feature = "xen"))]
+compile_error!("The `postcopy` and `xen` features cannot be enabled at the same time.");
+
/// An alias for `GuestMemoryAtomic<GuestMemoryMmap<B>>` to simplify code.
type GM<B> = GuestMemoryAtomic<GuestMemoryMmap<B>>;
@@ -41,10 +50,12 @@
pub enum Error {
/// Failed to create a new vhost-user handler.
NewVhostUserHandler(VhostUserHandlerError),
- /// Failed creating vhost-user slave listener.
- CreateSlaveListener(VhostUserError),
- /// Failed creating vhost-user slave handler.
- CreateSlaveReqHandler(VhostUserError),
+ /// Failed creating vhost-user backend listener.
+ CreateBackendListener(VhostUserError),
+ /// Failed creating vhost-user backend handler.
+ CreateBackendReqHandler(VhostUserError),
+ /// Failed creating listener socket.
+ CreateVhostUserListener(VhostUserError),
/// Failed starting daemon thread.
StartDaemon(std::io::Error),
/// Failed waiting for daemon thread.
@@ -57,8 +68,13 @@
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
match self {
Error::NewVhostUserHandler(e) => write!(f, "cannot create vhost user handler: {}", e),
- Error::CreateSlaveListener(e) => write!(f, "cannot create slave listener: {}", e),
- Error::CreateSlaveReqHandler(e) => write!(f, "cannot create slave req handler: {}", e),
+ Error::CreateBackendListener(e) => write!(f, "cannot create backend listener: {}", e),
+ Error::CreateBackendReqHandler(e) => {
+ write!(f, "cannot create backend req handler: {}", e)
+ }
+ Error::CreateVhostUserListener(e) => {
+ write!(f, "cannot create vhost-user listener: {}", e)
+ }
Error::StartDaemon(e) => write!(f, "failed to start daemon: {}", e),
Error::WaitDaemon(_e) => write!(f, "failed to wait for daemon exit"),
Error::HandleRequest(e) => write!(f, "failed to handle request: {}", e),
@@ -73,17 +89,17 @@
///
/// This structure is the public API the backend is allowed to interact with in order to run
/// a fully functional vhost-user daemon.
-pub struct VhostUserDaemon<S, V, B: Bitmap + 'static = ()> {
+pub struct VhostUserDaemon<T: VhostUserBackend> {
name: String,
- handler: Arc<Mutex<VhostUserHandler<S, V, B>>>,
+ handler: Arc<Mutex<VhostUserHandler<T>>>,
main_thread: Option<thread::JoinHandle<Result<()>>>,
}
-impl<S, V, B> VhostUserDaemon<S, V, B>
+impl<T> VhostUserDaemon<T>
where
- S: VhostUserBackend<V, B> + Clone + 'static,
- V: VringT<GM<B>> + Clone + Send + Sync + 'static,
- B: NewBitmap + Clone + Send + Sync,
+ T: VhostUserBackend + Clone + 'static,
+ T::Bitmap: BitmapReplace + NewBitmap + Clone + Send + Sync,
+ T::Vring: Clone + Send + Sync,
{
/// Create the daemon instance, providing the backend implementation of `VhostUserBackend`.
///
@@ -92,8 +108,8 @@
/// but they get to be registered later during the sequence.
pub fn new(
name: String,
- backend: S,
- atomic_mem: GuestMemoryAtomic<GuestMemoryMmap<B>>,
+ backend: T,
+ atomic_mem: GuestMemoryAtomic<GuestMemoryMmap<T::Bitmap>>,
) -> Result<Self> {
let handler = Arc::new(Mutex::new(
VhostUserHandler::new(backend, atomic_mem).map_err(Error::NewVhostUserHandler)?,
@@ -114,7 +130,7 @@
/// it acts as a client or a server.
fn start_daemon(
&mut self,
- mut handler: SlaveReqHandler<Mutex<VhostUserHandler<S, V, B>>>,
+ mut handler: BackendReqHandler<Mutex<VhostUserHandler<T>>>,
) -> Result<()> {
let handle = thread::Builder::new()
.name(self.name.clone())
@@ -133,9 +149,9 @@
/// that should be terminating once the other end of the socket (the VMM)
/// hangs up.
pub fn start_client(&mut self, socket_path: &str) -> Result<()> {
- let slave_handler = SlaveReqHandler::connect(socket_path, self.handler.clone())
- .map_err(Error::CreateSlaveReqHandler)?;
- self.start_daemon(slave_handler)
+ let backend_handler = BackendReqHandler::connect(socket_path, self.handler.clone())
+ .map_err(Error::CreateBackendReqHandler)?;
+ self.start_daemon(backend_handler)
}
/// Listen to the vhost-user socket and run a dedicated thread handling all requests coming
@@ -143,22 +159,25 @@
///
/// This runs in an infinite loop that should be terminating once the other end of the socket
/// (the VMM) disconnects.
+ ///
+ /// *Note:* A convenience function [VhostUserDaemon::serve] exists that
+ /// may be a better option than this for simple use-cases.
// TODO: the current implementation has limitations that only one incoming connection will be
// handled from the listener. Should it be enhanced to support reconnection?
pub fn start(&mut self, listener: Listener) -> Result<()> {
- let mut slave_listener = SlaveListener::new(listener, self.handler.clone())
- .map_err(Error::CreateSlaveListener)?;
- let slave_handler = self.accept(&mut slave_listener)?;
- self.start_daemon(slave_handler)
+ let mut backend_listener = BackendListener::new(listener, self.handler.clone())
+ .map_err(Error::CreateBackendListener)?;
+ let backend_handler = self.accept(&mut backend_listener)?;
+ self.start_daemon(backend_handler)
}
fn accept(
&self,
- slave_listener: &mut SlaveListener<Mutex<VhostUserHandler<S, V, B>>>,
- ) -> Result<SlaveReqHandler<Mutex<VhostUserHandler<S, V, B>>>> {
+ backend_listener: &mut BackendListener<Mutex<VhostUserHandler<T>>>,
+ ) -> Result<BackendReqHandler<Mutex<VhostUserHandler<T>>>> {
loop {
- match slave_listener.accept() {
- Err(e) => return Err(Error::CreateSlaveListener(e)),
+ match backend_listener.accept() {
+ Err(e) => return Err(Error::CreateBackendListener(e)),
Ok(Some(v)) => return Ok(v),
Ok(None) => continue,
}
@@ -166,6 +185,9 @@
}
/// Wait for the thread handling the vhost-user socket connection to terminate.
+ ///
+ /// *Note:* A convenience function [VhostUserDaemon::serve] exists that
+ /// may be a better option than this for simple use-cases.
pub fn wait(&mut self) -> Result<()> {
if let Some(handle) = self.main_thread.take() {
match handle.join().map_err(Error::WaitDaemon)? {
@@ -178,11 +200,47 @@
}
}
+ /// Bind to the socket, handle a single connection and shut down
+ ///
+ /// This is a convenience function that provides an easy way to handle the
+ /// following actions without needing to call the low-level functions:
+ /// - Create a listener
+ /// - Start listening
+ /// - Handle a single event
+ /// - Send the exit event to all handler threads
+ ///
+ /// Internal `Err` results that indicate a device disconnect will be treated
+ /// as success and `Ok(())` will be returned in those cases.
+ ///
+ /// *Note:* See [VhostUserDaemon::start] and [VhostUserDaemon::wait] if you
+ /// need more flexibility.
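+ ///
+ /// Typical use, as a minimal sketch (`MyBackend` stands in for a real `VhostUserBackend`
+ /// implementation and the socket path is arbitrary):
+ ///
+ /// ```ignore
+ /// let backend = Arc::new(Mutex::new(MyBackend::new()));
+ /// let mut daemon = VhostUserDaemon::new("my-device".to_string(), backend, mem)?;
+ /// daemon.serve("/tmp/my-device.sock")?;
+ /// ```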
+ pub fn serve<P: AsRef<Path>>(&mut self, socket: P) -> Result<()> {
+ let listener = Listener::new(socket, true).map_err(Error::CreateVhostUserListener)?;
+
+ self.start(listener)?;
+ let result = self.wait();
+
+ // Regardless of the result, we want to signal worker threads to exit
+ self.handler.lock().unwrap().send_exit_event();
+
+ // For this convenience function we are not treating certain "expected"
+ // outcomes as errors. Disconnects and partial messages are normal
+ // behaviour when a guest shuts down.
+ match &result {
+ Err(e) => match e {
+ Error::HandleRequest(VhostUserError::Disconnected) => Ok(()),
+ Error::HandleRequest(VhostUserError::PartialMessage) => Ok(()),
+ _ => result,
+ },
+ _ => result,
+ }
+ }
+
/// Retrieve the vring epoll handler.
///
/// This is necessary to perform further actions like registering and unregistering some extra
/// event file descriptors.
- pub fn get_epoll_handlers(&self) -> Vec<Arc<VringEpollHandler<S, V, B>>> {
+ pub fn get_epoll_handlers(&self) -> Vec<Arc<VringEpollHandler<T>>> {
// Do not expect poisoned lock.
self.handler.lock().unwrap().get_epoll_handlers()
}
@@ -192,8 +250,10 @@
mod tests {
use super::backend::tests::MockVhostBackend;
use super::*;
+ use libc::EAGAIN;
use std::os::unix::net::{UnixListener, UnixStream};
use std::sync::Barrier;
+ use std::time::Duration;
use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
#[test]
@@ -209,26 +269,24 @@
let barrier = Arc::new(Barrier::new(2));
let tmpdir = tempfile::tempdir().unwrap();
- let mut path = tmpdir.path().to_path_buf();
- path.push("socket");
+ let path = tmpdir.path().join("socket");
- let barrier2 = barrier.clone();
- let path1 = path.clone();
- let thread = thread::spawn(move || {
- barrier2.wait();
- let socket = UnixStream::connect(&path1).unwrap();
- barrier2.wait();
- drop(socket)
+ thread::scope(|s| {
+ s.spawn(|| {
+ barrier.wait();
+ let socket = UnixStream::connect(&path).unwrap();
+ barrier.wait();
+ drop(socket)
+ });
+
+ let listener = Listener::new(&path, false).unwrap();
+ barrier.wait();
+ daemon.start(listener).unwrap();
+ barrier.wait();
+ // Above process generates a `HandleRequest(PartialMessage)` error.
+ daemon.wait().unwrap_err();
+ daemon.wait().unwrap();
});
-
- let listener = Listener::new(&path, false).unwrap();
- barrier.wait();
- daemon.start(listener).unwrap();
- barrier.wait();
- // Above process generates a `HandleRequest(PartialMessage)` error.
- daemon.wait().unwrap_err();
- daemon.wait().unwrap();
- thread.join().unwrap();
}
#[test]
@@ -244,27 +302,70 @@
let barrier = Arc::new(Barrier::new(2));
let tmpdir = tempfile::tempdir().unwrap();
- let mut path = tmpdir.path().to_path_buf();
- path.push("socket");
+ let path = tmpdir.path().join("socket");
- let barrier2 = barrier.clone();
- let path1 = path.clone();
- let thread = thread::spawn(move || {
- let listener = UnixListener::bind(&path1).unwrap();
- barrier2.wait();
- let (stream, _) = listener.accept().unwrap();
- barrier2.wait();
- drop(stream)
+ thread::scope(|s| {
+ s.spawn(|| {
+ let listener = UnixListener::bind(&path).unwrap();
+ barrier.wait();
+ let (stream, _) = listener.accept().unwrap();
+ barrier.wait();
+ drop(stream)
+ });
+
+ barrier.wait();
+ daemon
+ .start_client(path.as_path().to_str().unwrap())
+ .unwrap();
+ barrier.wait();
+ // Above process generates a `HandleRequest(PartialMessage)` error.
+ daemon.wait().unwrap_err();
+ daemon.wait().unwrap();
+ });
+ }
+
+ #[test]
+ fn test_daemon_serve() {
+ let mem = GuestMemoryAtomic::new(
+ GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
+ );
+ let backend = Arc::new(Mutex::new(MockVhostBackend::new()));
+ let mut daemon = VhostUserDaemon::new("test".to_owned(), backend.clone(), mem).unwrap();
+ let tmpdir = tempfile::tempdir().unwrap();
+ let socket_path = tmpdir.path().join("socket");
+
+ thread::scope(|s| {
+ s.spawn(|| {
+ let _ = daemon.serve(&socket_path);
+ });
+
+ // There is no way to be notified when the server becomes available,
+ // so we have to spin until the socket appears.
+ while !socket_path.exists() {
+ thread::sleep(Duration::from_millis(10));
+ }
+
+ // Check that no exit events got triggered yet
+ for thread_id in 0..backend.queues_per_thread().len() {
+ let fd = backend.exit_event(thread_id).unwrap();
+ // Reading from exit fd should fail since nothing was written yet
+ assert_eq!(
+ fd.read().unwrap_err().raw_os_error().unwrap(),
+ EAGAIN,
+ "exit event should not have been raised yet!"
+ );
+ }
+
+ let socket = UnixStream::connect(&socket_path).unwrap();
+ // disconnect immediately again
+ drop(socket);
});
- barrier.wait();
- daemon
- .start_client(path.as_path().to_str().unwrap())
- .unwrap();
- barrier.wait();
- // Above process generates a `HandleRequest(PartialMessage)` error.
- daemon.wait().unwrap_err();
- daemon.wait().unwrap();
- thread.join().unwrap();
+ // Check that exit events got triggered
+ let backend = backend.lock().unwrap();
+ for thread_id in 0..backend.queues_per_thread().len() {
+ let fd = backend.exit_event(thread_id).unwrap();
+ assert!(fd.read().is_ok(), "No exit event was raised!");
+ }
}
}
diff --git a/crates/vhost-user-backend/src/vring.rs b/crates/vhost-user-backend/src/vring.rs
index 13e08ac..948f687 100644
--- a/crates/vhost-user-backend/src/vring.rs
+++ b/crates/vhost-user-backend/src/vring.rs
@@ -46,7 +46,7 @@
/// Add an used descriptor into the used queue.
fn add_used(&self, desc_index: u16, len: u32) -> Result<(), VirtQueError>;
- /// Notify the vhost-user master that used descriptors have been put into the used queue.
+ /// Notify the vhost-user frontend that used descriptors have been put into the used queue.
fn signal_used_queue(&self) -> io::Result<()>;
/// Enable event notification for queue.
@@ -145,7 +145,7 @@
.add_used(self.mem.memory().deref(), desc_index, len)
}
- /// Notify the vhost-user master that used descriptors have been put into the used queue.
+ /// Notify the vhost-user frontend that used descriptors have been put into the used queue.
pub fn signal_used_queue(&self) -> io::Result<()> {
if let Some(call) = self.call.as_ref() {
call.write(1)
diff --git a/crates/vhost-user-backend/tests/vhost-user-server.rs b/crates/vhost-user-backend/tests/vhost-user-server.rs
index f6fdea7..bcb49d8 100644
--- a/crates/vhost-user-backend/tests/vhost-user-server.rs
+++ b/crates/vhost-user-backend/tests/vhost-user-server.rs
@@ -1,7 +1,7 @@
use std::ffi::CString;
use std::fs::File;
use std::io::Result;
-use std::os::unix::io::{AsRawFd, FromRawFd};
+use std::os::unix::io::AsRawFd;
use std::os::unix::net::UnixStream;
use std::path::Path;
use std::sync::{Arc, Barrier, Mutex};
@@ -10,7 +10,7 @@
use vhost::vhost_user::message::{
VhostUserConfigFlags, VhostUserHeaderFlag, VhostUserInflight, VhostUserProtocolFeatures,
};
-use vhost::vhost_user::{Listener, Master, Slave, VhostUserMaster};
+use vhost::vhost_user::{Backend, Frontend, Listener, VhostUserFrontend};
use vhost::{VhostBackend, VhostUserMemoryRegionInfo, VringConfigData};
use vhost_user_backend::{VhostUserBackendMut, VhostUserDaemon, VringRwLock};
use vm_memory::{
@@ -26,6 +26,8 @@
}
impl MockVhostBackend {
+ const SUPPORTED_FEATURES: u64 = 0xffff_ffff_ffff_ffff;
+
fn new() -> Self {
MockVhostBackend {
events: 0,
@@ -35,7 +37,10 @@
}
}
-impl VhostUserBackendMut<VringRwLock, ()> for MockVhostBackend {
+impl VhostUserBackendMut for MockVhostBackend {
+ type Bitmap = ();
+ type Vring = VringRwLock;
+
fn num_queues(&self) -> usize {
2
}
@@ -45,7 +50,7 @@
}
fn features(&self) -> u64 {
- 0xffff_ffff_ffff_ffff
+ Self::SUPPORTED_FEATURES
}
fn acked_features(&mut self, features: u64) {
@@ -56,6 +61,12 @@
VhostUserProtocolFeatures::all()
}
+ fn reset_device(&mut self) {
+ self.events = 0;
+ self.event_idx = false;
+ self.acked_features = 0;
+ }
+
fn set_event_idx(&mut self, enabled: bool) {
self.event_idx = enabled;
}
@@ -81,7 +92,7 @@
Ok(())
}
- fn set_slave_req_fd(&mut self, _slave: Slave) {}
+ fn set_backend_req_fd(&mut self, _backend: Backend) {}
fn queues_per_thread(&self) -> Vec<u64> {
vec![1, 1]
@@ -99,53 +110,53 @@
_evset: EventSet,
_vrings: &[VringRwLock],
_thread_id: usize,
- ) -> Result<bool> {
+ ) -> Result<()> {
self.events += 1;
- Ok(false)
+ Ok(())
}
}
-fn setup_master(path: &Path, barrier: Arc<Barrier>) -> Master {
+fn setup_frontend(path: &Path, barrier: Arc<Barrier>) -> Frontend {
barrier.wait();
- let mut master = Master::connect(path, 1).unwrap();
- master.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
+ let mut frontend = Frontend::connect(path, 1).unwrap();
+ frontend.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
// Wait before issue service requests.
barrier.wait();
- let features = master.get_features().unwrap();
- let proto = master.get_protocol_features().unwrap();
- master.set_features(features).unwrap();
- master.set_protocol_features(proto).unwrap();
+ let features = frontend.get_features().unwrap();
+ let proto = frontend.get_protocol_features().unwrap();
+ frontend.set_features(features).unwrap();
+ frontend.set_protocol_features(proto).unwrap();
assert!(proto.contains(VhostUserProtocolFeatures::REPLY_ACK));
- master
+ frontend
}
fn vhost_user_client(path: &Path, barrier: Arc<Barrier>) {
barrier.wait();
- let mut master = Master::connect(path, 1).unwrap();
- master.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
+ let mut frontend = Frontend::connect(path, 1).unwrap();
+ frontend.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
// Wait before issue service requests.
barrier.wait();
- let features = master.get_features().unwrap();
- let proto = master.get_protocol_features().unwrap();
- master.set_features(features).unwrap();
- master.set_protocol_features(proto).unwrap();
+ let features = frontend.get_features().unwrap();
+ let proto = frontend.get_protocol_features().unwrap();
+ frontend.set_features(features).unwrap();
+ frontend.set_protocol_features(proto).unwrap();
assert!(proto.contains(VhostUserProtocolFeatures::REPLY_ACK));
- let queue_num = master.get_queue_num().unwrap();
+ let queue_num = frontend.get_queue_num().unwrap();
assert_eq!(queue_num, 2);
- master.set_owner().unwrap();
- //master.set_owner().unwrap_err();
- master.reset_owner().unwrap();
- master.reset_owner().unwrap();
- master.set_owner().unwrap();
+ frontend.set_owner().unwrap();
+ //frontend.set_owner().unwrap_err();
+ frontend.reset_owner().unwrap();
+ frontend.reset_owner().unwrap();
+ frontend.set_owner().unwrap();
- master.set_features(features).unwrap();
- master.set_protocol_features(proto).unwrap();
+ frontend.set_features(features).unwrap();
+ frontend.set_protocol_features(proto).unwrap();
assert!(proto.contains(VhostUserProtocolFeatures::REPLY_ACK));
let memfd = nix::sys::memfd::memfd_create(
@@ -153,8 +164,7 @@
nix::sys::memfd::MemFdCreateFlag::empty(),
)
.unwrap();
- // SAFETY: Safe because we panic before if memfd is not valid.
- let file = unsafe { File::from_raw_fd(memfd) };
+ let file = File::from(memfd);
file.set_len(0x100000).unwrap();
let file_offset = FileOffset::new(file, 0);
let mem = GuestMemoryMmap::<()>::from_ranges_with_files(&[(
@@ -173,9 +183,9 @@
0,
fd.file().as_raw_fd(),
)];
- master.set_mem_table(®ions).unwrap();
+ frontend.set_mem_table(®ions).unwrap();
- master.set_vring_num(0, 256).unwrap();
+ frontend.set_vring_num(0, 256).unwrap();
let config = VringConfigData {
queue_max_size: 256,
@@ -186,39 +196,53 @@
avail_ring_addr: addr + 0x20000,
log_addr: None,
};
- master.set_vring_addr(0, &config).unwrap();
+ frontend.set_vring_addr(0, &config).unwrap();
let eventfd = EventFd::new(0).unwrap();
- master.set_vring_kick(0, &eventfd).unwrap();
- master.set_vring_call(0, &eventfd).unwrap();
- master.set_vring_err(0, &eventfd).unwrap();
- master.set_vring_enable(0, true).unwrap();
+ frontend.set_vring_kick(0, &eventfd).unwrap();
+ frontend.set_vring_call(0, &eventfd).unwrap();
+ frontend.set_vring_err(0, &eventfd).unwrap();
+ frontend.set_vring_enable(0, true).unwrap();
let buf = [0u8; 8];
- let (_cfg, data) = master
+ let (_cfg, data) = frontend
.get_config(0x200, 8, VhostUserConfigFlags::empty(), &buf)
.unwrap();
assert_eq!(&data, &[0xa5u8; 8]);
- master
+ frontend
.set_config(0x200, VhostUserConfigFlags::empty(), &data)
.unwrap();
let (tx, _rx) = UnixStream::pair().unwrap();
- master.set_slave_request_fd(&tx).unwrap();
+ frontend.set_backend_request_fd(&tx).unwrap();
- let state = master.get_vring_base(0).unwrap();
- master.set_vring_base(0, state as u16).unwrap();
+ let state = frontend.get_vring_base(0).unwrap();
+ frontend.set_vring_base(0, state as u16).unwrap();
- assert_eq!(master.get_max_mem_slots().unwrap(), 32);
+ assert_eq!(frontend.get_max_mem_slots().unwrap(), 509);
let region = VhostUserMemoryRegionInfo::new(0x800000, 0x100000, addr, 0, fd.file().as_raw_fd());
- master.add_mem_region(®ion).unwrap();
- master.remove_mem_region(®ion).unwrap();
+ frontend.add_mem_region(®ion).unwrap();
+ frontend.remove_mem_region(®ion).unwrap();
}
-fn vhost_user_server(cb: fn(&Path, Arc<Barrier>)) {
+/// Provide a vhost-user back-end for front-end testing.
+///
+/// Set up a `MockVhostBackend` vhost-user back-end and run `cb` in a thread, passing the
+/// vhost-user socket's path and a barrier to await request processing. `cb` is supposed to run
+/// the front-end tests.
+///
+/// After request processing has begun, run `server_fn`, passing both a reference to the back-end
+/// and the same barrier as given to `cb`. `server_fn` may perform additional back-end tests while
+/// `cb` is still running in its thread.
+///
+/// After `server_fn` is done, await `cb` (joining its thread), and return.
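+///
+/// A minimal usage sketch (with a no-op back-end check; `my_frontend_test`
+/// is a hypothetical `fn(&Path, Arc<Barrier>)`):
+///
+/// ```ignore
+/// vhost_user_server_with_fn(my_frontend_test, |_backend, _barrier| {
+///     // Additional back-end side assertions would go here.
+/// });
+/// ```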
+fn vhost_user_server_with_fn<F: FnOnce(Arc<Mutex<MockVhostBackend>>, Arc<Barrier>)>(
+ cb: fn(&Path, Arc<Barrier>),
+ server_fn: F,
+) {
let mem = GuestMemoryAtomic::new(GuestMemoryMmap::<()>::new());
let backend = Arc::new(Mutex::new(MockVhostBackend::new()));
- let mut daemon = VhostUserDaemon::new("test".to_owned(), backend, mem).unwrap();
+ let mut daemon = VhostUserDaemon::new("test".to_owned(), backend.clone(), mem).unwrap();
let barrier = Arc::new(Barrier::new(2));
let tmpdir = tempfile::tempdir().unwrap();
@@ -234,19 +258,25 @@
daemon.start(listener).unwrap();
barrier.wait();
+ server_fn(backend, barrier);
+
// handle service requests from clients.
thread.join().unwrap();
}
+fn vhost_user_server(cb: fn(&Path, Arc<Barrier>)) {
+ vhost_user_server_with_fn(cb, |_, _| {})
+}
+
#[test]
fn test_vhost_user_server() {
vhost_user_server(vhost_user_client);
}
fn vhost_user_enable(path: &Path, barrier: Arc<Barrier>) {
- let master = setup_master(path, barrier);
- master.set_owner().unwrap();
- master.set_owner().unwrap_err();
+ let frontend = setup_frontend(path, barrier);
+ frontend.set_owner().unwrap();
+ frontend.set_owner().unwrap_err();
}
#[test]
@@ -255,7 +285,7 @@
}
fn vhost_user_set_inflight(path: &Path, barrier: Arc<Barrier>) {
- let mut master = setup_master(path, barrier);
+ let mut frontend = setup_frontend(path, barrier);
let eventfd = EventFd::new(0).unwrap();
// No implementation for inflight_fd yet.
let inflight = VhostUserInflight {
@@ -264,7 +294,7 @@
num_queues: 1,
queue_size: 256,
};
- master
+ frontend
.set_inflight_fd(&inflight, eventfd.as_raw_fd())
.unwrap_err();
}
@@ -275,7 +305,7 @@
}
fn vhost_user_get_inflight(path: &Path, barrier: Arc<Barrier>) {
- let mut master = setup_master(path, barrier);
+ let mut frontend = setup_frontend(path, barrier);
// No implementation for inflight_fd yet.
let inflight = VhostUserInflight {
mmap_size: 0x100000,
@@ -283,10 +313,76 @@
num_queues: 1,
queue_size: 256,
};
- assert!(master.get_inflight_fd(&inflight).is_err());
+ assert!(frontend.get_inflight_fd(&inflight).is_err());
}
#[test]
fn test_vhost_user_get_inflight() {
vhost_user_server(vhost_user_get_inflight);
}
+
+#[cfg(feature = "postcopy")]
+fn vhost_user_postcopy_advise(path: &Path, barrier: Arc<Barrier>) {
+ let mut frontend = setup_frontend(path, barrier);
+ let _uffd_file = frontend.postcopy_advise().unwrap();
+}
+
+#[cfg(feature = "postcopy")]
+fn vhost_user_postcopy_listen(path: &Path, barrier: Arc<Barrier>) {
+ let mut frontend = setup_frontend(path, barrier);
+ let _uffd_file = frontend.postcopy_advise().unwrap();
+ frontend.postcopy_listen().unwrap();
+}
+
+#[cfg(feature = "postcopy")]
+fn vhost_user_postcopy_end(path: &Path, barrier: Arc<Barrier>) {
+ let mut frontend = setup_frontend(path, barrier);
+ let _uffd_file = frontend.postcopy_advise().unwrap();
+ frontend.postcopy_listen().unwrap();
+ frontend.postcopy_end().unwrap();
+}
+
+// These tests need access to `/dev/userfaultfd`
+// in order to pass.
+#[cfg(feature = "postcopy")]
+#[test]
+fn test_vhost_user_postcopy() {
+ vhost_user_server(vhost_user_postcopy_advise);
+ vhost_user_server(vhost_user_postcopy_listen);
+ vhost_user_server(vhost_user_postcopy_end);
+}
+
+fn vhost_user_reset_device(path: &Path, barrier: Arc<Barrier>) {
+ let mut frontend = setup_frontend(path, barrier.clone());
+
+ // Signal that we are about to reset
+ barrier.wait();
+ // Wait until server has checked non-reset state
+ barrier.wait();
+
+ frontend.reset_device().unwrap();
+
+ // Signal reset is done
+ barrier.wait();
+}
+
+#[test]
+fn test_vhost_user_reset_device() {
+ vhost_user_server_with_fn(vhost_user_reset_device, |backend, barrier| {
+ // Wait until `vhost_user_reset_device()` is about to reset
+ barrier.wait();
+ // Check non-reset state
+ assert!(backend.lock().unwrap().acked_features == MockVhostBackend::SUPPORTED_FEATURES);
+ // Set up some arbitrary internal state
+ backend.lock().unwrap().events = 42;
+
+ // Allow reset
+ barrier.wait();
+ // Wait for reset to be done
+ barrier.wait();
+
+ // Check reset state
+ assert!(backend.lock().unwrap().acked_features == 0);
+ assert!(backend.lock().unwrap().events == 0);
+ });
+}
diff --git a/crates/vhost/.cargo-checksum.json b/crates/vhost/.cargo-checksum.json
index 06d371d..d4f2cf7 100644
--- a/crates/vhost/.cargo-checksum.json
+++ b/crates/vhost/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"d614c3d7cd36bdf41799ef75e83356f873e1cbe42bc6f721ed988717befb8249","Cargo.toml":"31622981822291abce263d5c2897a4cbc0e41533b0df24c9f0abcc1d19848d1e","README.md":"05f6d4a61cd62ee3f29c5a3539d9bfcde89ab688470671e708c6dc6ff8f2c035","docs/vhost_architecture.drawio":"e9818449236c3be43a422de9cd3b41f01c6e93ee0ad313c4e80324fa2ec36ddb","docs/vhost_architecture.png":"aa45f1ba1fa502ddf255954b805bd7a647eddc5f9432ccfca04c14926c54b249","src/backend.rs":"7cad7c15fd36d4c321554f3b45d62753723c97ef756fe36e0b8f1768beee45f6","src/lib.rs":"a525a1a8bbdbf1d3ae191e8c45af38418592648d522c82b3556d8bd13f7c2488","src/net.rs":"ad00efd5889bfd44f9f7ffcd82ce1be3d08c67e495ace529c2b08369b334630b","src/vdpa.rs":"1c639a3c5e01b6c625af3cefa9d48503a7652934399f8045687541adcf2de918","src/vhost_kern/mod.rs":"2e4ff270ead939ddba984f5c21e9fd9b7487d2188f9c9e0adee606d43d2f937e","src/vhost_kern/net.rs":"de8f3677da64695599107aff02fb673f8dcca72849e211d8bdc33424e65ae3b3","src/vhost_kern/vdpa.rs":"86456fcd7a4cbe0a44176055e0ec1ff238d7e2a0e76e81cec3a2f758c1c11ee7","src/vhost_kern/vhost_binding.rs":"e4e6b2ff1eb8144c1ec31b16cfdcee65789606946fa619dc904657d5f59181ca","src/vhost_kern/vsock.rs":"6ec9de9e755f9f60191e925c08013f67788fac396f023efb24fbdb29b83d4fae","src/vhost_user/connection.rs":"0afc7ac62e69ed7ad4f3d48cf2774dce07fc7a4bc2318dfadf69a02de21181f2","src/vhost_user/dummy_slave.rs":"7984b06c2ab52396cc786929532a10dfc3b05721037d97915ef3c4d1ed2e03fb","src/vhost_user/master.rs":"679a43147f3f3e042e634eac018f7bd1b46c3ae7e20be6905c2b43cb984f2f91","src/vhost_user/master_req_handler.rs":"3f708a9637c327a136fed013d646f284e9672cd2d17d6a8216c4b1fcf699de59","src/vhost_user/message.rs":"122390bc8d6aea4f78e3bfe4a9427969259a804cf215885cd98fb2bf21f030a4","src/vhost_user/mod.rs":"7e3e3b793dc551a4e326386428f28481d49a77caa8dbebb435b02b270c8d8a51","src/vhost_user/slave.rs":"d02c22aaf4b4e18a16c7c9ba51c030526839a4d9ba2870d1dd4af22f5d91352e","src/vhost_user/slave_req.rs":"37ca14bc3a76dd5f7afd5929c8e31f1d50401d66402a9b11dc7fbac1405e50fb","src/vhost_user/slave_req_handler.rs":"03a117fa3c8590fc22f130b8c1497a0303f271728cff2b70563f929625c81770","src/vsock.rs":"319e82be4a3fd837de1e0d71433cb29bf235947d4b29fdc058757374ac8209b9"},"package":"61957aeb36daf0b00b87fff9c10dd28a161bd35ab157553d340d183b3d8756e6"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"23783ca268b8f6aefb3a2061e96f9630e0ac5bc852cef014c18be3947d112540","Cargo.toml":"6e859093adb2dc8e2c5fbe4994b1e9b5c4e82f757f9d5f27db9f47ddf6a68cf7","README.md":"8f1250ec8d2145a22727d1489bf214357fef603c0457a28bacf6ca58636f3371","docs/vhost_architecture.drawio":"c04b7d3e3f9deb59dca0f3aaebfcd4a62efd5f89e39d13a9bfc1668b0d2f6829","docs/vhost_architecture.png":"aa45f1ba1fa502ddf255954b805bd7a647eddc5f9432ccfca04c14926c54b249","src/backend.rs":"7cad7c15fd36d4c321554f3b45d62753723c97ef756fe36e0b8f1768beee45f6","src/lib.rs":"2704892984078b345376dba175a60b5bd57c55c85e624896d5b7eb4123f339ce","src/net.rs":"ad00efd5889bfd44f9f7ffcd82ce1be3d08c67e495ace529c2b08369b334630b","src/vdpa.rs":"1c639a3c5e01b6c625af3cefa9d48503a7652934399f8045687541adcf2de918","src/vhost_kern/mod.rs":"9816abd4da16d7b98ac7869a6ef4a98812987bf98864d6f53f46b20e7b2e88a0","src/vhost_kern/net.rs":"de8f3677da64695599107aff02fb673f8dcca72849e211d8bdc33424e65ae3b3","src/vhost_kern/vdpa.rs":"6bcd63a7ada615a8d6fa02cd5311e065c71c2a18a4263ac2c8285f87635290da","src/vhost_kern/vhost_binding.rs":"098e8ade3a77b4ba1d69ec5e2a42b345969a46d93958fd3157f019296f4fb68d","src/vhost_kern/vsock.rs":"6ec9de9e755f9f60191e925c08013f67788fac396f023efb24fbdb29b83d4fae","src/vhost_user/backend.rs":"4e4c63c1483b8211212944ecc807e00b5ecc0fedd3ca06de3f1cb73d7e791444","src/vhost_user/backend_req.rs":"4a2a9e0b499b0ba1e0171add83e70f3fbadb06a207f53bf3a5f520d6ad87de8d","src/vhost_user/backend_req_handler.rs":"8937fe26018b9589030efa816ffaea38550efa5c4b1a79e7545423058f3c8de6","src/vhost_user/connection.rs":"efdd022c5611caf0b1597405756cf0e049a6ddb0c92fe8aeaca96c87255c25e2","src/vhost_user/dummy_backend.rs":"4fe9f2c5d25b2aa6afb087f72a5293506342717c4e564d6b03208f38df8b8f1f","src/vhost_user/frontend.rs":"4ac46b96e6618e6ab2318871b8093d5060df4b178c327be2d48557ef1b17445c","src/vhost_user/frontend_req_handler.rs":"c74e529bcb70dd35bdf5d844b8acb00c5a28173ac9db934543a496d2a904428f","src/vhost_user/gpu_backend_req.rs":"24ca12e25132826fc64ef2fa3f015eb49a3e3de443e6e9913da55076ad4a0354","src/vhost_user/gpu_message.rs":"fb3cf4b34d03b513019c75b7fbf6978e46661f518d4f837efe99cb62179966a1","src/vhost_user/message.rs":"b58886ab563c1e198c7a9e04fbbcaff74d5dc4b6ae7e1aa67a5963641141bd03","src/vhost_user/mod.rs":"b99cfcdae27991cd9059780a34241ecc95df06e61792af3b5c98cf23ad59a45d","src/vsock.rs":"319e82be4a3fd837de1e0d71433cb29bf235947d4b29fdc058757374ac8209b9"},"package":"bce0aad4d8776cb64f1ac591e908a561c50ba6adac4416296efee590b155623f"}
\ No newline at end of file
diff --git a/crates/vhost/Android.bp b/crates/vhost/Android.bp
index bb44824..259613b 100644
--- a/crates/vhost/Android.bp
+++ b/crates/vhost/Android.bp
@@ -17,18 +17,20 @@
name: "libvhost_android",
crate_name: "vhost",
cargo_env_compat: true,
- cargo_pkg_version: "0.8.1",
+ cargo_pkg_version: "0.13.0",
crate_root: "src/lib.rs",
- edition: "2018",
+ edition: "2021",
features: [
"default",
"vhost-user",
- "vhost-user-slave",
+ "vhost-user-backend",
],
rustlibs: [
- "libbitflags-1.3.2",
+ "libbitflags",
"liblibc",
+ "libuuid",
"libvm_memory_android",
"libvmm_sys_util",
],
+ compile_multilib: "first",
}
diff --git a/crates/vhost/CHANGELOG.md b/crates/vhost/CHANGELOG.md
index af8c430..f0c1083 100644
--- a/crates/vhost/CHANGELOG.md
+++ b/crates/vhost/CHANGELOG.md
@@ -5,9 +5,72 @@
### Changed
+### Deprecated
+
### Fixed
-### Deprecated
+## [0.13.0]
+
+### Added
+- [[#266]](https://github.com/rust-vmm/vhost/pull/266) Add support for `VHOST_USER_RESET_DEVICE`
+
+### Changed
+- [[#269]](https://github.com/rust-vmm/vhost/pull/269) Update vm-memory to 0.16.0 and virtio-queue to 0.13.0
+
+## [0.12.1]
+
+### Fixed
+- [[#267](https://github.com/rust-vmm/vhost/pull/267)] Fix feature unification issues with gpu-socket feature.
+
+## [0.12.0] - yanked
+
+This version got yanked because the `gpu_socket` feature introduced in this
+release was causing problems
+(see [#265](https://github.com/rust-vmm/vhost/issues/265)).
+Starting with the next version (v0.12.1), the `gpu_socket` feature was removed.
+
+### Added
+- [[#241]](https://github.com/rust-vmm/vhost/pull/241) Add shared objects support
+- [[#239]](https://github.com/rust-vmm/vhost/pull/239) Add support for `VHOST_USER_GPU_SET_SOCKET`
+
+### Changed
+- [[#257]](https://github.com/rust-vmm/vhost/pull/257) Update vm-memory from 0.14.0 to 0.15.0.
+- [[#243]](https://github.com/rust-vmm/vhost/pull/243) Ignore unknown bits in `VHOST_USER_GET_PROTOCOL_FEATURES` response.
+
+### Remove
+- [[#246]](https://github.com/rust-vmm/vhost/pull/246) Remove support for FS_* requests
+
+## [0.11.0]
+
+### Added
+- [[#203]](https://github.com/rust-vmm/vhost/pull/203) Add back-end's internal state migration support
+- [[#218]](https://github.com/rust-vmm/vhost/pull/218) Adding POSTCOPY support
+- [[#206]](https://github.com/rust-vmm/vhost/pull/206) Add bitmap support for tracking dirty pages during migration
+
+## [0.10.0]
+
+### Changed
+- [[#219]](https://github.com/rust-vmm/vhost/pull/219) Update vmm-sys-util dependency to 0.12.1.
+
+### Remove
+- [[#202](https://github.com/rust-vmm/vhost/pull/202)] Do not expose for internal-usage-only `NOOP` and `MAX_CMD` requests.
+- [[#205](https://github.com/rust-vmm/vhost/pull/205)] Remove some commented out code.
+
+### Fixed
+- [[#208](https://github.com/rust-vmm/vhost/pull/208)] Fix various message structs being `repr(Rust)` instead of `repr(C)`.
+
+## [0.9.0]
+
+### Changed
+- [[#187]](https://github.com/rust-vmm/vhost/pull/187) Clean master slave
+ - Replaced master/slave with frontend/backend in the codebase and public API.
+ - Replaced master/slave with frontend/backend in the crate features.
+- Updated dependency bitflags from 1.0 to 2.4
+- [[#116]](https://github.com/rust-vmm/vhost/pull/116) Upgrade to 2021 edition
+
+### Fixed
+- [[#184]](https://github.com/rust-vmm/vhost/pull/184) Safety fixes
+- [[#186]](https://github.com/rust-vmm/vhost/pull/186) vhost: Fix clippy warnings.
## [0.8.1]
diff --git a/crates/vhost/Cargo.toml b/crates/vhost/Cargo.toml
index 48f7e96..81a2c94 100644
--- a/crates/vhost/Cargo.toml
+++ b/crates/vhost/Cargo.toml
@@ -10,10 +10,15 @@
# See Cargo.toml.orig for the original contents.
[package]
-edition = "2018"
+edition = "2021"
name = "vhost"
-version = "0.8.1"
+version = "0.13.0"
authors = ["Liu Jiang <gerry@linux.alibaba.com>"]
+build = false
+autobins = false
+autoexamples = false
+autotests = false
+autobenches = false
description = "a pure rust library for vdpa, vhost and vhost-user"
documentation = "https://docs.rs/vhost"
readme = "README.md"
@@ -29,33 +34,46 @@
[package.metadata.docs.rs]
all-features = true
+[lib]
+name = "vhost"
+path = "src/lib.rs"
+
[dependencies.bitflags]
-version = "1.0"
+version = "2.4"
[dependencies.libc]
version = "0.2.39"
+[dependencies.uuid]
+version = "1.8.0"
+features = [
+ "v4",
+ "fast-rng",
+ "macro-diagnostics",
+]
+
[dependencies.vm-memory]
-version = "0.12.0"
+version = "0.16.0"
features = ["backend-mmap"]
[dependencies.vmm-sys-util]
-version = "0.11.0"
+version = "0.12.1"
[dev-dependencies.serial_test]
-version = "0.5"
+version = "3.0"
[dev-dependencies.tempfile]
version = "3.2.0"
[features]
default = []
+postcopy = []
test-utils = []
vhost-kern = []
vhost-net = ["vhost-kern"]
vhost-user = []
-vhost-user-master = ["vhost-user"]
-vhost-user-slave = ["vhost-user"]
+vhost-user-backend = ["vhost-user"]
+vhost-user-frontend = ["vhost-user"]
vhost-vdpa = ["vhost-kern"]
vhost-vsock = []
xen = ["vm-memory/xen"]
diff --git a/crates/vhost/METADATA b/crates/vhost/METADATA
index 66b8c7f..aa54ad5 100644
--- a/crates/vhost/METADATA
+++ b/crates/vhost/METADATA
@@ -1,17 +1,17 @@
name: "vhost"
description: "a pure rust library for vdpa, vhost and vhost-user"
third_party {
- version: "0.8.1"
+ version: "0.13.0"
license_type: NOTICE
last_upgrade_date {
- year: 2023
- month: 9
- day: 6
+ year: 2024
+ month: 11
+ day: 21
}
homepage: "https://crates.io/crates/vhost"
identifier {
type: "Archive"
- value: "https://static.crates.io/crates/vhost/vhost-0.8.1.crate"
- version: "0.8.1"
+ value: "https://static.crates.io/crates/vhost/vhost-0.13.0.crate"
+ version: "0.13.0"
}
}
diff --git a/crates/vhost/README.md b/crates/vhost/README.md
index 6f5f0e5..101b4d7 100644
--- a/crates/vhost/README.md
+++ b/crates/vhost/README.md
@@ -8,7 +8,7 @@
The main relationship among Traits and Structs exported by the `vhost` crate is as below:
-
+
## Kernel-based vHost Backend Drivers
The vhost drivers in Linux provide in-kernel virtio device emulation. Normally
the hypervisor userspace process emulates I/O accesses from the guest.
@@ -26,11 +26,17 @@
It uses communication over a Unix domain socket to share file descriptors in
the ancillary data of the message.
-The protocol defines two sides of the communication, master and slave.
-Master is the application that shares its virtqueues, slave is the consumer
-of the virtqueues. Master and slave can be either a client (i.e. connecting)
+The protocol defines two sides of the communication, frontend and backend.
+Frontend is the application that shares its virtqueues, backend is the consumer
+of the virtqueues. Frontend and backend can be either a client (i.e. connecting)
or server (listening) in the socket communication.
+## Postcopy support
+
+Support for the POSTCOPY_* messages is gated behind the `postcopy` feature.
+Due to how Xen handles memory mappings, the `postcopy` feature is not compatible
+with the `xen` feature. Enabling both at the same time results in a compilation error.
+
## Xen support
Supporting Xen requires special handling while mapping the guest memory. The
diff --git a/crates/vhost/cargo_embargo.json b/crates/vhost/cargo_embargo.json
index 3f73d27..6516f19 100644
--- a/crates/vhost/cargo_embargo.json
+++ b/crates/vhost/cargo_embargo.json
@@ -2,16 +2,16 @@
"features": [
"default",
"vhost-user",
- "vhost-user-slave"
+ "vhost-user-backend"
],
"module_name_overrides": {
- "libbitflags": "libbitflags-1.3.2",
"libvhost": "libvhost_android",
"libvm_memory": "libvm_memory_android"
},
"package": {
"vhost": {
- "device_supported": false
+ "device_supported": false,
+ "compile_multilib": "first"
}
},
"run_cargo": false
diff --git a/crates/vhost/docs/vhost_architecture.drawio b/crates/vhost/docs/vhost_architecture.drawio
index 8c669d8..610aff4 100644
--- a/crates/vhost/docs/vhost_architecture.drawio
+++ b/crates/vhost/docs/vhost_architecture.drawio
@@ -13,10 +13,10 @@
<mxCell id="44" value="" style="rounded=0;whiteSpace=wrap;html=1;labelBackgroundColor=none;sketch=0;fontSize=25;fontColor=#FF00FF;fillColor=none;strokeColor=#4D4D4D;strokeWidth=5;" vertex="1" parent="1">
<mxGeometry x="-10" y="37" width="1250" height="670" as="geometry"/>
</mxCell>
- <mxCell id="2" value="<pre style="font-family: &quot;jetbrains mono&quot;, monospace; font-size: 16.5pt;">MasterReqHandler</pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" parent="1" vertex="1">
+ <mxCell id="2" value="<pre style="font-family: &quot;jetbrains mono&quot;, monospace; font-size: 16.5pt;">FrontendReqHandler</pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" parent="1" vertex="1">
<mxGeometry x="830" y="477" width="220" height="50" as="geometry"/>
</mxCell>
- <mxCell id="4" value="<pre style="font-size: 16.5pt; font-weight: 700; font-family: &quot;jetbrains mono&quot;, monospace;">VhostUserMasterReqHandler</pre>" style="rounded=1;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" parent="1" vertex="1">
+ <mxCell id="4" value="<pre style="font-size: 16.5pt; font-weight: 700; font-family: &quot;jetbrains mono&quot;, monospace;">VhostUserFrontendReqHandler</pre>" style="rounded=1;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" parent="1" vertex="1">
<mxGeometry x="840" y="597" width="360" height="60" as="geometry"/>
</mxCell>
<mxCell id="6" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0;exitY=0.5;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" edge="1" parent="1" source="5" target="2">
@@ -27,10 +27,10 @@
</Array>
</mxGeometry>
</mxCell>
- <mxCell id="5" value="<pre style="font-family: &quot;jetbrains mono&quot;, monospace; font-size: 16.5pt;"><pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">Slave</pre></pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" parent="1" vertex="1">
+ <mxCell id="5" value="<pre style="font-family: &quot;jetbrains mono&quot;, monospace; font-size: 16.5pt;"><pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">Backend</pre></pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" parent="1" vertex="1">
<mxGeometry x="1715" y="767" width="220" height="50" as="geometry"/>
</mxCell>
- <mxCell id="7" value="<pre style="font-size: 16.5pt; font-weight: 700; font-family: &quot;jetbrains mono&quot;, monospace;">VhostUserMasterReqHandlerMut</pre>" style="rounded=1;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" vertex="1" parent="1">
+ <mxCell id="7" value="<pre style="font-size: 16.5pt; font-weight: 700; font-family: &quot;jetbrains mono&quot;, monospace;">VhostUserFrontendReqHandlerMut</pre>" style="rounded=1;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" vertex="1" parent="1">
<mxGeometry x="1630" y="657" width="390" height="60" as="geometry"/>
</mxCell>
<mxCell id="8" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" edge="1" parent="1" source="2" target="4">
@@ -39,13 +39,13 @@
<mxPoint x="680" y="717" as="targetPoint"/>
</mxGeometry>
</mxCell>
- <mxCell id="10" value="<pre style="font-family: &quot;jetbrains mono&quot;, monospace; font-size: 16.5pt;"><pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">SlaveListener</pre></pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" vertex="1" parent="1">
+ <mxCell id="10" value="<pre style="font-family: &quot;jetbrains mono&quot;, monospace; font-size: 16.5pt;"><pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">BackendListener</pre></pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" vertex="1" parent="1">
<mxGeometry x="1360" y="472" width="190" height="50" as="geometry"/>
</mxCell>
- <mxCell id="11" value="<pre style="font-family: &quot;jetbrains mono&quot;, monospace; font-size: 16.5pt;"><pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt"><pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">SlaveReqHandler</pre></pre></pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" vertex="1" parent="1">
+ <mxCell id="11" value="<pre style="font-family: &quot;jetbrains mono&quot;, monospace; font-size: 16.5pt;"><pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt"><pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">BackendReqHandler</pre></pre></pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" vertex="1" parent="1">
<mxGeometry x="1712" y="387" width="210" height="50" as="geometry"/>
</mxCell>
- <mxCell id="14" value="<pre style="font-size: 16.5pt; font-weight: 700; font-family: &quot;jetbrains mono&quot;, monospace;"><pre style="font-family: &quot;jetbrains mono&quot;, monospace; font-size: 16.5pt;">VhostUserSlaveReqHandler</pre></pre>" style="rounded=1;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" vertex="1" parent="1">
+ <mxCell id="14" value="<pre style="font-size: 16.5pt; font-weight: 700; font-family: &quot;jetbrains mono&quot;, monospace;"><pre style="font-family: &quot;jetbrains mono&quot;, monospace; font-size: 16.5pt;">VhostUserBackendReqHandler</pre></pre>" style="rounded=1;whiteSpace=wrap;html=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" vertex="1" parent="1">
<mxGeometry x="1652" y="537" width="330" height="60" as="geometry"/>
</mxCell>
<mxCell id="15" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.5;entryY=0;entryDx=0;entryDy=0;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" edge="1" parent="1" source="11" target="14">
@@ -63,7 +63,7 @@
<mxCell id="18" value="<pre style="font-family: &quot;jetbrains mono&quot;, monospace; font-size: 16.5pt;">VhostVdpaBackend</pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#808080;strokeColor=#808080;" vertex="1" parent="1">
<mxGeometry x="270" y="387" width="220" height="50" as="geometry"/>
</mxCell>
- <mxCell id="19" value="<pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">Master</pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" vertex="1" parent="1">
+ <mxCell id="19" value="<pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">Frontend</pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" vertex="1" parent="1">
<mxGeometry x="820" y="387" width="220" height="50" as="geometry"/>
</mxCell>
<mxCell id="20" value="<pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">VhostSoftBackend</pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#808080;strokeColor=#808080;" vertex="1" parent="1">
@@ -126,7 +126,7 @@
<mxCell id="36" value="<pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt"><pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">Message</pre></pre>" style="rounded=0;whiteSpace=wrap;html=1;fontStyle=1;labelBackgroundColor=none;fontColor=#FF00FF;strokeColor=#FF00FF;" vertex="1" parent="1">
<mxGeometry x="1360" y="632" width="190" height="50" as="geometry"/>
</mxCell>
- <mxCell id="37" value="<pre style="font-size: 16.5pt ; font-weight: 700 ; font-family: &quot;jetbrains mono&quot; , monospace"><pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">VhostUserMaster</pre></pre>" style="rounded=1;whiteSpace=wrap;html=1;labelBackgroundColor=none;strokeColor=#FF33FF;fontColor=#FF00FF;" vertex="1" parent="1">
+ <mxCell id="37" value="<pre style="font-size: 16.5pt ; font-weight: 700 ; font-family: &quot;jetbrains mono&quot; , monospace"><pre style="font-family: &quot;jetbrains mono&quot; , monospace ; font-size: 16.5pt">VhostUserFrontend</pre></pre>" style="rounded=1;whiteSpace=wrap;html=1;labelBackgroundColor=none;strokeColor=#FF33FF;fontColor=#FF00FF;" vertex="1" parent="1">
<mxGeometry x="980" y="257" width="230" height="60" as="geometry"/>
</mxCell>
<mxCell id="38" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=0;exitDx=0;exitDy=0;labelBackgroundColor=none;entryX=0.5;entryY=1;entryDx=0;entryDy=0;strokeColor=#00994D;" edge="1" parent="1" source="19" target="16">
diff --git a/crates/vhost/src/lib.rs b/crates/vhost/src/lib.rs
index f8be913..3c9fea8 100644
--- a/crates/vhost/src/lib.rs
+++ b/crates/vhost/src/lib.rs
@@ -26,8 +26,8 @@
//! vhost implementation in the Linux kernel. It implements the control plane needed to establish
//! virtqueues sharing with a user space process on the same host. It uses communication over a
//! Unix domain socket to share file descriptors in the ancillary data of the message.
-//! The protocol defines 2 sides of the communication, master and slave. Master is the application
-//! that shares its virtqueues. Slave is the consumer of the virtqueues. Master and slave can be
+//! The protocol defines 2 sides of the communication, frontend and backend. Frontend is the application
+//! that shares its virtqueues. Backend is the consumer of the virtqueues. Frontend and backend can be
//! either a client (i.e. connecting) or server (listening) in the socket communication.
#![deny(missing_docs)]
@@ -51,6 +51,12 @@
#[cfg(feature = "vhost-vsock")]
pub mod vsock;
+/// Due to the way `xen` handles memory mappings, we cannot combine it with the
+/// `postcopy` feature, which relies on persistent memory mappings. Thus we
+/// disallow enabling both features at the same time.
+#[cfg(all(feature = "postcopy", feature = "xen"))]
+compile_error!("Both `postcopy` and `xen` features can not be enabled at the same time.");
+
/// Error codes for vhost operations
#[derive(Debug)]
pub enum Error {
diff --git a/crates/vhost/src/vhost_kern/mod.rs b/crates/vhost/src/vhost_kern/mod.rs
index 1fa5000..834b41f 100644
--- a/crates/vhost/src/vhost_kern/mod.rs
+++ b/crates/vhost/src/vhost_kern/mod.rs
@@ -21,8 +21,9 @@
use vmm_sys_util::ioctl::{ioctl, ioctl_with_mut_ref, ioctl_with_ptr, ioctl_with_ref};
use super::{
- Error, Result, VhostBackend, VhostIotlbBackend, VhostIotlbMsg, VhostIotlbMsgParser,
- VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData, VHOST_MAX_MEMORY_REGIONS,
+ Error, Result, VhostAccess, VhostBackend, VhostIotlbBackend, VhostIotlbMsg,
+ VhostIotlbMsgParser, VhostIotlbType, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo,
+ VringConfigData, VHOST_MAX_MEMORY_REGIONS,
};
pub mod vhost_binding;
@@ -402,8 +403,8 @@
msg.iova = self.__bindgen_anon_1.iotlb.iova;
msg.size = self.__bindgen_anon_1.iotlb.size;
msg.userspace_addr = self.__bindgen_anon_1.iotlb.uaddr;
- msg.perm = mem::transmute(self.__bindgen_anon_1.iotlb.perm);
- msg.msg_type = mem::transmute(self.__bindgen_anon_1.iotlb.type_);
+ msg.perm = mem::transmute::<u8, VhostAccess>(self.__bindgen_anon_1.iotlb.perm);
+ msg.msg_type = mem::transmute::<u8, VhostIotlbType>(self.__bindgen_anon_1.iotlb.type_);
}
Ok(())
@@ -427,8 +428,8 @@
msg.iova = self.__bindgen_anon_1.iotlb.iova;
msg.size = self.__bindgen_anon_1.iotlb.size;
msg.userspace_addr = self.__bindgen_anon_1.iotlb.uaddr;
- msg.perm = mem::transmute(self.__bindgen_anon_1.iotlb.perm);
- msg.msg_type = mem::transmute(self.__bindgen_anon_1.iotlb.type_);
+ msg.perm = mem::transmute::<u8, VhostAccess>(self.__bindgen_anon_1.iotlb.perm);
+ msg.msg_type = mem::transmute::<u8, VhostIotlbType>(self.__bindgen_anon_1.iotlb.type_);
}
Ok(())
diff --git a/crates/vhost/src/vhost_kern/vdpa.rs b/crates/vhost/src/vhost_kern/vdpa.rs
index 65e0123..657efac 100644
--- a/crates/vhost/src/vhost_kern/vdpa.rs
+++ b/crates/vhost/src/vhost_kern/vdpa.rs
@@ -121,7 +121,10 @@
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
- config.as_mut_fam_struct().off = offset;
+ // SAFETY: We are not modifying the `len` field of the vhost-vdpa fam-struct
+ unsafe {
+ config.as_mut_fam_struct().off = offset;
+ }
// SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its
// return value checked.
@@ -142,7 +145,10 @@
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
- config.as_mut_fam_struct().off = offset;
+ // SAFETY: We are not modifying the `len` field of the vhost-vdpa fam-struct
+ unsafe {
+ config.as_mut_fam_struct().off = offset;
+ }
config.as_mut_slice().copy_from_slice(buffer);
let ret =
diff --git a/crates/vhost/src/vhost_kern/vhost_binding.rs b/crates/vhost/src/vhost_kern/vhost_binding.rs
index 5ebaa56..c8fcbdd 100644
--- a/crates/vhost/src/vhost_kern/vhost_binding.rs
+++ b/crates/vhost/src/vhost_kern/vhost_binding.rs
@@ -132,7 +132,7 @@
impl<T> ::std::clone::Clone for __IncompleteArrayField<T> {
#[inline]
fn clone(&self) -> Self {
- Self::new()
+ *self
}
}
diff --git a/crates/vhost/src/vhost_user/backend.rs b/crates/vhost/src/vhost_user/backend.rs
new file mode 100644
index 0000000..8463e1a
--- /dev/null
+++ b/crates/vhost/src/vhost_user/backend.rs
@@ -0,0 +1,86 @@
+// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+//! Traits and Structs for vhost-user backend.
+
+use std::sync::Arc;
+
+use super::connection::{Endpoint, Listener};
+use super::message::*;
+use super::{BackendReqHandler, Result, VhostUserBackendReqHandler};
+
+/// Vhost-user backend side connection listener.
+pub struct BackendListener<S: VhostUserBackendReqHandler> {
+ listener: Listener,
+ backend: Option<Arc<S>>,
+}
+
+/// Sets up a listener for incoming frontend connections, and handles construction
+/// of a BackendReqHandler on success.
+impl<S: VhostUserBackendReqHandler> BackendListener<S> {
+ /// Create a unix domain socket for incoming frontend connections.
+ pub fn new(listener: Listener, backend: Arc<S>) -> Result<Self> {
+ Ok(BackendListener {
+ listener,
+ backend: Some(backend),
+ })
+ }
+
+ /// Accept an incoming connection from the frontend, returning
+ /// Some(BackendReqHandler) on success, or None if the socket is nonblocking
+ /// and no incoming connection was detected.
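+ ///
+ /// A minimal accept sketch (assuming `handler` is an `Arc` of a type
+ /// implementing `VhostUserBackendReqHandler` and `path` is a socket path):
+ ///
+ /// ```ignore
+ /// let listener = Listener::new(path, true)?;
+ /// let mut backend_listener = BackendListener::new(listener, handler)?;
+ /// if let Some(req_handler) = backend_listener.accept()? {
+ ///     // Drive `req_handler` to process frontend requests.
+ /// }
+ /// ```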
+ pub fn accept(&mut self) -> Result<Option<BackendReqHandler<S>>> {
+ if let Some(fd) = self.listener.accept()? {
+ return Ok(Some(BackendReqHandler::new(
+ Endpoint::<VhostUserMsgHeader<FrontendReq>>::from_stream(fd),
+ self.backend.take().unwrap(),
+ )));
+ }
+ Ok(None)
+ }
+
+ /// Change blocking status on the listener.
+ pub fn set_nonblocking(&self, block: bool) -> Result<()> {
+ self.listener.set_nonblocking(block)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Mutex;
+
+ use super::*;
+ use crate::vhost_user::dummy_backend::DummyBackendReqHandler;
+
+ #[test]
+ fn test_backend_listener_set_nonblocking() {
+ let backend = Arc::new(Mutex::new(DummyBackendReqHandler::new()));
+ let listener =
+ Listener::new("/tmp/vhost_user_lib_unit_test_backend_nonblocking", true).unwrap();
+ let backend_listener = BackendListener::new(listener, backend).unwrap();
+
+ backend_listener.set_nonblocking(true).unwrap();
+ backend_listener.set_nonblocking(false).unwrap();
+ backend_listener.set_nonblocking(false).unwrap();
+ backend_listener.set_nonblocking(true).unwrap();
+ backend_listener.set_nonblocking(true).unwrap();
+ }
+
+ #[cfg(feature = "vhost-user-frontend")]
+ #[test]
+ fn test_backend_listener_accept() {
+ use super::super::Frontend;
+
+ let path = "/tmp/vhost_user_lib_unit_test_backend_accept";
+ let backend = Arc::new(Mutex::new(DummyBackendReqHandler::new()));
+ let listener = Listener::new(path, true).unwrap();
+ let mut backend_listener = BackendListener::new(listener, backend).unwrap();
+
+ backend_listener.set_nonblocking(true).unwrap();
+ assert!(backend_listener.accept().unwrap().is_none());
+ assert!(backend_listener.accept().unwrap().is_none());
+
+ let _frontend = Frontend::connect(path, 1).unwrap();
+ let _backend = backend_listener.accept().unwrap().unwrap();
+ }
+}
diff --git a/crates/vhost/src/vhost_user/backend_req.rs b/crates/vhost/src/vhost_user/backend_req.rs
new file mode 100644
index 0000000..f95f812
--- /dev/null
+++ b/crates/vhost/src/vhost_user/backend_req.rs
@@ -0,0 +1,269 @@
+// Copyright (C) 2020 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::io;
+use std::mem;
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::net::UnixStream;
+use std::sync::{Arc, Mutex, MutexGuard};
+
+use super::connection::Endpoint;
+use super::message::*;
+use super::{Error, HandlerResult, Result, VhostUserFrontendReqHandler};
+
+use vm_memory::ByteValued;
+
+struct BackendInternal {
+ sock: Endpoint<VhostUserMsgHeader<BackendReq>>,
+
+ // Protocol feature VHOST_USER_PROTOCOL_F_REPLY_ACK has been negotiated.
+ reply_ack_negotiated: bool,
+
+ // Protocol feature VHOST_USER_PROTOCOL_F_SHARED_OBJECT has been negotiated.
+ shared_object_negotiated: bool,
+
+ // whether the endpoint has encountered any failure
+ error: Option<i32>,
+}
+
+impl BackendInternal {
+ fn check_state(&self) -> Result<u64> {
+ match self.error {
+ Some(e) => Err(Error::SocketBroken(std::io::Error::from_raw_os_error(e))),
+ None => Ok(0),
+ }
+ }
+
+ fn send_message<T: ByteValued>(
+ &mut self,
+ request: BackendReq,
+ body: &T,
+ fds: Option<&[RawFd]>,
+ ) -> Result<u64> {
+ self.check_state()?;
+
+ let len = mem::size_of::<T>();
+ let mut hdr = VhostUserMsgHeader::new(request, 0, len as u32);
+ if self.reply_ack_negotiated {
+ hdr.set_need_reply(true);
+ }
+ self.sock.send_message(&hdr, body, fds)?;
+
+ self.wait_for_ack(&hdr)
+ }
+
+ fn wait_for_ack(&mut self, hdr: &VhostUserMsgHeader<BackendReq>) -> Result<u64> {
+ self.check_state()?;
+ if !self.reply_ack_negotiated {
+ return Ok(0);
+ }
+
+ let (reply, body, rfds) = self.sock.recv_body::<VhostUserU64>()?;
+ if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() {
+ return Err(Error::InvalidMessage);
+ }
+ if body.value != 0 {
+ return Err(Error::FrontendInternalError);
+ }
+
+ Ok(body.value)
+ }
+}
+
+/// Request proxy to send vhost-user backend requests to the frontend through the backend
+/// communication channel.
+///
+/// The [Backend] acts as a message proxy to forward vhost-user backend requests to the
+/// frontend through the vhost-user backend communication channel. The forwarded messages will be
+/// handled by the [FrontendReqHandler] server.
+///
+/// [Backend]: struct.Backend.html
+/// [FrontendReqHandler]: struct.FrontendReqHandler.html
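+///
+/// A minimal sketch (assuming `sock` is the `UnixStream` of the backend
+/// channel and the relevant protocol features have been negotiated):
+///
+/// ```ignore
+/// let backend = Backend::from_stream(sock);
+/// backend.set_reply_ack_flag(true);
+/// backend.set_shared_object_flag(true);
+/// // Forward a shared-object add request to the frontend.
+/// backend.shared_object_add(&VhostUserSharedMsg::default())?;
+/// ```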
+#[derive(Clone)]
+pub struct Backend {
+ // underlying Unix domain socket for communication
+ node: Arc<Mutex<BackendInternal>>,
+}
+
+impl Backend {
+ fn new(ep: Endpoint<VhostUserMsgHeader<BackendReq>>) -> Self {
+ Backend {
+ node: Arc::new(Mutex::new(BackendInternal {
+ sock: ep,
+ reply_ack_negotiated: false,
+ shared_object_negotiated: false,
+ error: None,
+ })),
+ }
+ }
+
+ fn node(&self) -> MutexGuard<BackendInternal> {
+ self.node.lock().unwrap()
+ }
+
+ fn send_message<T: ByteValued>(
+ &self,
+ request: BackendReq,
+ body: &T,
+ fds: Option<&[RawFd]>,
+ ) -> io::Result<u64> {
+ self.node()
+ .send_message(request, body, fds)
+ .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{}", e)))
+ }
+
+ /// Create a new instance from a `UnixStream` object.
+ pub fn from_stream(sock: UnixStream) -> Self {
+ Self::new(Endpoint::<VhostUserMsgHeader<BackendReq>>::from_stream(
+ sock,
+ ))
+ }
+
+ /// Set the negotiation state of the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature.
+ ///
+ /// When the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature has been negotiated,
+ /// the "REPLY_ACK" flag will be set in the message header for every backend to frontend request
+ /// message.
+ pub fn set_reply_ack_flag(&self, enable: bool) {
+ self.node().reply_ack_negotiated = enable;
+ }
+
+ /// Set the negotiation state of the `VHOST_USER_PROTOCOL_F_SHARED_OBJECT` protocol feature.
+ ///
+ /// When the `VHOST_USER_PROTOCOL_F_SHARED_OBJECT` protocol feature has been negotiated,
+ /// the backend is allowed to send "SHARED_OBJECT_*" messages to the frontend.
+ pub fn set_shared_object_flag(&self, enable: bool) {
+ self.node().shared_object_negotiated = enable;
+ }
+
+ /// Mark endpoint as failed with specified error code.
+ pub fn set_failed(&self, error: i32) {
+ self.node().error = Some(error);
+ }
+}
+
+impl VhostUserFrontendReqHandler for Backend {
+ /// Forward vhost-user shared-object add request to the frontend.
+ fn shared_object_add(&self, uuid: &VhostUserSharedMsg) -> HandlerResult<u64> {
+ if !self.node().shared_object_negotiated {
+ return Err(io::Error::new(
+ io::ErrorKind::Other,
+ "Shared Object feature not negotiated",
+ ));
+ }
+ self.send_message(BackendReq::SHARED_OBJECT_ADD, uuid, None)
+ }
+
+ /// Forward vhost-user shared-object remove request to the frontend.
+ fn shared_object_remove(&self, uuid: &VhostUserSharedMsg) -> HandlerResult<u64> {
+ if !self.node().shared_object_negotiated {
+ return Err(io::Error::new(
+ io::ErrorKind::Other,
+ "Shared Object feature not negotiated",
+ ));
+ }
+ self.send_message(BackendReq::SHARED_OBJECT_REMOVE, uuid, None)
+ }
+
+ /// Forward vhost-user shared-object lookup request to the frontend.
+ fn shared_object_lookup(
+ &self,
+ uuid: &VhostUserSharedMsg,
+ fd: &dyn AsRawFd,
+ ) -> HandlerResult<u64> {
+ if !self.node().shared_object_negotiated {
+ return Err(io::Error::new(
+ io::ErrorKind::Other,
+ "Shared Object feature not negotiated",
+ ));
+ }
+ self.send_message(
+ BackendReq::SHARED_OBJECT_LOOKUP,
+ uuid,
+ Some(&[fd.as_raw_fd()]),
+ )
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::os::unix::io::AsRawFd;
+
+ use super::*;
+
+ #[test]
+ fn test_backend_req_set_failed() {
+ let (p1, _p2) = UnixStream::pair().unwrap();
+ let backend = Backend::from_stream(p1);
+
+ assert!(backend.node().error.is_none());
+ backend.set_failed(libc::EAGAIN);
+ assert_eq!(backend.node().error, Some(libc::EAGAIN));
+ }
+
+ #[test]
+ fn test_backend_req_send_failure() {
+ let (p1, _) = UnixStream::pair().unwrap();
+ let backend = Backend::from_stream(p1);
+
+ backend.set_failed(libc::ECONNRESET);
+ backend
+ .shared_object_add(&VhostUserSharedMsg::default())
+ .unwrap_err();
+ backend
+ .shared_object_remove(&VhostUserSharedMsg::default())
+ .unwrap_err();
+ backend.node().error = None;
+ }
+
+ #[test]
+ fn test_backend_req_recv_negative() {
+ let (p1, p2) = UnixStream::pair().unwrap();
+ let backend = Backend::from_stream(p1);
+ let mut frontend = Endpoint::<VhostUserMsgHeader<BackendReq>>::from_stream(p2);
+
+ let len = mem::size_of::<VhostUserSharedMsg>();
+ let mut hdr = VhostUserMsgHeader::new(
+ BackendReq::SHARED_OBJECT_ADD,
+ VhostUserHeaderFlag::REPLY.bits(),
+ len as u32,
+ );
+ let body = VhostUserU64::new(0);
+
+ frontend
+ .send_message(&hdr, &body, Some(&[frontend.as_raw_fd()]))
+ .unwrap();
+ backend
+ .shared_object_add(&VhostUserSharedMsg::default())
+ .unwrap_err();
+
+ backend.set_shared_object_flag(true);
+ backend
+ .shared_object_add(&VhostUserSharedMsg::default())
+ .unwrap();
+
+ backend.set_reply_ack_flag(true);
+ backend
+ .shared_object_add(&VhostUserSharedMsg::default())
+ .unwrap_err();
+
+ hdr.set_code(BackendReq::SHARED_OBJECT_REMOVE);
+ frontend.send_message(&hdr, &body, None).unwrap();
+ backend
+ .shared_object_add(&VhostUserSharedMsg::default())
+ .unwrap_err();
+ hdr.set_code(BackendReq::SHARED_OBJECT_ADD);
+
+ let body = VhostUserU64::new(1);
+ frontend.send_message(&hdr, &body, None).unwrap();
+ backend
+ .shared_object_add(&VhostUserSharedMsg::default())
+ .unwrap_err();
+
+ let body = VhostUserU64::new(0);
+ frontend.send_message(&hdr, &body, None).unwrap();
+ backend
+ .shared_object_add(&VhostUserSharedMsg::default())
+ .unwrap();
+ }
+}
diff --git a/crates/vhost/src/vhost_user/slave_req_handler.rs b/crates/vhost/src/vhost_user/backend_req_handler.rs
similarity index 66%
rename from crates/vhost/src/vhost_user/slave_req_handler.rs
rename to crates/vhost/src/vhost_user/backend_req_handler.rs
index e998339..635766d 100644
--- a/crates/vhost/src/vhost_user/slave_req_handler.rs
+++ b/crates/vhost/src/vhost_user/backend_req_handler.rs
@@ -10,35 +10,37 @@
use vm_memory::ByteValued;
+use super::backend_req::Backend;
use super::connection::Endpoint;
+use super::gpu_backend_req::GpuBackend;
use super::message::*;
-use super::slave_req::Slave;
use super::{take_single_file, Error, Result};
-/// Services provided to the master by the slave with interior mutability.
+/// Services provided to the frontend by the backend with interior mutability.
///
-/// The [VhostUserSlaveReqHandler] trait defines the services provided to the master by the slave.
-/// And the [VhostUserSlaveReqHandlerMut] trait is a helper mirroring [VhostUserSlaveReqHandler],
+/// The [VhostUserBackendReqHandler] trait defines the services provided to the frontend by the backend.
+/// And the [VhostUserBackendReqHandlerMut] trait is a helper mirroring [VhostUserBackendReqHandler],
/// but without interior mutability.
-/// The vhost-user specification defines a master communication channel, by which masters could
-/// request services from slaves. The [VhostUserSlaveReqHandler] trait defines services provided by
-/// slaves, and it's used both on the master side and slave side.
+/// The vhost-user specification defines a frontend communication channel, by which frontends could
+/// request services from backends. The [VhostUserBackendReqHandler] trait defines services provided by
+/// backends, and it's used both on the frontend side and backend side.
///
-/// - on the master side, a stub forwarder implementing [VhostUserSlaveReqHandler] will proxy
-/// service requests to slaves.
-/// - on the slave side, the [SlaveReqHandler] will forward service requests to a handler
-/// implementing [VhostUserSlaveReqHandler].
+/// - on the frontend side, a stub forwarder implementing [VhostUserBackendReqHandler] will proxy
+/// service requests to backends.
+/// - on the backend side, the [BackendReqHandler] will forward service requests to a handler
+/// implementing [VhostUserBackendReqHandler].
///
-/// The [VhostUserSlaveReqHandler] trait is design with interior mutability to improve performance
+/// The [VhostUserBackendReqHandler] trait is designed with interior mutability to improve performance
/// for multi-threading.
///
-/// [VhostUserSlaveReqHandler]: trait.VhostUserSlaveReqHandler.html
-/// [VhostUserSlaveReqHandlerMut]: trait.VhostUserSlaveReqHandlerMut.html
-/// [SlaveReqHandler]: struct.SlaveReqHandler.html
+/// [VhostUserBackendReqHandler]: trait.VhostUserBackendReqHandler.html
+/// [VhostUserBackendReqHandlerMut]: trait.VhostUserBackendReqHandlerMut.html
+/// [BackendReqHandler]: struct.BackendReqHandler.html
#[allow(missing_docs)]
-pub trait VhostUserSlaveReqHandler {
+pub trait VhostUserBackendReqHandler {
fn set_owner(&self) -> Result<()>;
fn reset_owner(&self) -> Result<()>;
+ fn reset_device(&self) -> Result<()>;
fn get_features(&self) -> Result<u64>;
fn set_features(&self, features: u64) -> Result<()>;
fn set_mem_table(&self, ctx: &[VhostUserMemoryRegion], files: Vec<File>) -> Result<()>;
@@ -64,21 +66,37 @@
fn set_vring_enable(&self, index: u32, enable: bool) -> Result<()>;
fn get_config(&self, offset: u32, size: u32, flags: VhostUserConfigFlags) -> Result<Vec<u8>>;
fn set_config(&self, offset: u32, buf: &[u8], flags: VhostUserConfigFlags) -> Result<()>;
- fn set_slave_req_fd(&self, _slave: Slave) {}
+ fn set_backend_req_fd(&self, _backend: Backend) {}
+ fn set_gpu_socket(&self, _gpu_backend: GpuBackend) -> Result<()>;
fn get_inflight_fd(&self, inflight: &VhostUserInflight) -> Result<(VhostUserInflight, File)>;
fn set_inflight_fd(&self, inflight: &VhostUserInflight, file: File) -> Result<()>;
fn get_max_mem_slots(&self) -> Result<u64>;
fn add_mem_region(&self, region: &VhostUserSingleMemoryRegion, fd: File) -> Result<()>;
fn remove_mem_region(&self, region: &VhostUserSingleMemoryRegion) -> Result<()>;
+ fn set_device_state_fd(
+ &self,
+ direction: VhostTransferStateDirection,
+ phase: VhostTransferStatePhase,
+ fd: File,
+ ) -> Result<Option<File>>;
+ fn check_device_state(&self) -> Result<()>;
+ #[cfg(feature = "postcopy")]
+ fn postcopy_advice(&self) -> Result<File>;
+ #[cfg(feature = "postcopy")]
+ fn postcopy_listen(&self) -> Result<()>;
+ #[cfg(feature = "postcopy")]
+ fn postcopy_end(&self) -> Result<()>;
+ fn set_log_base(&self, log: &VhostUserLog, file: File) -> Result<()>;
}
-/// Services provided to the master by the slave without interior mutability.
+/// Services provided to the frontend by the backend without interior mutability.
///
-/// This is a helper trait mirroring the [VhostUserSlaveReqHandler] trait.
+/// This is a helper trait mirroring the [VhostUserBackendReqHandler] trait.
#[allow(missing_docs)]
-pub trait VhostUserSlaveReqHandlerMut {
+pub trait VhostUserBackendReqHandlerMut {
fn set_owner(&mut self) -> Result<()>;
fn reset_owner(&mut self) -> Result<()>;
+ fn reset_device(&mut self) -> Result<()>;
fn get_features(&mut self) -> Result<u64>;
fn set_features(&mut self, features: u64) -> Result<()>;
fn set_mem_table(&mut self, ctx: &[VhostUserMemoryRegion], files: Vec<File>) -> Result<()>;
@@ -109,7 +127,8 @@
flags: VhostUserConfigFlags,
) -> Result<Vec<u8>>;
fn set_config(&mut self, offset: u32, buf: &[u8], flags: VhostUserConfigFlags) -> Result<()>;
- fn set_slave_req_fd(&mut self, _slave: Slave) {}
+ fn set_backend_req_fd(&mut self, _backend: Backend) {}
+ fn set_gpu_socket(&mut self, _gpu_backend: GpuBackend) -> Result<()>;
fn get_inflight_fd(
&mut self,
inflight: &VhostUserInflight,
@@ -118,9 +137,23 @@
fn get_max_mem_slots(&mut self) -> Result<u64>;
fn add_mem_region(&mut self, region: &VhostUserSingleMemoryRegion, fd: File) -> Result<()>;
fn remove_mem_region(&mut self, region: &VhostUserSingleMemoryRegion) -> Result<()>;
+ fn set_device_state_fd(
+ &mut self,
+ direction: VhostTransferStateDirection,
+ phase: VhostTransferStatePhase,
+ fd: File,
+ ) -> Result<Option<File>>;
+ fn check_device_state(&mut self) -> Result<()>;
+ #[cfg(feature = "postcopy")]
+ fn postcopy_advice(&mut self) -> Result<File>;
+ #[cfg(feature = "postcopy")]
+ fn postcopy_listen(&mut self) -> Result<()>;
+ #[cfg(feature = "postcopy")]
+ fn postcopy_end(&mut self) -> Result<()>;
+ fn set_log_base(&mut self, log: &VhostUserLog, file: File) -> Result<()>;
}
-impl<T: VhostUserSlaveReqHandlerMut> VhostUserSlaveReqHandler for Mutex<T> {
+impl<T: VhostUserBackendReqHandlerMut> VhostUserBackendReqHandler for Mutex<T> {
fn set_owner(&self) -> Result<()> {
self.lock().unwrap().set_owner()
}
@@ -129,6 +162,10 @@
self.lock().unwrap().reset_owner()
}
+ fn reset_device(&self) -> Result<()> {
+ self.lock().unwrap().reset_device()
+ }
+
fn get_features(&self) -> Result<u64> {
self.lock().unwrap().get_features()
}
@@ -203,8 +240,12 @@
self.lock().unwrap().set_config(offset, buf, flags)
}
- fn set_slave_req_fd(&self, slave: Slave) {
- self.lock().unwrap().set_slave_req_fd(slave)
+ fn set_backend_req_fd(&self, backend: Backend) {
+ self.lock().unwrap().set_backend_req_fd(backend)
+ }
+
+ fn set_gpu_socket(&self, gpu_backend: GpuBackend) -> Result<()> {
+ self.lock().unwrap().set_gpu_socket(gpu_backend)
}
fn get_inflight_fd(&self, inflight: &VhostUserInflight) -> Result<(VhostUserInflight, File)> {
@@ -226,22 +267,55 @@
fn remove_mem_region(&self, region: &VhostUserSingleMemoryRegion) -> Result<()> {
self.lock().unwrap().remove_mem_region(region)
}
+
+ fn set_device_state_fd(
+ &self,
+ direction: VhostTransferStateDirection,
+ phase: VhostTransferStatePhase,
+ fd: File,
+ ) -> Result<Option<File>> {
+ self.lock()
+ .unwrap()
+ .set_device_state_fd(direction, phase, fd)
+ }
+
+ fn check_device_state(&self) -> Result<()> {
+ self.lock().unwrap().check_device_state()
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_advice(&self) -> Result<File> {
+ self.lock().unwrap().postcopy_advice()
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_listen(&self) -> Result<()> {
+ self.lock().unwrap().postcopy_listen()
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_end(&self) -> Result<()> {
+ self.lock().unwrap().postcopy_end()
+ }
+ fn set_log_base(&self, log: &VhostUserLog, file: File) -> Result<()> {
+ self.lock().unwrap().set_log_base(log, file)
+ }
}
-/// Server to handle service requests from masters from the master communication channel.
+/// Server to handle service requests from frontends over the frontend communication channel.
///
-/// The [SlaveReqHandler] acts as a server on the slave side, to handle service requests from
-/// masters on the master communication channel. It's actually a proxy invoking the registered
-/// handler implementing [VhostUserSlaveReqHandler] to do the real work.
+/// The [BackendReqHandler] acts as a server on the backend side, handling service requests from
+/// frontends on the frontend communication channel. It is a proxy that invokes the registered
+/// handler implementing [VhostUserBackendReqHandler] to do the real work.
///
-/// The lifetime of the SlaveReqHandler object should be the same as the underline Unix Domain
+/// The lifetime of the BackendReqHandler object should be the same as the underlying Unix Domain
/// Socket, so it gets simpler to recover from disconnect.
///
-/// [VhostUserSlaveReqHandler]: trait.VhostUserSlaveReqHandler.html
-/// [SlaveReqHandler]: struct.SlaveReqHandler.html
-pub struct SlaveReqHandler<S: VhostUserSlaveReqHandler> {
+/// [VhostUserBackendReqHandler]: trait.VhostUserBackendReqHandler.html
+/// [BackendReqHandler]: struct.BackendReqHandler.html
+pub struct BackendReqHandler<S: VhostUserBackendReqHandler> {
// underlying Unix domain socket for communication
- main_sock: Endpoint<MasterReq>,
+ main_sock: Endpoint<VhostUserMsgHeader<FrontendReq>>,
// the vhost-user backend device object
backend: Arc<S>,
@@ -256,10 +330,13 @@
error: Option<i32>,
}
-impl<S: VhostUserSlaveReqHandler> SlaveReqHandler<S> {
- /// Create a vhost-user slave endpoint.
- pub(super) fn new(main_sock: Endpoint<MasterReq>, backend: Arc<S>) -> Self {
- SlaveReqHandler {
+impl<S: VhostUserBackendReqHandler> BackendReqHandler<S> {
+ /// Create a vhost-user backend endpoint.
+ pub(super) fn new(
+ main_sock: Endpoint<VhostUserMsgHeader<FrontendReq>>,
+ backend: Arc<S>,
+ ) -> Self {
+ BackendReqHandler {
main_sock,
backend,
virtio_features: 0,
@@ -287,18 +364,21 @@
}
}
- /// Create a vhost-user slave endpoint from a connected socket.
+ /// Create a vhost-user backend endpoint from a connected socket.
pub fn from_stream(socket: UnixStream, backend: Arc<S>) -> Self {
Self::new(Endpoint::from_stream(socket), backend)
}
- /// Create a new vhost-user slave endpoint.
+ /// Create a new vhost-user backend endpoint.
///
/// # Arguments
/// * - `path` - path of Unix domain socket listener to connect to
- /// * - `backend` - handler for requests from the master to the slave
+ /// * - `backend` - handler for requests from the frontend to the backend
pub fn connect(path: &str, backend: Arc<S>) -> Result<Self> {
- Ok(Self::new(Endpoint::<MasterReq>::connect(path)?, backend))
+ Ok(Self::new(
+ Endpoint::<VhostUserMsgHeader<FrontendReq>>::connect(path)?,
+ backend,
+ ))
}
/// Mark endpoint as failed with specified error code.
@@ -306,9 +386,9 @@
self.error = Some(error);
}
- /// Main entrance to server slave request from the slave communication channel.
+ /// Main entry point to serve backend requests from the backend communication channel.
///
- /// Receive and handle one incoming request message from the master. The caller needs to:
+ /// Receive and handle one incoming request message from the frontend. The caller needs to:
/// - serialize calls to this function
/// - decide what to do when an error happens
/// - optionally recover from failure (see the sketch after this list)
@@ -340,17 +420,23 @@
};
match hdr.get_code() {
- Ok(MasterReq::SET_OWNER) => {
+ Ok(FrontendReq::SET_OWNER) => {
self.check_request_size(&hdr, size, 0)?;
let res = self.backend.set_owner();
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::RESET_OWNER) => {
+ Ok(FrontendReq::RESET_OWNER) => {
self.check_request_size(&hdr, size, 0)?;
let res = self.backend.reset_owner();
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::GET_FEATURES) => {
+ Ok(FrontendReq::RESET_DEVICE) => {
+ self.check_proto_feature(VhostUserProtocolFeatures::RESET_DEVICE)?;
+ self.check_request_size(&hdr, size, 0)?;
+ let res = self.backend.reset_device();
+ self.send_ack_message(&hdr, res)?;
+ }
+ Ok(FrontendReq::GET_FEATURES) => {
self.check_request_size(&hdr, size, 0)?;
let features = self.backend.get_features()?;
let msg = VhostUserU64::new(features);
@@ -358,23 +444,23 @@
self.virtio_features = features;
self.update_reply_ack_flag();
}
- Ok(MasterReq::SET_FEATURES) => {
+ Ok(FrontendReq::SET_FEATURES) => {
let msg = self.extract_request_body::<VhostUserU64>(&hdr, size, &buf)?;
let res = self.backend.set_features(msg.value);
self.acked_virtio_features = msg.value;
self.update_reply_ack_flag();
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::SET_MEM_TABLE) => {
+ Ok(FrontendReq::SET_MEM_TABLE) => {
let res = self.set_mem_table(&hdr, size, &buf, files);
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::SET_VRING_NUM) => {
+ Ok(FrontendReq::SET_VRING_NUM) => {
let msg = self.extract_request_body::<VhostUserVringState>(&hdr, size, &buf)?;
let res = self.backend.set_vring_num(msg.index, msg.num);
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::SET_VRING_ADDR) => {
+ Ok(FrontendReq::SET_VRING_ADDR) => {
let msg = self.extract_request_body::<VhostUserVringAddr>(&hdr, size, &buf)?;
let flags = match VhostUserVringAddrFlags::from_bits(msg.flags) {
Some(val) => val,
@@ -390,35 +476,35 @@
);
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::SET_VRING_BASE) => {
+ Ok(FrontendReq::SET_VRING_BASE) => {
let msg = self.extract_request_body::<VhostUserVringState>(&hdr, size, &buf)?;
let res = self.backend.set_vring_base(msg.index, msg.num);
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::GET_VRING_BASE) => {
+ Ok(FrontendReq::GET_VRING_BASE) => {
let msg = self.extract_request_body::<VhostUserVringState>(&hdr, size, &buf)?;
let reply = self.backend.get_vring_base(msg.index)?;
self.send_reply_message(&hdr, &reply)?;
}
- Ok(MasterReq::SET_VRING_CALL) => {
+ Ok(FrontendReq::SET_VRING_CALL) => {
self.check_request_size(&hdr, size, mem::size_of::<VhostUserU64>())?;
let (index, file) = self.handle_vring_fd_request(&buf, files)?;
let res = self.backend.set_vring_call(index, file);
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::SET_VRING_KICK) => {
+ Ok(FrontendReq::SET_VRING_KICK) => {
self.check_request_size(&hdr, size, mem::size_of::<VhostUserU64>())?;
let (index, file) = self.handle_vring_fd_request(&buf, files)?;
let res = self.backend.set_vring_kick(index, file);
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::SET_VRING_ERR) => {
+ Ok(FrontendReq::SET_VRING_ERR) => {
self.check_request_size(&hdr, size, mem::size_of::<VhostUserU64>())?;
let (index, file) = self.handle_vring_fd_request(&buf, files)?;
let res = self.backend.set_vring_err(index, file);
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::GET_PROTOCOL_FEATURES) => {
+ Ok(FrontendReq::GET_PROTOCOL_FEATURES) => {
self.check_request_size(&hdr, size, 0)?;
let features = self.backend.get_protocol_features()?;
@@ -431,7 +517,7 @@
self.protocol_features = features;
self.update_reply_ack_flag();
}
- Ok(MasterReq::SET_PROTOCOL_FEATURES) => {
+ Ok(FrontendReq::SET_PROTOCOL_FEATURES) => {
let msg = self.extract_request_body::<VhostUserU64>(&hdr, size, &buf)?;
let res = self.backend.set_protocol_features(msg.value);
self.acked_protocol_features = msg.value;
@@ -441,14 +527,14 @@
#[cfg(feature = "xen")]
self.check_proto_feature(VhostUserProtocolFeatures::XEN_MMAP)?;
}
- Ok(MasterReq::GET_QUEUE_NUM) => {
+ Ok(FrontendReq::GET_QUEUE_NUM) => {
self.check_proto_feature(VhostUserProtocolFeatures::MQ)?;
self.check_request_size(&hdr, size, 0)?;
let num = self.backend.get_queue_num()?;
let msg = VhostUserU64::new(num);
self.send_reply_message(&hdr, &msg)?;
}
- Ok(MasterReq::SET_VRING_ENABLE) => {
+ Ok(FrontendReq::SET_VRING_ENABLE) => {
let msg = self.extract_request_body::<VhostUserVringState>(&hdr, size, &buf)?;
self.check_feature(VhostUserVirtioFeatures::PROTOCOL_FEATURES)?;
let enable = match msg.num {
@@ -460,24 +546,24 @@
let res = self.backend.set_vring_enable(msg.index, enable);
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::GET_CONFIG) => {
+ Ok(FrontendReq::GET_CONFIG) => {
self.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?;
self.check_request_size(&hdr, size, hdr.get_size() as usize)?;
self.get_config(&hdr, &buf)?;
}
- Ok(MasterReq::SET_CONFIG) => {
+ Ok(FrontendReq::SET_CONFIG) => {
self.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?;
self.check_request_size(&hdr, size, hdr.get_size() as usize)?;
let res = self.set_config(size, &buf);
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::SET_SLAVE_REQ_FD) => {
- self.check_proto_feature(VhostUserProtocolFeatures::SLAVE_REQ)?;
+ Ok(FrontendReq::SET_BACKEND_REQ_FD) => {
+ self.check_proto_feature(VhostUserProtocolFeatures::BACKEND_REQ)?;
self.check_request_size(&hdr, size, hdr.get_size() as usize)?;
- let res = self.set_slave_req_fd(files);
+ let res = self.set_backend_req_fd(files);
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::GET_INFLIGHT_FD) => {
+ Ok(FrontendReq::GET_INFLIGHT_FD) => {
self.check_proto_feature(VhostUserProtocolFeatures::INFLIGHT_SHMFD)?;
let msg = self.extract_request_body::<VhostUserInflight>(&hdr, size, &buf)?;
@@ -486,21 +572,25 @@
self.main_sock
.send_message(&reply_hdr, &inflight, Some(&[file.as_raw_fd()]))?;
}
- Ok(MasterReq::SET_INFLIGHT_FD) => {
+ Ok(FrontendReq::SET_INFLIGHT_FD) => {
self.check_proto_feature(VhostUserProtocolFeatures::INFLIGHT_SHMFD)?;
let file = take_single_file(files).ok_or(Error::IncorrectFds)?;
let msg = self.extract_request_body::<VhostUserInflight>(&hdr, size, &buf)?;
let res = self.backend.set_inflight_fd(&msg, file);
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::GET_MAX_MEM_SLOTS) => {
+ Ok(FrontendReq::GPU_SET_SOCKET) => {
+ let res = self.set_gpu_socket(files);
+ self.send_ack_message(&hdr, res)?;
+ }
+ Ok(FrontendReq::GET_MAX_MEM_SLOTS) => {
self.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?;
self.check_request_size(&hdr, size, 0)?;
let num = self.backend.get_max_mem_slots()?;
let msg = VhostUserU64::new(num);
self.send_reply_message(&hdr, &msg)?;
}
- Ok(MasterReq::ADD_MEM_REG) => {
+ Ok(FrontendReq::ADD_MEM_REG) => {
self.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?;
let mut files = files.ok_or(Error::InvalidParam)?;
if files.len() != 1 {
@@ -511,7 +601,7 @@
let res = self.backend.add_mem_region(&msg, files.swap_remove(0));
self.send_ack_message(&hdr, res)?;
}
- Ok(MasterReq::REM_MEM_REG) => {
+ Ok(FrontendReq::REM_MEM_REG) => {
self.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?;
let msg =
@@ -519,6 +609,92 @@
let res = self.backend.remove_mem_region(&msg);
self.send_ack_message(&hdr, res)?;
}
+ Ok(FrontendReq::SET_DEVICE_STATE_FD) => {
+ let file = take_single_file(files).ok_or(Error::IncorrectFds)?;
+ let msg =
+ self.extract_request_body::<VhostUserTransferDeviceState>(&hdr, size, &buf)?;
+ let reply_hdr = self.new_reply_header::<VhostUserU64>(&hdr, 0)?;
+
+ let direction: VhostTransferStateDirection = msg
+ .direction
+ .try_into()
+ .map_err(|_| Error::InvalidMessage)?;
+ let phase: VhostTransferStatePhase =
+ msg.phase.try_into().map_err(|_| Error::InvalidMessage)?;
+ let res = self.backend.set_device_state_fd(direction, phase, file);
+
+ // The returned value both indicates success and signals whether a file
+ // descriptor for a back-end-provided channel is attached: bits 0–7 are 0 on
+ // success and non-zero on error. Bit 8 is the invalid-FD flag; it is set
+ // when no file descriptor is returned.
+ match res {
+ Ok(None) => {
+ let msg = VhostUserU64::new(0x100); // set invalid FD flag
+ self.main_sock.send_message(&reply_hdr, &msg, None)?;
+ }
+ Ok(Some(file)) => {
+ let msg = VhostUserU64::new(0);
+ self.main_sock
+ .send_message(&reply_hdr, &msg, Some(&[file.as_raw_fd()]))?;
+ }
+ Err(_) => {
+ let msg = VhostUserU64::new(0x101);
+ self.main_sock.send_message(&reply_hdr, &msg, None)?;
+ }
+ }
+ }
+ Ok(FrontendReq::CHECK_DEVICE_STATE) => {
+ let res = self.backend.check_device_state();
+
+ // We must return a value in the payload to indicate success or error:
+ // 0 is success, any non-zero value is an error.
+ let msg = match res {
+ Ok(_) => VhostUserU64::new(0),
+ Err(_) => VhostUserU64::new(1),
+ };
+ self.send_reply_message(&hdr, &msg)?;
+ }
+ #[cfg(feature = "postcopy")]
+ Ok(FrontendReq::POSTCOPY_ADVISE) => {
+ self.check_proto_feature(VhostUserProtocolFeatures::PAGEFAULT)?;
+
+ let res = self.backend.postcopy_advice();
+ match res {
+ Ok(uffd_file) => {
+ let hdr = self.new_reply_header::<VhostUserEmpty>(&hdr, 0)?;
+ self.main_sock.send_message(
+ &hdr,
+ &VhostUserEmpty,
+ Some(&[uffd_file.as_raw_fd()]),
+ )?
+ }
+ Err(_) => self.main_sock.send_message(&hdr, &VhostUserEmpty, None)?,
+ }
+ }
+ #[cfg(feature = "postcopy")]
+ Ok(FrontendReq::POSTCOPY_LISTEN) => {
+ self.check_proto_feature(VhostUserProtocolFeatures::PAGEFAULT)?;
+ let res = self.backend.postcopy_listen();
+ self.send_ack_message(&hdr, res)?;
+ }
+ #[cfg(feature = "postcopy")]
+ Ok(FrontendReq::POSTCOPY_END) => {
+ self.check_proto_feature(VhostUserProtocolFeatures::PAGEFAULT)?;
+ let res = self.backend.postcopy_end();
+ self.send_ack_message(&hdr, res)?;
+ }
+ // Sets the logging shared memory space.
+ // When the back-end has the `VHOST_USER_PROTOCOL_F_LOG_SHMFD` protocol feature, the log
+ // memory `fd` is provided in the ancillary data of the `VHOST_USER_SET_LOG_BASE` message,
+ // and the size and offset of the shared memory area are carried in the message body.
+ // See https://qemu-project.gitlab.io/qemu/interop/vhost-user.html#migration.
+ Ok(FrontendReq::SET_LOG_BASE) => {
+ self.check_proto_feature(VhostUserProtocolFeatures::LOG_SHMFD)?;
+ let file = take_single_file(files).ok_or(Error::IncorrectFds)?;
+ let msg = self.extract_request_body::<VhostUserLog>(&hdr, size, &buf)?;
+ self.backend.set_log_base(&msg, file)?;
+ self.send_reply_message(&hdr, &msg)?;
+ }
_ => {
return Err(Error::InvalidMessage);
}
@@ -528,7 +704,7 @@
fn set_mem_table(
&mut self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
size: usize,
buf: &[u8],
files: Option<Vec<File>>,
@@ -575,7 +751,7 @@
self.backend.set_mem_table(regions, files)
}
- fn get_config(&mut self, hdr: &VhostUserMsgHeader<MasterReq>, buf: &[u8]) -> Result<()> {
+ fn get_config(&mut self, hdr: &VhostUserMsgHeader<FrontendReq>, buf: &[u8]) -> Result<()> {
let payload_offset = mem::size_of::<VhostUserConfig>();
if buf.len() > MAX_MSG_SIZE || buf.len() < payload_offset {
return Err(Error::InvalidMessage);
@@ -594,9 +770,9 @@
};
let res = self.backend.get_config(msg.offset, msg.size, flags);
- // vhost-user slave's payload size MUST match master's request
+ // vhost-user backend's payload size MUST match frontend's request
// on success; a zero-length payload indicates an error
- // to vhost-user master.
+ // to vhost-user frontend.
match res {
Ok(ref buf) if buf.len() == msg.size as usize => {
let reply = VhostUserConfig::new(msg.offset, buf.len() as u32, flags);
@@ -632,17 +808,27 @@
.set_config(msg.offset, &buf[mem::size_of::<VhostUserConfig>()..], flags)
}
- fn set_slave_req_fd(&mut self, files: Option<Vec<File>>) -> Result<()> {
+ fn set_backend_req_fd(&mut self, files: Option<Vec<File>>) -> Result<()> {
let file = take_single_file(files).ok_or(Error::InvalidMessage)?;
// SAFETY: Safe because we have ownership of the files that were
// checked when received. We have to trust that they are Unix sockets
// since we have no way to check this. If not, it will fail later.
let sock = unsafe { UnixStream::from_raw_fd(file.into_raw_fd()) };
- let slave = Slave::from_stream(sock);
- self.backend.set_slave_req_fd(slave);
+ let backend = Backend::from_stream(sock);
+ self.backend.set_backend_req_fd(backend);
Ok(())
}
+ fn set_gpu_socket(&mut self, files: Option<Vec<File>>) -> Result<()> {
+ let file = take_single_file(files).ok_or(Error::InvalidMessage)?;
+ // SAFETY: Safe because we have ownership of the files that were
+ // checked when received. We have to trust that they are Unix sockets
+ // since we have no way to check this. If not, it will fail later.
+ let sock = unsafe { UnixStream::from_raw_fd(file.into_raw_fd()) };
+ let gpu_backend = GpuBackend::from_stream(sock);
+ self.backend.set_gpu_socket(gpu_backend)
+ }
+
fn handle_vring_fd_request(
&mut self,
buf: &[u8],
@@ -682,7 +868,7 @@
fn check_request_size(
&self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
size: usize,
expected: usize,
) -> Result<()> {
@@ -698,20 +884,22 @@
fn check_attached_files(
&self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
files: &Option<Vec<File>>,
) -> Result<()> {
match hdr.get_code() {
Ok(
- MasterReq::SET_MEM_TABLE
- | MasterReq::SET_VRING_CALL
- | MasterReq::SET_VRING_KICK
- | MasterReq::SET_VRING_ERR
- | MasterReq::SET_LOG_BASE
- | MasterReq::SET_LOG_FD
- | MasterReq::SET_SLAVE_REQ_FD
- | MasterReq::SET_INFLIGHT_FD
- | MasterReq::ADD_MEM_REG,
+ FrontendReq::SET_MEM_TABLE
+ | FrontendReq::SET_VRING_CALL
+ | FrontendReq::SET_VRING_KICK
+ | FrontendReq::SET_VRING_ERR
+ | FrontendReq::SET_LOG_BASE
+ | FrontendReq::SET_LOG_FD
+ | FrontendReq::SET_BACKEND_REQ_FD
+ | FrontendReq::SET_INFLIGHT_FD
+ | FrontendReq::ADD_MEM_REG
+ | FrontendReq::SET_DEVICE_STATE_FD
+ | FrontendReq::GPU_SET_SOCKET,
) => Ok(()),
_ if files.is_some() => Err(Error::InvalidMessage),
_ => Ok(()),
@@ -720,7 +908,7 @@
fn extract_request_body<T: Sized + VhostUserMsgValidator>(
&self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
size: usize,
buf: &[u8],
) -> Result<T> {
@@ -736,21 +924,17 @@
fn update_reply_ack_flag(&mut self) {
let vflag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
let pflag = VhostUserProtocolFeatures::REPLY_ACK;
- if (self.virtio_features & vflag) != 0
+
+ self.reply_ack_enabled = (self.virtio_features & vflag) != 0
&& self.protocol_features.contains(pflag)
- && (self.acked_protocol_features & pflag.bits()) != 0
- {
- self.reply_ack_enabled = true;
- } else {
- self.reply_ack_enabled = false;
- }
+ && (self.acked_protocol_features & pflag.bits()) != 0;
}
fn new_reply_header<T: Sized>(
&self,
- req: &VhostUserMsgHeader<MasterReq>,
+ req: &VhostUserMsgHeader<FrontendReq>,
payload_size: usize,
- ) -> Result<VhostUserMsgHeader<MasterReq>> {
+ ) -> Result<VhostUserMsgHeader<FrontendReq>> {
if mem::size_of::<T>() > MAX_MSG_SIZE
|| payload_size > MAX_MSG_SIZE
|| mem::size_of::<T>() + payload_size > MAX_MSG_SIZE
@@ -767,7 +951,7 @@
fn send_ack_message(
&mut self,
- req: &VhostUserMsgHeader<MasterReq>,
+ req: &VhostUserMsgHeader<FrontendReq>,
res: Result<()>,
) -> Result<()> {
if self.reply_ack_enabled && req.is_need_reply() {
@@ -784,7 +968,7 @@
fn send_reply_message<T: ByteValued>(
&mut self,
- req: &VhostUserMsgHeader<MasterReq>,
+ req: &VhostUserMsgHeader<FrontendReq>,
msg: &T,
) -> Result<()> {
let hdr = self.new_reply_header::<T>(req, 0)?;
@@ -794,7 +978,7 @@
fn send_reply_with_payload<T: ByteValued>(
&mut self,
- req: &VhostUserMsgHeader<MasterReq>,
+ req: &VhostUserMsgHeader<FrontendReq>,
msg: &T,
payload: &[u8],
) -> Result<()> {
@@ -805,7 +989,7 @@
}
}
-impl<S: VhostUserSlaveReqHandler> AsRawFd for SlaveReqHandler<S> {
+impl<S: VhostUserBackendReqHandler> AsRawFd for BackendReqHandler<S> {
fn as_raw_fd(&self) -> RawFd {
self.main_sock.as_raw_fd()
}
@@ -816,14 +1000,14 @@
use std::os::unix::io::AsRawFd;
use super::*;
- use crate::vhost_user::dummy_slave::DummySlaveReqHandler;
+ use crate::vhost_user::dummy_backend::DummyBackendReqHandler;
#[test]
- fn test_slave_req_handler_new() {
+ fn test_backend_req_handler_new() {
let (p1, _p2) = UnixStream::pair().unwrap();
- let endpoint = Endpoint::<MasterReq>::from_stream(p1);
- let backend = Arc::new(Mutex::new(DummySlaveReqHandler::new()));
- let mut handler = SlaveReqHandler::new(endpoint, backend);
+ let endpoint = Endpoint::<VhostUserMsgHeader<FrontendReq>>::from_stream(p1);
+ let backend = Arc::new(Mutex::new(DummyBackendReqHandler::new()));
+ let mut handler = BackendReqHandler::new(endpoint, backend);
handler.check_state().unwrap();
handler.set_failed(libc::EAGAIN);
diff --git a/crates/vhost/src/vhost_user/connection.rs b/crates/vhost/src/vhost_user/connection.rs
index 4a62e12..4fca9c5 100644
--- a/crates/vhost/src/vhost_user/connection.rs
+++ b/crates/vhost/src/vhost_user/connection.rs
@@ -102,12 +102,12 @@
}
/// Unix domain socket endpoint for vhost-user connection.
-pub(super) struct Endpoint<R: Req> {
+pub(super) struct Endpoint<H: MsgHeader> {
sock: UnixStream,
- _r: PhantomData<R>,
+ _h: PhantomData<H>,
}
-impl<R: Req> Endpoint<R> {
+impl<H: MsgHeader> Endpoint<H> {
/// Create a new stream by connecting to server at `str`.
///
/// # Return:
@@ -122,7 +122,7 @@
pub fn from_stream(sock: UnixStream) -> Self {
Endpoint {
sock,
- _r: PhantomData,
+ _h: PhantomData,
}
}
@@ -135,10 +135,7 @@
/// * - SocketBroken: the underlying socket is broken.
/// * - SocketError: other socket related errors.
pub fn send_iovec(&mut self, iovs: &[&[u8]], fds: Option<&[RawFd]>) -> Result<usize> {
- let rfds = match fds {
- Some(rfds) => rfds,
- _ => &[],
- };
+ let rfds = fds.unwrap_or_default();
self.sock.send_with_fds(iovs, rfds).map_err(Into::into)
}
@@ -196,20 +193,16 @@
/// * - SocketBroken: the underlying socket is broken.
/// * - SocketError: other socket related errors.
/// * - PartialMessage: received a partial message.
- pub fn send_header(
- &mut self,
- hdr: &VhostUserMsgHeader<R>,
- fds: Option<&[RawFd]>,
- ) -> Result<()> {
+ pub fn send_header(&mut self, hdr: &H, fds: Option<&[RawFd]>) -> Result<()> {
// SAFETY: Safe because there can't be another mutable reference to hdr.
let iovs = unsafe {
[slice::from_raw_parts(
- hdr as *const VhostUserMsgHeader<R> as *const u8,
- mem::size_of::<VhostUserMsgHeader<R>>(),
+ hdr as *const H as *const u8,
+ mem::size_of::<H>(),
)]
};
let bytes = self.send_iovec_all(&iovs[..], fds)?;
- if bytes != mem::size_of::<VhostUserMsgHeader<R>>() {
+ if bytes != mem::size_of::<H>() {
return Err(Error::PartialMessage);
}
Ok(())
@@ -226,15 +219,15 @@
/// * - PartialMessage: received a partial message.
pub fn send_message<T: ByteValued>(
&mut self,
- hdr: &VhostUserMsgHeader<R>,
+ hdr: &H,
body: &T,
fds: Option<&[RawFd]>,
) -> Result<()> {
- if mem::size_of::<T>() > MAX_MSG_SIZE {
+ if mem::size_of::<T>() > H::MAX_MSG_SIZE {
return Err(Error::OversizedMsg);
}
let bytes = self.send_iovec_all(&[hdr.as_slice(), body.as_slice()], fds)?;
- if bytes != mem::size_of::<VhostUserMsgHeader<R>>() + mem::size_of::<T>() {
+ if bytes != mem::size_of::<H>() + mem::size_of::<T>() {
return Err(Error::PartialMessage);
}
Ok(())
@@ -253,16 +246,16 @@
/// * - IncorrectFds: wrong number of attached fds.
pub fn send_message_with_payload<T: ByteValued>(
&mut self,
- hdr: &VhostUserMsgHeader<R>,
+ hdr: &H,
body: &T,
payload: &[u8],
fds: Option<&[RawFd]>,
) -> Result<()> {
let len = payload.len();
- if mem::size_of::<T>() > MAX_MSG_SIZE {
+ if mem::size_of::<T>() > H::MAX_MSG_SIZE {
return Err(Error::OversizedMsg);
}
- if len > MAX_MSG_SIZE - mem::size_of::<T>() {
+ if len > H::MAX_MSG_SIZE - mem::size_of::<T>() {
return Err(Error::OversizedMsg);
}
if let Some(fd_arr) = fds {
@@ -271,7 +264,7 @@
}
}
- let total = mem::size_of::<VhostUserMsgHeader<R>>() + mem::size_of::<T>() + len;
+ let total = mem::size_of::<H>() + mem::size_of::<T>() + len;
let len = self.send_iovec_all(&[hdr.as_slice(), body.as_slice(), payload], fds)?;
if len != total {
return Err(Error::PartialMessage);
@@ -306,6 +299,7 @@
/// attached file descriptors, the receiver must obey the following rules:
/// 1) file descriptors are attached to a message.
/// 2) message(packet) boundaries must be respected on the receive side.
+ ///
/// In other words, recvmsg() operations must not cross the packet boundary, otherwise the
/// attached file descriptors will get lost.
/// Note that this function wraps received file descriptors as `File`.
@@ -354,6 +348,7 @@
/// attached file descriptors, the receiver must obey the following rules:
/// 1) file descriptors are attached to a message.
/// 2) message(packet) boundaries must be respected on the receive side.
+ ///
/// In other words, recvmsg() operations must not cross the packet boundary, otherwise the
/// attached file descriptors will get lost.
/// Note that this function wraps received file descriptors as `File`.
@@ -445,18 +440,18 @@
/// * - SocketError: other socket related errors.
/// * - PartialMessage: received a partial message.
/// * - InvalidMessage: received an invalid message.
- pub fn recv_header(&mut self) -> Result<(VhostUserMsgHeader<R>, Option<Vec<File>>)> {
- let mut hdr = VhostUserMsgHeader::default();
+ pub fn recv_header(&mut self) -> Result<(H, Option<Vec<File>>)> {
+ let mut hdr = H::default();
let mut iovs = [iovec {
- iov_base: (&mut hdr as *mut VhostUserMsgHeader<R>) as *mut c_void,
- iov_len: mem::size_of::<VhostUserMsgHeader<R>>(),
+ iov_base: (&mut hdr as *mut H) as *mut c_void,
+ iov_len: mem::size_of::<H>(),
}];
// SAFETY: Safe because we own hdr and it's ByteValued.
let (bytes, files) = unsafe { self.recv_into_iovec_all(&mut iovs[..])? };
if bytes == 0 {
return Err(Error::Disconnected);
- } else if bytes != mem::size_of::<VhostUserMsgHeader<R>>() {
+ } else if bytes != mem::size_of::<H>() {
return Err(Error::PartialMessage);
} else if !hdr.is_valid() {
return Err(Error::InvalidMessage);
@@ -476,15 +471,15 @@
/// * - SocketError: other socket related errors.
/// * - PartialMessage: received a partial message.
/// * - InvalidMessage: received an invalid message.
- pub fn recv_body<T: ByteValued + Sized + VhostUserMsgValidator>(
+ pub fn recv_body<T: ByteValued + Sized + VhostUserMsgValidator + Default>(
&mut self,
- ) -> Result<(VhostUserMsgHeader<R>, T, Option<Vec<File>>)> {
- let mut hdr = VhostUserMsgHeader::default();
+ ) -> Result<(H, T, Option<Vec<File>>)> {
+ let mut hdr = H::default();
let mut body: T = Default::default();
let mut iovs = [
iovec {
- iov_base: (&mut hdr as *mut VhostUserMsgHeader<R>) as *mut c_void,
- iov_len: mem::size_of::<VhostUserMsgHeader<R>>(),
+ iov_base: (&mut hdr as *mut H) as *mut c_void,
+ iov_len: mem::size_of::<H>(),
},
iovec {
iov_base: (&mut body as *mut T) as *mut c_void,
@@ -494,7 +489,7 @@
// SAFETY: Safe because we own hdr and body and they're ByteValued.
let (bytes, files) = unsafe { self.recv_into_iovec_all(&mut iovs[..])? };
- let total = mem::size_of::<VhostUserMsgHeader<R>>() + mem::size_of::<T>();
+ let total = mem::size_of::<H>() + mem::size_of::<T>();
if bytes != total {
return Err(Error::PartialMessage);
} else if !hdr.is_valid() || !body.is_valid() {
@@ -518,15 +513,12 @@
/// * - SocketError: other socket related errors.
/// * - PartialMessage: received a partial message.
/// * - InvalidMessage: received an invalid message.
- pub fn recv_body_into_buf(
- &mut self,
- buf: &mut [u8],
- ) -> Result<(VhostUserMsgHeader<R>, usize, Option<Vec<File>>)> {
- let mut hdr = VhostUserMsgHeader::default();
+ pub fn recv_body_into_buf(&mut self, buf: &mut [u8]) -> Result<(H, usize, Option<Vec<File>>)> {
+ let mut hdr = H::default();
let mut iovs = [
iovec {
- iov_base: (&mut hdr as *mut VhostUserMsgHeader<R>) as *mut c_void,
- iov_len: mem::size_of::<VhostUserMsgHeader<R>>(),
+ iov_base: (&mut hdr as *mut H) as *mut c_void,
+ iov_len: mem::size_of::<H>(),
},
iovec {
iov_base: buf.as_mut_ptr() as *mut c_void,
@@ -537,13 +529,13 @@
// and it's safe to fill a byte slice with arbitrary data.
let (bytes, files) = unsafe { self.recv_into_iovec_all(&mut iovs[..])? };
- if bytes < mem::size_of::<VhostUserMsgHeader<R>>() {
+ if bytes < mem::size_of::<H>() {
return Err(Error::PartialMessage);
} else if !hdr.is_valid() {
return Err(Error::InvalidMessage);
}
- Ok((hdr, bytes - mem::size_of::<VhostUserMsgHeader<R>>(), files))
+ Ok((hdr, bytes - mem::size_of::<H>(), files))
}
/// Receive a message with optional payload and attached file descriptors.
@@ -557,17 +549,17 @@
/// * - SocketError: other socket related errors.
/// * - PartialMessage: received a partial message.
/// * - InvalidMessage: received an invalid message.
- #[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))]
- pub fn recv_payload_into_buf<T: ByteValued + Sized + VhostUserMsgValidator>(
+ #[allow(clippy::type_complexity)]
+ pub fn recv_payload_into_buf<T: ByteValued + Sized + VhostUserMsgValidator + Default>(
&mut self,
buf: &mut [u8],
- ) -> Result<(VhostUserMsgHeader<R>, T, usize, Option<Vec<File>>)> {
- let mut hdr = VhostUserMsgHeader::default();
+ ) -> Result<(H, T, usize, Option<Vec<File>>)> {
+ let mut hdr = H::default();
let mut body: T = Default::default();
let mut iovs = [
iovec {
- iov_base: (&mut hdr as *mut VhostUserMsgHeader<R>) as *mut c_void,
- iov_len: mem::size_of::<VhostUserMsgHeader<R>>(),
+ iov_base: (&mut hdr as *mut H) as *mut c_void,
+ iov_len: mem::size_of::<H>(),
},
iovec {
iov_base: (&mut body as *mut T) as *mut c_void,
@@ -583,7 +575,7 @@
// arbitrary data.
let (bytes, files) = unsafe { self.recv_into_iovec_all(&mut iovs[..])? };
- let total = mem::size_of::<VhostUserMsgHeader<R>>() + mem::size_of::<T>();
+ let total = mem::size_of::<H>() + mem::size_of::<T>();
if bytes < total {
return Err(Error::PartialMessage);
} else if !hdr.is_valid() || !body.is_valid() {
@@ -594,7 +586,7 @@
}
}
-impl<T: Req> AsRawFd for Endpoint<T> {
+impl<H: MsgHeader> AsRawFd for Endpoint<H> {
fn as_raw_fd(&self) -> RawFd {
self.sock.as_raw_fd()
}
@@ -624,6 +616,7 @@
mod tests {
use super::*;
use std::io::{Read, Seek, SeekFrom, Write};
+ use std::os::fd::IntoRawFd;
use vmm_sys_util::rand::rand_alphanumerics;
use vmm_sys_util::tempfile::TempFile;
@@ -647,8 +640,9 @@
let path = temp_path();
let file = File::create(path).unwrap();
- // SAFETY: Safe because `file` contains a valid fd to a file just created.
- let listener = unsafe { Listener::from_raw_fd(file.as_raw_fd()) };
+ // SAFETY: Safe because `file` contains a valid fd to a file just created and ownership of
+ // the file descriptor is released.
+ let listener = unsafe { Listener::from_raw_fd(file.into_raw_fd()) };
assert!(listener.as_raw_fd() > 0);
}
@@ -669,23 +663,23 @@
let path = temp_path();
let listener = Listener::new(&path, true).unwrap();
listener.set_nonblocking(true).unwrap();
- let mut master = Endpoint::<MasterReq>::connect(&path).unwrap();
+ let mut frontend = Endpoint::<VhostUserMsgHeader<FrontendReq>>::connect(&path).unwrap();
let sock = listener.accept().unwrap().unwrap();
- let mut slave = Endpoint::<MasterReq>::from_stream(sock);
+ let mut backend = Endpoint::<VhostUserMsgHeader<FrontendReq>>::from_stream(sock);
- let buf1 = vec![0x1, 0x2, 0x3, 0x4];
- let mut len = master.send_slice(&buf1[..], None).unwrap();
+ let buf1 = [0x1, 0x2, 0x3, 0x4];
+ let mut len = frontend.send_slice(&buf1[..], None).unwrap();
assert_eq!(len, 4);
- let (bytes, buf2, _) = slave.recv_into_buf(0x1000).unwrap();
+ let (bytes, buf2, _) = backend.recv_into_buf(0x1000).unwrap();
assert_eq!(bytes, 4);
assert_eq!(&buf1[..], &buf2[..bytes]);
- len = master.send_slice(&buf1[..], None).unwrap();
+ len = frontend.send_slice(&buf1[..], None).unwrap();
assert_eq!(len, 4);
- let (bytes, buf2, _) = slave.recv_into_buf(0x2).unwrap();
+ let (bytes, buf2, _) = backend.recv_into_buf(0x2).unwrap();
assert_eq!(bytes, 2);
assert_eq!(&buf1[..2], &buf2[..]);
- let (bytes, buf2, _) = slave.recv_into_buf(0x2).unwrap();
+ let (bytes, buf2, _) = backend.recv_into_buf(0x2).unwrap();
assert_eq!(bytes, 2);
assert_eq!(&buf1[2..], &buf2[..]);
}
@@ -695,21 +689,21 @@
let path = temp_path();
let listener = Listener::new(&path, true).unwrap();
listener.set_nonblocking(true).unwrap();
- let mut master = Endpoint::<MasterReq>::connect(&path).unwrap();
+ let mut frontend = Endpoint::<VhostUserMsgHeader<FrontendReq>>::connect(&path).unwrap();
let sock = listener.accept().unwrap().unwrap();
- let mut slave = Endpoint::<MasterReq>::from_stream(sock);
+ let mut backend = Endpoint::<VhostUserMsgHeader<FrontendReq>>::from_stream(sock);
let mut fd = TempFile::new().unwrap().into_file();
write!(fd, "test").unwrap();
// Normal case for sending/receiving file descriptors
- let buf1 = vec![0x1, 0x2, 0x3, 0x4];
- let len = master
+ let buf1 = [0x1, 0x2, 0x3, 0x4];
+ let len = frontend
.send_slice(&buf1[..], Some(&[fd.as_raw_fd()]))
.unwrap();
assert_eq!(len, 4);
- let (bytes, buf2, files) = slave.recv_into_buf(4).unwrap();
+ let (bytes, buf2, files) = backend.recv_into_buf(4).unwrap();
assert_eq!(bytes, 4);
assert_eq!(&buf1[..], &buf2[..]);
assert!(files.is_some());
@@ -726,7 +720,7 @@
// Following communication pattern should work:
// Sending side: data(header, body) with fds
// Receiving side: data(header) with fds, data(body)
- let len = master
+ let len = frontend
.send_slice(
&buf1[..],
Some(&[fd.as_raw_fd(), fd.as_raw_fd(), fd.as_raw_fd()]),
@@ -734,7 +728,7 @@
.unwrap();
assert_eq!(len, 4);
- let (bytes, buf2, files) = slave.recv_into_buf(0x2).unwrap();
+ let (bytes, buf2, files) = backend.recv_into_buf(0x2).unwrap();
assert_eq!(bytes, 2);
assert_eq!(&buf1[..2], &buf2[..]);
assert!(files.is_some());
@@ -747,7 +741,7 @@
file.read_to_string(&mut content).unwrap();
assert_eq!(content, "test");
}
- let (bytes, buf2, files) = slave.recv_into_buf(0x2).unwrap();
+ let (bytes, buf2, files) = backend.recv_into_buf(0x2).unwrap();
assert_eq!(bytes, 2);
assert_eq!(&buf1[2..], &buf2[..]);
assert!(files.is_none());
@@ -755,7 +749,7 @@
// Following communication pattern should not work:
// Sending side: data(header, body) with fds
// Receiving side: data(header), data(body) with fds
- let len = master
+ let len = frontend
.send_slice(
&buf1[..],
Some(&[fd.as_raw_fd(), fd.as_raw_fd(), fd.as_raw_fd()]),
@@ -763,10 +757,10 @@
.unwrap();
assert_eq!(len, 4);
- let (bytes, buf4) = slave.recv_data(2).unwrap();
+ let (bytes, buf4) = backend.recv_data(2).unwrap();
assert_eq!(bytes, 2);
assert_eq!(&buf1[..2], &buf4[..]);
- let (bytes, buf2, files) = slave.recv_into_buf(0x2).unwrap();
+ let (bytes, buf2, files) = backend.recv_into_buf(0x2).unwrap();
assert_eq!(bytes, 2);
assert_eq!(&buf1[2..], &buf2[..]);
assert!(files.is_none());
@@ -774,9 +768,9 @@
// Following communication pattern should work:
// Sending side: data, data with fds
// Receiving side: data, data with fds
- let len = master.send_slice(&buf1[..], None).unwrap();
+ let len = frontend.send_slice(&buf1[..], None).unwrap();
assert_eq!(len, 4);
- let len = master
+ let len = frontend
.send_slice(
&buf1[..],
Some(&[fd.as_raw_fd(), fd.as_raw_fd(), fd.as_raw_fd()]),
@@ -784,12 +778,12 @@
.unwrap();
assert_eq!(len, 4);
- let (bytes, buf2, files) = slave.recv_into_buf(0x4).unwrap();
+ let (bytes, buf2, files) = backend.recv_into_buf(0x4).unwrap();
assert_eq!(bytes, 4);
assert_eq!(&buf1[..], &buf2[..]);
assert!(files.is_none());
- let (bytes, buf2, files) = slave.recv_into_buf(0x2).unwrap();
+ let (bytes, buf2, files) = backend.recv_into_buf(0x2).unwrap();
assert_eq!(bytes, 2);
assert_eq!(&buf1[..2], &buf2[..]);
assert!(files.is_some());
@@ -802,7 +796,7 @@
file.read_to_string(&mut content).unwrap();
assert_eq!(content, "test");
}
- let (bytes, buf2, files) = slave.recv_into_buf(0x2).unwrap();
+ let (bytes, buf2, files) = backend.recv_into_buf(0x2).unwrap();
assert_eq!(bytes, 2);
assert_eq!(&buf1[2..], &buf2[..]);
assert!(files.is_none());
@@ -810,9 +804,9 @@
// Following communication pattern should not work:
// Sending side: data1, data2 with fds
// Receiving side: data + partial of data2, left of data2 with fds
- let len = master.send_slice(&buf1[..], None).unwrap();
+ let len = frontend.send_slice(&buf1[..], None).unwrap();
assert_eq!(len, 4);
- let len = master
+ let len = frontend
.send_slice(
&buf1[..],
Some(&[fd.as_raw_fd(), fd.as_raw_fd(), fd.as_raw_fd()]),
@@ -820,15 +814,15 @@
.unwrap();
assert_eq!(len, 4);
- let (bytes, _) = slave.recv_data(5).unwrap();
+ let (bytes, _) = backend.recv_data(5).unwrap();
assert_eq!(bytes, 5);
- let (bytes, _, files) = slave.recv_into_buf(0x4).unwrap();
+ let (bytes, _, files) = backend.recv_into_buf(0x4).unwrap();
assert_eq!(bytes, 3);
assert!(files.is_none());
// If the target fd array is too small, extra file descriptors will get lost.
- let len = master
+ let len = frontend
.send_slice(
&buf1[..],
Some(&[fd.as_raw_fd(), fd.as_raw_fd(), fd.as_raw_fd()]),
@@ -836,7 +830,7 @@
.unwrap();
assert_eq!(len, 4);
- let (bytes, _, files) = slave.recv_into_buf(0x4).unwrap();
+ let (bytes, _, files) = backend.recv_into_buf(0x4).unwrap();
assert_eq!(bytes, 4);
assert!(files.is_some());
}
@@ -846,15 +840,15 @@
let path = temp_path();
let listener = Listener::new(&path, true).unwrap();
listener.set_nonblocking(true).unwrap();
- let mut master = Endpoint::<MasterReq>::connect(&path).unwrap();
+ let mut frontend = Endpoint::<VhostUserMsgHeader<FrontendReq>>::connect(&path).unwrap();
let sock = listener.accept().unwrap().unwrap();
- let mut slave = Endpoint::<MasterReq>::from_stream(sock);
+ let mut backend = Endpoint::<VhostUserMsgHeader<FrontendReq>>::from_stream(sock);
let mut hdr1 =
- VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0, mem::size_of::<u64>() as u32);
+ VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0, mem::size_of::<u64>() as u32);
hdr1.set_need_reply(true);
let features1 = 0x1u64;
- master.send_message(&hdr1, &features1, None).unwrap();
+ frontend.send_message(&hdr1, &features1, None).unwrap();
let mut features2 = 0u64;
@@ -865,14 +859,14 @@
mem::size_of::<u64>(),
)
};
- let (hdr2, bytes, files) = slave.recv_body_into_buf(slice).unwrap();
+ let (hdr2, bytes, files) = backend.recv_body_into_buf(slice).unwrap();
assert_eq!(hdr1, hdr2);
assert_eq!(bytes, 8);
assert_eq!(features1, features2);
assert!(files.is_none());
- master.send_header(&hdr1, None).unwrap();
- let (hdr2, files) = slave.recv_header().unwrap();
+ frontend.send_header(&hdr1, None).unwrap();
+ let (hdr2, files) = backend.recv_header().unwrap();
assert_eq!(hdr1, hdr2);
assert!(files.is_none());
}
@@ -881,13 +875,13 @@
fn partial_message() {
let path = temp_path();
let listener = Listener::new(&path, true).unwrap();
- let mut master = UnixStream::connect(&path).unwrap();
+ let mut frontend = UnixStream::connect(&path).unwrap();
let sock = listener.accept().unwrap().unwrap();
- let mut slave = Endpoint::<MasterReq>::from_stream(sock);
+ let mut backend = Endpoint::<VhostUserMsgHeader<FrontendReq>>::from_stream(sock);
- write!(master, "a").unwrap();
- drop(master);
- assert!(matches!(slave.recv_header(), Err(Error::PartialMessage)));
+ write!(frontend, "a").unwrap();
+ drop(frontend);
+ assert!(matches!(backend.recv_header(), Err(Error::PartialMessage)));
}
#[test]
@@ -896,8 +890,8 @@
let listener = Listener::new(&path, true).unwrap();
let _ = UnixStream::connect(&path).unwrap();
let sock = listener.accept().unwrap().unwrap();
- let mut slave = Endpoint::<MasterReq>::from_stream(sock);
+ let mut backend = Endpoint::<VhostUserMsgHeader<FrontendReq>>::from_stream(sock);
- assert!(matches!(slave.recv_header(), Err(Error::Disconnected)));
+ assert!(matches!(backend.recv_header(), Err(Error::Disconnected)));
}
}
diff --git a/crates/vhost/src/vhost_user/dummy_slave.rs b/crates/vhost/src/vhost_user/dummy_backend.rs
similarity index 84%
rename from crates/vhost/src/vhost_user/dummy_slave.rs
rename to crates/vhost/src/vhost_user/dummy_backend.rs
index ae728a0..ac36cdc 100644
--- a/crates/vhost/src/vhost_user/dummy_slave.rs
+++ b/crates/vhost/src/vhost_user/dummy_backend.rs
@@ -8,11 +8,11 @@
pub const MAX_QUEUE_NUM: usize = 2;
pub const MAX_VRING_NUM: usize = 256;
-pub const MAX_MEM_SLOTS: usize = 32;
+pub const MAX_MEM_SLOTS: usize = 509;
pub const VIRTIO_FEATURES: u64 = 0x40000003;
#[derive(Default)]
-pub struct DummySlaveReqHandler {
+pub struct DummyBackendReqHandler {
pub owned: bool,
pub features_acked: bool,
pub acked_features: u64,
@@ -28,9 +28,9 @@
pub inflight_file: Option<File>,
}
-impl DummySlaveReqHandler {
+impl DummyBackendReqHandler {
pub fn new() -> Self {
- DummySlaveReqHandler {
+ DummyBackendReqHandler {
queue_num: MAX_QUEUE_NUM,
..Default::default()
}
@@ -55,7 +55,7 @@
}
}
-impl VhostUserSlaveReqHandlerMut for DummySlaveReqHandler {
+impl VhostUserBackendReqHandlerMut for DummyBackendReqHandler {
fn set_owner(&mut self) -> Result<()> {
if self.owned {
return Err(Error::InvalidOperation("already claimed"));
@@ -72,6 +72,12 @@
Ok(())
}
+ fn reset_device(&mut self) -> Result<()> {
+ self.features_acked = false;
+ self.acked_features = 0;
+ Ok(())
+ }
+
fn get_features(&mut self) -> Result<u64> {
Ok(VIRTIO_FEATURES)
}
@@ -193,10 +199,10 @@
}
fn set_protocol_features(&mut self, features: u64) -> Result<()> {
- // Note: slave that reported VHOST_USER_F_PROTOCOL_FEATURES must
+ // Note: a backend that reported VHOST_USER_F_PROTOCOL_FEATURES must
// support this message even before VHOST_USER_SET_FEATURES was
// called.
- // What happens if the master calls set_features() with
+ // What happens if the frontend calls set_features() with
// VHOST_USER_F_PROTOCOL_FEATURES cleared after calling this
// interface?
self.acked_protocol_features = features;
@@ -216,7 +222,7 @@
return Err(Error::InvalidParam);
}
- // Slave must not pass data to/from the backend until ring is
+ // The backend must not pass data to/from the ring until it is
// enabled by VHOST_USER_SET_VRING_ENABLE with parameter 1,
// or after it has been disabled by VHOST_USER_SET_VRING_ENABLE
// with parameter 0.
@@ -259,6 +265,10 @@
Ok(())
}
+ fn set_gpu_socket(&mut self, _gpu_backend: GpuBackend) -> Result<()> {
+ Ok(())
+ }
+
fn get_inflight_fd(
&mut self,
inflight: &VhostUserInflight,
@@ -291,4 +301,42 @@
fn remove_mem_region(&mut self, _region: &VhostUserSingleMemoryRegion) -> Result<()> {
Ok(())
}
+
+ fn set_device_state_fd(
+ &mut self,
+ _direction: VhostTransferStateDirection,
+ _phase: VhostTransferStatePhase,
+ _fd: File,
+ ) -> Result<Option<File>> {
+ Err(Error::ReqHandlerError(std::io::Error::new(
+ std::io::ErrorKind::Unsupported,
+ "dummy back end does not support state transfer",
+ )))
+ }
+
+ fn check_device_state(&mut self) -> Result<()> {
+ Err(Error::ReqHandlerError(std::io::Error::new(
+ std::io::ErrorKind::Unsupported,
+ "dummy back end does not support state transfer",
+ )))
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_advice(&mut self) -> Result<File> {
+ let file = tempfile::tempfile().unwrap();
+ Ok(file)
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_listen(&mut self) -> Result<()> {
+ Ok(())
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_end(&mut self) -> Result<()> {
+ Ok(())
+ }
+ fn set_log_base(&mut self, _log: &VhostUserLog, _file: File) -> Result<()> {
+ Err(Error::InvalidMessage)
+ }
}
diff --git a/crates/vhost/src/vhost_user/master.rs b/crates/vhost/src/vhost_user/frontend.rs
similarity index 71%
rename from crates/vhost/src/vhost_user/master.rs
rename to crates/vhost/src/vhost_user/frontend.rs
index feeb984..0fb548b 100644
--- a/crates/vhost/src/vhost_user/master.rs
+++ b/crates/vhost/src/vhost_user/frontend.rs
@@ -1,7 +1,7 @@
// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
-//! Traits and Struct for vhost-user master.
+//! Traits and structs for the vhost-user frontend.
use std::fs::File;
use std::mem;
@@ -21,8 +21,8 @@
};
use crate::{Error, Result};
-/// Trait for vhost-user master to provide extra methods not covered by the VhostBackend yet.
-pub trait VhostUserMaster: VhostBackend {
+/// Trait for the vhost-user frontend, providing extra methods not yet covered by VhostBackend.
+pub trait VhostUserFrontend: VhostBackend {
/// Get the protocol feature bitmask from the underlying vhost implementation.
fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures>;
@@ -32,9 +32,12 @@
/// Query how many queues the backend supports.
fn get_queue_num(&mut self) -> Result<u64>;
- /// Signal slave to enable or disable corresponding vring.
+ /// Disable all rings and reset the internal device state.
+ fn reset_device(&mut self) -> Result<()>;
+
+ /// Signal the backend to enable or disable the corresponding vring.
///
- /// Slave must not pass data to/from the backend until ring is enabled by
+ /// The backend must not pass data to/from the ring until it is enabled by
/// VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has been
/// disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0.
fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()>;
@@ -52,8 +55,8 @@
/// destination host to set readonly configuration space fields.
fn set_config(&mut self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()>;
- /// Setup slave communication channel.
- fn set_slave_request_fd(&mut self, fd: &dyn AsRawFd) -> Result<()>;
+ /// Set up the backend communication channel.
+ fn set_backend_request_fd(&mut self, fd: &dyn AsRawFd) -> Result<()>;
/// Retrieve shared buffer for inflight I/O tracking.
fn get_inflight_fd(
@@ -72,23 +75,43 @@
/// Remove a guest memory mapping from vhost.
fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>;
+
+ /// Sends the VHOST_USER_POSTCOPY_ADVISE msg to the backend,
+ /// initiating the postcopy process.
+ /// The backend will return a userfaultfd.
+ #[cfg(feature = "postcopy")]
+ fn postcopy_advise(&mut self) -> Result<File>;
+
+ /// Sends the VHOST_USER_POSTCOPY_LISTEN msg to the backend,
+ /// telling it to register its memory regions with the
+ /// userfaultfd previously obtained through the
+ /// [`VhostUserFrontend::postcopy_advise`] call.
+ #[cfg(feature = "postcopy")]
+ fn postcopy_listen(&mut self) -> Result<()>;
+
+ /// Sends the VHOST_USER_POSTCOPY_END msg to the backend,
+ /// indicating the end of the postcopy process.
+ /// The backend will destroy the userfaultfd object it previously
+ /// returned from [`VhostUserFrontend::postcopy_advise`].
+ #[cfg(feature = "postcopy")]
+ fn postcopy_end(&mut self) -> Result<()>;
}
fn error_code<T>(err: VhostUserError) -> Result<T> {
Err(Error::VhostUserProtocol(err))
}
-/// Struct for the vhost-user master endpoint.
+/// Struct for the vhost-user frontend endpoint.
#[derive(Clone)]
-pub struct Master {
- node: Arc<Mutex<MasterInternal>>,
+pub struct Frontend {
+ node: Arc<Mutex<FrontendInternal>>,
}
-impl Master {
+impl Frontend {
/// Create a new instance.
- fn new(ep: Endpoint<MasterReq>, max_queue_num: u64) -> Self {
- Master {
- node: Arc::new(Mutex::new(MasterInternal {
+ fn new(ep: Endpoint<VhostUserMsgHeader<FrontendReq>>, max_queue_num: u64) -> Self {
+ Frontend {
+ node: Arc::new(Mutex::new(FrontendInternal {
main_sock: ep,
virtio_features: 0,
acked_virtio_features: 0,
@@ -102,16 +125,19 @@
}
}
- fn node(&self) -> MutexGuard<MasterInternal> {
+ fn node(&self) -> MutexGuard<FrontendInternal> {
self.node.lock().unwrap()
}
/// Create a new instance from a Unix stream socket.
pub fn from_stream(sock: UnixStream, max_queue_num: u64) -> Self {
- Self::new(Endpoint::<MasterReq>::from_stream(sock), max_queue_num)
+ Self::new(
+ Endpoint::<VhostUserMsgHeader<FrontendReq>>::from_stream(sock),
+ max_queue_num,
+ )
}
- /// Create a new vhost-user master endpoint.
+ /// Create a new vhost-user frontend endpoint.
///
/// Will retry as the backend may not be ready to accept the connection.
///
@@ -120,7 +146,7 @@
pub fn connect<P: AsRef<Path>>(path: P, max_queue_num: u64) -> Result<Self> {
let mut retry_count = 5;
let endpoint = loop {
- match Endpoint::<MasterReq>::connect(&path) {
+ match Endpoint::<VhostUserMsgHeader<FrontendReq>>::connect(&path) {
Ok(endpoint) => break Ok(endpoint),
Err(e) => match &e {
VhostUserError::SocketConnect(why) => {
@@ -147,11 +173,11 @@
}
}
-impl VhostBackend for Master {
+impl VhostBackend for Frontend {
/// Get from the underlying vhost implementation the feature bitmask.
fn get_features(&self) -> Result<u64> {
let mut node = self.node();
- let hdr = node.send_request_header(MasterReq::GET_FEATURES, None)?;
+ let hdr = node.send_request_header(FrontendReq::GET_FEATURES, None)?;
let val = node.recv_reply::<VhostUserU64>(&hdr)?;
node.virtio_features = val.value;
Ok(node.virtio_features)
@@ -161,27 +187,27 @@
fn set_features(&self, features: u64) -> Result<()> {
let mut node = self.node();
let val = VhostUserU64::new(features);
- let hdr = node.send_request_with_body(MasterReq::SET_FEATURES, &val, None)?;
+ let hdr = node.send_request_with_body(FrontendReq::SET_FEATURES, &val, None)?;
node.acked_virtio_features = features & node.virtio_features;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
- /// Set the current Master as an owner of the session.
+ /// Set the current Frontend as the owner of the session.
fn set_owner(&self) -> Result<()> {
// We unwrap() the return value to assert that we are not expecting threads to ever fail
// while holding the lock.
let mut node = self.node();
- let hdr = node.send_request_header(MasterReq::SET_OWNER, None)?;
+ let hdr = node.send_request_header(FrontendReq::SET_OWNER, None)?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
fn reset_owner(&self) -> Result<()> {
let mut node = self.node();
- let hdr = node.send_request_header(MasterReq::RESET_OWNER, None)?;
+ let hdr = node.send_request_header(FrontendReq::RESET_OWNER, None)?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
- /// Set the memory map regions on the slave so it can translate the vring
+ /// Set the memory map regions on the backend so it can translate the vring
/// addresses. In the ancillary data there is an array of file descriptors
fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
if regions.is_empty() || regions.len() > MAX_ATTACHED_FD_ENTRIES {
@@ -202,7 +228,7 @@
// SAFETY: Safe because ctx.regions is a valid Vec() at this point.
let (_, payload, _) = unsafe { ctx.regions.align_to::<u8>() };
let hdr = node.send_request_with_payload(
- MasterReq::SET_MEM_TABLE,
+ FrontendReq::SET_MEM_TABLE,
&body,
payload,
Some(ctx.fds.as_slice()),
@@ -225,13 +251,13 @@
mmap_offset: region.mmap_offset,
};
let hdr = node.send_request_with_body(
- MasterReq::SET_LOG_BASE,
+ FrontendReq::SET_LOG_BASE,
&log,
Some(&[region.mmap_handle]),
)?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
} else {
- let _ = node.send_request_with_body(MasterReq::SET_LOG_BASE, &val, None)?;
+ let _ = node.send_request_with_body(FrontendReq::SET_LOG_BASE, &val, None)?;
Ok(())
}
}
@@ -239,7 +265,7 @@
fn set_log_fd(&self, fd: RawFd) -> Result<()> {
let mut node = self.node();
let fds = [fd];
- let hdr = node.send_request_header(MasterReq::SET_LOG_FD, Some(&fds))?;
+ let hdr = node.send_request_header(FrontendReq::SET_LOG_FD, Some(&fds))?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
@@ -251,7 +277,7 @@
}
let val = VhostUserVringState::new(queue_index as u32, num.into());
- let hdr = node.send_request_with_body(MasterReq::SET_VRING_NUM, &val, None)?;
+ let hdr = node.send_request_with_body(FrontendReq::SET_VRING_NUM, &val, None)?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
@@ -265,7 +291,7 @@
}
let val = VhostUserVringAddr::from_config_data(queue_index as u32, config_data);
- let hdr = node.send_request_with_body(MasterReq::SET_VRING_ADDR, &val, None)?;
+ let hdr = node.send_request_with_body(FrontendReq::SET_VRING_ADDR, &val, None)?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
@@ -277,7 +303,7 @@
}
let val = VhostUserVringState::new(queue_index as u32, base.into());
- let hdr = node.send_request_with_body(MasterReq::SET_VRING_BASE, &val, None)?;
+ let hdr = node.send_request_with_body(FrontendReq::SET_VRING_BASE, &val, None)?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
@@ -288,7 +314,7 @@
}
let req = VhostUserVringState::new(queue_index as u32, 0);
- let hdr = node.send_request_with_body(MasterReq::GET_VRING_BASE, &req, None)?;
+ let hdr = node.send_request_with_body(FrontendReq::GET_VRING_BASE, &req, None)?;
let reply = node.recv_reply::<VhostUserVringState>(&hdr)?;
Ok(reply.num)
}
@@ -302,7 +328,8 @@
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
- let hdr = node.send_fd_for_vring(MasterReq::SET_VRING_CALL, queue_index, fd.as_raw_fd())?;
+ let hdr =
+ node.send_fd_for_vring(FrontendReq::SET_VRING_CALL, queue_index, fd.as_raw_fd())?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
@@ -315,7 +342,8 @@
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
- let hdr = node.send_fd_for_vring(MasterReq::SET_VRING_KICK, queue_index, fd.as_raw_fd())?;
+ let hdr =
+ node.send_fd_for_vring(FrontendReq::SET_VRING_KICK, queue_index, fd.as_raw_fd())?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
@@ -327,31 +355,29 @@
if queue_index as u64 >= node.max_queue_num {
return error_code(VhostUserError::InvalidParam);
}
- let hdr = node.send_fd_for_vring(MasterReq::SET_VRING_ERR, queue_index, fd.as_raw_fd())?;
+ let hdr =
+ node.send_fd_for_vring(FrontendReq::SET_VRING_ERR, queue_index, fd.as_raw_fd())?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
}
-impl VhostUserMaster for Master {
+impl VhostUserFrontend for Frontend {
fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures> {
let mut node = self.node();
node.check_feature(VhostUserVirtioFeatures::PROTOCOL_FEATURES)?;
- let hdr = node.send_request_header(MasterReq::GET_PROTOCOL_FEATURES, None)?;
+ let hdr = node.send_request_header(FrontendReq::GET_PROTOCOL_FEATURES, None)?;
let val = node.recv_reply::<VhostUserU64>(&hdr)?;
node.protocol_features = val.value;
- // Should we support forward compatibility?
- // If so just mask out unrecognized flags instead of return errors.
- match VhostUserProtocolFeatures::from_bits(node.protocol_features) {
- Some(val) => Ok(val),
- None => error_code(VhostUserError::InvalidMessage),
- }
+ Ok(VhostUserProtocolFeatures::from_bits_truncate(
+ node.protocol_features,
+ ))
}
fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()> {
let mut node = self.node();
node.check_feature(VhostUserVirtioFeatures::PROTOCOL_FEATURES)?;
let val = VhostUserU64::new(features.bits());
- let hdr = node.send_request_with_body(MasterReq::SET_PROTOCOL_FEATURES, &val, None)?;
+ let hdr = node.send_request_with_body(FrontendReq::SET_PROTOCOL_FEATURES, &val, None)?;
// Don't wait for ACK here because the protocol feature negotiation process hasn't been
// completed yet.
node.acked_protocol_features = features.bits();
@@ -363,7 +389,7 @@
let mut node = self.node();
node.check_proto_feature(VhostUserProtocolFeatures::MQ)?;
- let hdr = node.send_request_header(MasterReq::GET_QUEUE_NUM, None)?;
+ let hdr = node.send_request_header(FrontendReq::GET_QUEUE_NUM, None)?;
let val = node.recv_reply::<VhostUserU64>(&hdr)?;
if val.value > VHOST_USER_MAX_VRINGS {
return error_code(VhostUserError::InvalidMessage);
@@ -372,6 +398,14 @@
Ok(node.max_queue_num)
}
+ fn reset_device(&mut self) -> Result<()> {
+ let mut node = self.node();
+ node.check_proto_feature(VhostUserProtocolFeatures::RESET_DEVICE)?;
+
+ let hdr = node.send_request_header(FrontendReq::RESET_DEVICE, None)?;
+ node.wait_for_ack(&hdr).map_err(|e| e.into())
+ }
+
fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()> {
let mut node = self.node();
// set_vring_enable() is supported only when PROTOCOL_FEATURES has been enabled.
@@ -383,9 +417,9 @@
return error_code(VhostUserError::InvalidParam);
}
- let flag = if enable { 1 } else { 0 };
+ let flag = enable.into();
let val = VhostUserVringState::new(queue_index as u32, flag);
- let hdr = node.send_request_with_body(MasterReq::SET_VRING_ENABLE, &val, None)?;
+ let hdr = node.send_request_with_body(FrontendReq::SET_VRING_ENABLE, &val, None)?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
@@ -406,15 +440,15 @@
node.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?;
// vhost-user spec states that:
- // "Master payload: virtio device config space"
- // "Slave payload: virtio device config space"
- let hdr = node.send_request_with_payload(MasterReq::GET_CONFIG, &body, buf, None)?;
+ // "Frontend payload: virtio device config space"
+ // "Backend payload: virtio device config space"
+ let hdr = node.send_request_with_payload(FrontendReq::GET_CONFIG, &body, buf, None)?;
let (body_reply, buf_reply, rfds) =
node.recv_reply_with_payload::<VhostUserConfig>(&hdr)?;
if rfds.is_some() {
return error_code(VhostUserError::InvalidMessage);
} else if body_reply.size == 0 {
- return error_code(VhostUserError::SlaveInternalError);
+ return error_code(VhostUserError::BackendInternalError);
} else if body_reply.size != body.size
|| body_reply.size as usize != buf.len()
|| body_reply.offset != body.offset
@@ -438,15 +472,15 @@
// depends on VhostUserProtocolFeatures::CONFIG
node.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?;
- let hdr = node.send_request_with_payload(MasterReq::SET_CONFIG, &body, buf, None)?;
+ let hdr = node.send_request_with_payload(FrontendReq::SET_CONFIG, &body, buf, None)?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
- fn set_slave_request_fd(&mut self, fd: &dyn AsRawFd) -> Result<()> {
+ fn set_backend_request_fd(&mut self, fd: &dyn AsRawFd) -> Result<()> {
let mut node = self.node();
- node.check_proto_feature(VhostUserProtocolFeatures::SLAVE_REQ)?;
+ node.check_proto_feature(VhostUserProtocolFeatures::BACKEND_REQ)?;
let fds = [fd.as_raw_fd()];
- let hdr = node.send_request_header(MasterReq::SET_SLAVE_REQ_FD, Some(&fds))?;
+ let hdr = node.send_request_header(FrontendReq::SET_BACKEND_REQ_FD, Some(&fds))?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
@@ -457,7 +491,7 @@
let mut node = self.node();
node.check_proto_feature(VhostUserProtocolFeatures::INFLIGHT_SHMFD)?;
- let hdr = node.send_request_with_body(MasterReq::GET_INFLIGHT_FD, inflight, None)?;
+ let hdr = node.send_request_with_body(FrontendReq::GET_INFLIGHT_FD, inflight, None)?;
let (inflight, files) = node.recv_reply_with_files::<VhostUserInflight>(&hdr)?;
match take_single_file(files) {
@@ -475,7 +509,8 @@
return error_code(VhostUserError::InvalidParam);
}
- let hdr = node.send_request_with_body(MasterReq::SET_INFLIGHT_FD, inflight, Some(&[fd]))?;
+ let hdr =
+ node.send_request_with_body(FrontendReq::SET_INFLIGHT_FD, inflight, Some(&[fd]))?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
@@ -483,7 +518,7 @@
let mut node = self.node();
node.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?;
- let hdr = node.send_request_header(MasterReq::GET_MAX_MEM_SLOTS, None)?;
+ let hdr = node.send_request_header(FrontendReq::GET_MAX_MEM_SLOTS, None)?;
let val = node.recv_reply::<VhostUserU64>(&hdr)?;
Ok(val.value)
@@ -498,7 +533,7 @@
let body = region.to_single_region();
let fds = [region.mmap_handle];
- let hdr = node.send_request_with_body(MasterReq::ADD_MEM_REG, &body, Some(&fds))?;
+ let hdr = node.send_request_with_body(FrontendReq::ADD_MEM_REG, &body, Some(&fds))?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
@@ -510,19 +545,49 @@
}
let body = region.to_single_region();
- let hdr = node.send_request_with_body(MasterReq::REM_MEM_REG, &body, None)?;
+ let hdr = node.send_request_with_body(FrontendReq::REM_MEM_REG, &body, None)?;
+ node.wait_for_ack(&hdr).map_err(|e| e.into())
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_advise(&mut self) -> Result<File> {
+ let mut node = self.node();
+ node.check_proto_feature(VhostUserProtocolFeatures::PAGEFAULT)?;
+
+ let hdr = node.send_request_header(FrontendReq::POSTCOPY_ADVISE, None)?;
+ let (_, files) = node.recv_reply_with_files::<VhostUserEmpty>(&hdr)?;
+
+ match take_single_file(files) {
+ Some(file) => Ok(file),
+ None => error_code(VhostUserError::IncorrectFds),
+ }
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_listen(&mut self) -> Result<()> {
+ let mut node = self.node();
+ node.check_proto_feature(VhostUserProtocolFeatures::PAGEFAULT)?;
+ let hdr = node.send_request_header(FrontendReq::POSTCOPY_LISTEN, None)?;
+ node.wait_for_ack(&hdr).map_err(|e| e.into())
+ }
+
+ #[cfg(feature = "postcopy")]
+ fn postcopy_end(&mut self) -> Result<()> {
+ let mut node = self.node();
+ node.check_proto_feature(VhostUserProtocolFeatures::PAGEFAULT)?;
+ let hdr = node.send_request_header(FrontendReq::POSTCOPY_END, None)?;
node.wait_for_ack(&hdr).map_err(|e| e.into())
}
}
-impl AsRawFd for Master {
+impl AsRawFd for Frontend {
fn as_raw_fd(&self) -> RawFd {
let node = self.node();
node.main_sock.as_raw_fd()
}
}
-/// Context object to pass guest memory configuration to VhostUserMaster::set_mem_table().
+/// Context object to pass guest memory configuration to VhostUserFrontend::set_mem_table().
struct VhostUserMemoryContext {
regions: VhostUserMemoryPayload,
fds: Vec<RawFd>,
@@ -544,20 +609,20 @@
}
}
-struct MasterInternal {
- // Used to send requests to the slave.
- main_sock: Endpoint<MasterReq>,
- // Cached virtio features from the slave.
+struct FrontendInternal {
+ // Used to send requests to the backend.
+ main_sock: Endpoint<VhostUserMsgHeader<FrontendReq>>,
+ // Cached virtio features from the backend.
virtio_features: u64,
// Cached acked virtio features from the driver.
acked_virtio_features: u64,
- // Cached vhost-user protocol features from the slave.
+ // Cached vhost-user protocol features from the backend.
protocol_features: u64,
// Cached vhost-user protocol features.
acked_protocol_features: u64,
// Cached vhost-user protocol features are ready to use.
protocol_features_ready: bool,
- // Cached maxinum number of queues supported from the slave.
+ // Cached maximum number of queues supported from the backend.
max_queue_num: u64,
// Internal flag to mark failure state.
error: Option<i32>,
@@ -565,12 +630,12 @@
hdr_flags: VhostUserHeaderFlag,
}
-impl MasterInternal {
+impl FrontendInternal {
fn send_request_header(
&mut self,
- code: MasterReq,
+ code: FrontendReq,
fds: Option<&[RawFd]>,
- ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
+ ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
self.check_state()?;
let hdr = self.new_request_header(code, 0);
self.main_sock.send_header(&hdr, fds)?;
@@ -579,10 +644,10 @@
fn send_request_with_body<T: ByteValued>(
&mut self,
- code: MasterReq,
+ code: FrontendReq,
msg: &T,
fds: Option<&[RawFd]>,
- ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
+ ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
if mem::size_of::<T>() > MAX_MSG_SIZE {
return Err(VhostUserError::InvalidParam);
}
@@ -595,11 +660,11 @@
fn send_request_with_payload<T: ByteValued>(
&mut self,
- code: MasterReq,
+ code: FrontendReq,
msg: &T,
payload: &[u8],
fds: Option<&[RawFd]>,
- ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
+ ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
let len = mem::size_of::<T>() + payload.len();
if len > MAX_MSG_SIZE {
return Err(VhostUserError::InvalidParam);
@@ -619,10 +684,10 @@
fn send_fd_for_vring(
&mut self,
- code: MasterReq,
+ code: FrontendReq,
queue_index: usize,
fd: RawFd,
- ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
+ ) -> VhostUserResult<VhostUserMsgHeader<FrontendReq>> {
if queue_index as u64 >= self.max_queue_num {
return Err(VhostUserError::InvalidParam);
}
@@ -637,9 +702,9 @@
Ok(hdr)
}
- fn recv_reply<T: ByteValued + Sized + VhostUserMsgValidator>(
+ fn recv_reply<T: ByteValued + Sized + VhostUserMsgValidator + Default>(
&mut self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
) -> VhostUserResult<T> {
if mem::size_of::<T>() > MAX_MSG_SIZE || hdr.is_reply() {
return Err(VhostUserError::InvalidParam);
@@ -653,9 +718,9 @@
Ok(body)
}
- fn recv_reply_with_files<T: ByteValued + Sized + VhostUserMsgValidator>(
+ fn recv_reply_with_files<T: ByteValued + Sized + VhostUserMsgValidator + Default>(
&mut self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
) -> VhostUserResult<(T, Option<Vec<File>>)> {
if mem::size_of::<T>() > MAX_MSG_SIZE || hdr.is_reply() {
return Err(VhostUserError::InvalidParam);
@@ -669,9 +734,9 @@
Ok((body, files))
}
- fn recv_reply_with_payload<T: ByteValued + Sized + VhostUserMsgValidator>(
+ fn recv_reply_with_payload<T: ByteValued + Sized + VhostUserMsgValidator + Default>(
&mut self,
- hdr: &VhostUserMsgHeader<MasterReq>,
+ hdr: &VhostUserMsgHeader<FrontendReq>,
) -> VhostUserResult<(T, Vec<u8>, Option<Vec<File>>)> {
if mem::size_of::<T>() > MAX_MSG_SIZE
|| hdr.get_size() as usize <= mem::size_of::<T>()
@@ -696,7 +761,7 @@
Ok((body, buf, files))
}
- fn wait_for_ack(&mut self, hdr: &VhostUserMsgHeader<MasterReq>) -> VhostUserResult<()> {
+ fn wait_for_ack(&mut self, hdr: &VhostUserMsgHeader<FrontendReq>) -> VhostUserResult<()> {
if self.acked_protocol_features & VhostUserProtocolFeatures::REPLY_ACK.bits() == 0
|| !hdr.is_need_reply()
{
@@ -709,7 +774,7 @@
return Err(VhostUserError::InvalidMessage);
}
if body.value != 0 {
- return Err(VhostUserError::SlaveInternalError);
+ return Err(VhostUserError::BackendInternalError);
}
Ok(())
}
@@ -740,7 +805,11 @@
}
#[inline]
- fn new_request_header(&self, request: MasterReq, size: u32) -> VhostUserMsgHeader<MasterReq> {
+ fn new_request_header(
+ &self,
+ request: FrontendReq,
+ size: u32,
+ ) -> VhostUserMsgHeader<FrontendReq> {
VhostUserMsgHeader::new(request, self.hdr_flags.bits() | 0x1, size)
}
}
@@ -753,6 +822,8 @@
use std::path::PathBuf;
+ const INVALID_PROTOCOL_FEATURE: u64 = 1 << 63;
+
fn temp_path() -> PathBuf {
PathBuf::from(format!(
"/tmp/vhost_test_{}",
@@ -760,36 +831,40 @@
))
}
- fn create_pair<P: AsRef<Path>>(path: P) -> (Master, Endpoint<MasterReq>) {
+ fn create_pair<P: AsRef<Path>>(
+ path: P,
+ ) -> (Frontend, Endpoint<VhostUserMsgHeader<FrontendReq>>) {
let listener = Listener::new(&path, true).unwrap();
listener.set_nonblocking(true).unwrap();
- let master = Master::connect(path, 2).unwrap();
- let slave = listener.accept().unwrap().unwrap();
- (master, Endpoint::from_stream(slave))
+ let frontend = Frontend::connect(path, 2).unwrap();
+ let backend = listener.accept().unwrap().unwrap();
+ (frontend, Endpoint::from_stream(backend))
}
#[test]
- fn create_master() {
+ fn create_frontend() {
let path = temp_path();
let listener = Listener::new(&path, true).unwrap();
listener.set_nonblocking(true).unwrap();
- let master = Master::connect(&path, 1).unwrap();
- let mut slave = Endpoint::<MasterReq>::from_stream(listener.accept().unwrap().unwrap());
+ let frontend = Frontend::connect(&path, 1).unwrap();
+ let mut backend = Endpoint::<VhostUserMsgHeader<FrontendReq>>::from_stream(
+ listener.accept().unwrap().unwrap(),
+ );
- assert!(master.as_raw_fd() > 0);
+ assert!(frontend.as_raw_fd() > 0);
// Send two messages continuously
- master.set_owner().unwrap();
- master.reset_owner().unwrap();
+ frontend.set_owner().unwrap();
+ frontend.reset_owner().unwrap();
- let (hdr, rfds) = slave.recv_header().unwrap();
- assert_eq!(hdr.get_code().unwrap(), MasterReq::SET_OWNER);
+ let (hdr, rfds) = backend.recv_header().unwrap();
+ assert_eq!(hdr.get_code().unwrap(), FrontendReq::SET_OWNER);
assert_eq!(hdr.get_size(), 0);
assert_eq!(hdr.get_version(), 0x1);
assert!(rfds.is_none());
- let (hdr, rfds) = slave.recv_header().unwrap();
- assert_eq!(hdr.get_code().unwrap(), MasterReq::RESET_OWNER);
+ let (hdr, rfds) = backend.recv_header().unwrap();
+ assert_eq!(hdr.get_code().unwrap(), FrontendReq::RESET_OWNER);
assert_eq!(hdr.get_size(), 0);
assert_eq!(hdr.get_version(), 0x1);
assert!(rfds.is_none());
@@ -800,311 +875,312 @@
let path = temp_path();
let _ = Listener::new(&path, true).unwrap();
let _ = Listener::new(&path, false).is_err();
- assert!(Master::connect(&path, 1).is_err());
+ assert!(Frontend::connect(&path, 1).is_err());
let listener = Listener::new(&path, true).unwrap();
assert!(Listener::new(&path, false).is_err());
listener.set_nonblocking(true).unwrap();
- let _master = Master::connect(&path, 1).unwrap();
- let _slave = listener.accept().unwrap().unwrap();
+ let _frontend = Frontend::connect(&path, 1).unwrap();
+ let _backend = listener.accept().unwrap().unwrap();
}
#[test]
fn test_features() {
let path = temp_path();
- let (master, mut peer) = create_pair(path);
+ let (frontend, mut peer) = create_pair(path);
- master.set_owner().unwrap();
+ frontend.set_owner().unwrap();
let (hdr, rfds) = peer.recv_header().unwrap();
- assert_eq!(hdr.get_code().unwrap(), MasterReq::SET_OWNER);
+ assert_eq!(hdr.get_code().unwrap(), FrontendReq::SET_OWNER);
assert_eq!(hdr.get_size(), 0);
assert_eq!(hdr.get_version(), 0x1);
assert!(rfds.is_none());
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8);
let msg = VhostUserU64::new(0x15);
peer.send_message(&hdr, &msg, None).unwrap();
- let features = master.get_features().unwrap();
+ let features = frontend.get_features().unwrap();
assert_eq!(features, 0x15u64);
let (_hdr, rfds) = peer.recv_header().unwrap();
assert!(rfds.is_none());
- let hdr = VhostUserMsgHeader::new(MasterReq::SET_FEATURES, 0x4, 8);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::SET_FEATURES, 0x4, 8);
let msg = VhostUserU64::new(0x15);
peer.send_message(&hdr, &msg, None).unwrap();
- master.set_features(0x15).unwrap();
+ frontend.set_features(0x15).unwrap();
let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
assert!(rfds.is_none());
let val = msg.value;
assert_eq!(val, 0x15);
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8);
let msg = 0x15u32;
peer.send_message(&hdr, &msg, None).unwrap();
- assert!(master.get_features().is_err());
+ assert!(frontend.get_features().is_err());
}
#[test]
fn test_protocol_features() {
let path = temp_path();
- let (mut master, mut peer) = create_pair(path);
+ let (mut frontend, mut peer) = create_pair(path);
- master.set_owner().unwrap();
+ frontend.set_owner().unwrap();
let (hdr, rfds) = peer.recv_header().unwrap();
- assert_eq!(hdr.get_code().unwrap(), MasterReq::SET_OWNER);
+ assert_eq!(hdr.get_code().unwrap(), FrontendReq::SET_OWNER);
assert!(rfds.is_none());
- assert!(master.get_protocol_features().is_err());
- assert!(master
+ assert!(frontend.get_protocol_features().is_err());
+ assert!(frontend
.set_protocol_features(VhostUserProtocolFeatures::all())
.is_err());
let vfeatures = 0x15 | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8);
let msg = VhostUserU64::new(vfeatures);
peer.send_message(&hdr, &msg, None).unwrap();
- let features = master.get_features().unwrap();
+ let features = frontend.get_features().unwrap();
assert_eq!(features, vfeatures);
let (_hdr, rfds) = peer.recv_header().unwrap();
assert!(rfds.is_none());
- master.set_features(vfeatures).unwrap();
+ frontend.set_features(vfeatures).unwrap();
let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
assert!(rfds.is_none());
let val = msg.value;
assert_eq!(val, vfeatures);
let pfeatures = VhostUserProtocolFeatures::all();
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_PROTOCOL_FEATURES, 0x4, 8);
- let msg = VhostUserU64::new(pfeatures.bits());
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_PROTOCOL_FEATURES, 0x4, 8);
+ // Unknown feature bits should be ignored.
+ let msg = VhostUserU64::new(pfeatures.bits() | INVALID_PROTOCOL_FEATURE);
peer.send_message(&hdr, &msg, None).unwrap();
- let features = master.get_protocol_features().unwrap();
+ let features = frontend.get_protocol_features().unwrap();
assert_eq!(features, pfeatures);
let (_hdr, rfds) = peer.recv_header().unwrap();
assert!(rfds.is_none());
- master.set_protocol_features(pfeatures).unwrap();
+ frontend.set_protocol_features(pfeatures).unwrap();
let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
assert!(rfds.is_none());
let val = msg.value;
assert_eq!(val, pfeatures.bits());
- let hdr = VhostUserMsgHeader::new(MasterReq::SET_PROTOCOL_FEATURES, 0x4, 8);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::SET_PROTOCOL_FEATURES, 0x4, 8);
let msg = VhostUserU64::new(pfeatures.bits());
peer.send_message(&hdr, &msg, None).unwrap();
- assert!(master.get_protocol_features().is_err());
+ assert!(frontend.get_protocol_features().is_err());
}
#[test]
- fn test_master_set_config_negative() {
+ fn test_frontend_set_config_negative() {
let path = temp_path();
- let (mut master, _peer) = create_pair(path);
+ let (mut frontend, _peer) = create_pair(path);
let buf = vec![0x0; MAX_MSG_SIZE + 1];
- master
+ frontend
.set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.unwrap_err();
{
- let mut node = master.node();
+ let mut node = frontend.node();
node.virtio_features = 0xffff_ffff;
node.acked_virtio_features = 0xffff_ffff;
node.protocol_features = 0xffff_ffff;
node.acked_protocol_features = 0xffff_ffff;
}
- master
+ frontend
.set_config(0, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.unwrap();
- master
+ frontend
.set_config(
VHOST_USER_CONFIG_SIZE,
VhostUserConfigFlags::WRITABLE,
&buf[0..4],
)
.unwrap_err();
- master
+ frontend
.set_config(0x1000, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.unwrap_err();
- master
+ frontend
.set_config(
0x100,
- // SAFETY: This is a negative test, so we are setting unexpected flags.
- unsafe { VhostUserConfigFlags::from_bits_unchecked(0xffff_ffff) },
+ // This is a negative test, so we are setting unexpected flags.
+ VhostUserConfigFlags::from_bits_retain(0xffff_ffff),
&buf[0..4],
)
.unwrap_err();
- master
+ frontend
.set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &buf)
.unwrap_err();
- master
+ frontend
.set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &[])
.unwrap_err();
}
- fn create_pair2() -> (Master, Endpoint<MasterReq>) {
+ fn create_pair2() -> (Frontend, Endpoint<VhostUserMsgHeader<FrontendReq>>) {
let path = temp_path();
- let (master, peer) = create_pair(path);
+ let (frontend, peer) = create_pair(path);
{
- let mut node = master.node();
+ let mut node = frontend.node();
node.virtio_features = 0xffff_ffff;
node.acked_virtio_features = 0xffff_ffff;
node.protocol_features = 0xffff_ffff;
node.acked_protocol_features = 0xffff_ffff;
}
- (master, peer)
+ (frontend, peer)
}
#[test]
- fn test_master_get_config_negative0() {
- let (mut master, mut peer) = create_pair2();
+ fn test_frontend_get_config_negative0() {
+ let (mut frontend, mut peer) = create_pair2();
let buf = vec![0x0; MAX_MSG_SIZE + 1];
- let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
- hdr.set_code(MasterReq::GET_FEATURES);
+ hdr.set_code(FrontendReq::GET_FEATURES);
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
- hdr.set_code(MasterReq::GET_CONFIG);
+ hdr.set_code(FrontendReq::GET_CONFIG);
}
#[test]
- fn test_master_get_config_negative1() {
- let (mut master, mut peer) = create_pair2();
+ fn test_frontend_get_config_negative1() {
+ let (mut frontend, mut peer) = create_pair2();
let buf = vec![0x0; MAX_MSG_SIZE + 1];
- let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
hdr.set_reply(false);
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
}
#[test]
- fn test_master_get_config_negative2() {
- let (mut master, mut peer) = create_pair2();
+ fn test_frontend_get_config_negative2() {
+ let (mut frontend, mut peer) = create_pair2();
let buf = vec![0x0; MAX_MSG_SIZE + 1];
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
}
#[test]
- fn test_master_get_config_negative3() {
- let (mut master, mut peer) = create_pair2();
+ fn test_frontend_get_config_negative3() {
+ let (mut frontend, mut peer) = create_pair2();
let buf = vec![0x0; MAX_MSG_SIZE + 1];
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
msg.offset = 0;
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
}
#[test]
- fn test_master_get_config_negative4() {
- let (mut master, mut peer) = create_pair2();
+ fn test_frontend_get_config_negative4() {
+ let (mut frontend, mut peer) = create_pair2();
let buf = vec![0x0; MAX_MSG_SIZE + 1];
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
msg.offset = 0x101;
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
}
#[test]
- fn test_master_get_config_negative5() {
- let (mut master, mut peer) = create_pair2();
+ fn test_frontend_get_config_negative5() {
+ let (mut frontend, mut peer) = create_pair2();
let buf = vec![0x0; MAX_MSG_SIZE + 1];
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
msg.offset = (MAX_MSG_SIZE + 1) as u32;
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
}
#[test]
- fn test_master_get_config_negative6() {
- let (mut master, mut peer) = create_pair2();
+ fn test_frontend_get_config_negative6() {
+ let (mut frontend, mut peer) = create_pair2();
let buf = vec![0x0; MAX_MSG_SIZE + 1];
- let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
+ let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16);
let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_ok());
msg.size = 6;
peer.send_message_with_payload(&hdr, &msg, &buf[0..6], None)
.unwrap();
- assert!(master
+ assert!(frontend
.get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
.is_err());
}
#[test]
fn test_maset_set_mem_table_failure() {
- let (master, _peer) = create_pair2();
+ let (frontend, _peer) = create_pair2();
- master.set_mem_table(&[]).unwrap_err();
+ frontend.set_mem_table(&[]).unwrap_err();
let tables = vec![VhostUserMemoryRegionInfo::default(); MAX_ATTACHED_FD_ENTRIES + 1];
- master.set_mem_table(&tables).unwrap_err();
+ frontend.set_mem_table(&tables).unwrap_err();
}
}
diff --git a/crates/vhost/src/vhost_user/frontend_req_handler.rs b/crates/vhost/src/vhost_user/frontend_req_handler.rs
new file mode 100644
index 0000000..77d4bf5
--- /dev/null
+++ b/crates/vhost/src/vhost_user/frontend_req_handler.rs
@@ -0,0 +1,512 @@
+// Copyright (C) 2019-2021 Alibaba Cloud. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::fs::File;
+use std::mem;
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::net::UnixStream;
+use std::sync::{Arc, Mutex};
+
+use super::connection::Endpoint;
+use super::message::*;
+use super::{Error, HandlerResult, Result};
+
+/// Define services provided by frontends for the backend communication channel.
+///
+/// The vhost-user specification defines a backend communication channel, by which backends could
+/// request services from frontends. The [VhostUserFrontendReqHandler] trait defines services provided
+/// by frontends, and it's used both on the frontend side and backend side.
+/// - on the backend side, a stub forwarder implementing [VhostUserFrontendReqHandler] will proxy
+/// service requests to frontends. The [Backend] is an example stub forwarder.
+/// - on the frontend side, the [FrontendReqHandler] will forward service requests to a handler
+/// implementing [VhostUserFrontendReqHandler].
+///
+/// The [VhostUserFrontendReqHandler] trait is designed with interior mutability to improve
+/// performance in multi-threaded use (see the example below).
+///
+/// [VhostUserFrontendReqHandler]: trait.VhostUserFrontendReqHandler.html
+/// [FrontendReqHandler]: struct.FrontendReqHandler.html
+/// [Backend]: struct.Backend.html
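+///
+/// # Example
+///
+/// A minimal sketch, assuming the module's items are in scope: implement the mutable helper
+/// trait `VhostUserFrontendReqHandlerMut` and wrap it in a `Mutex` to obtain a
+/// [VhostUserFrontendReqHandler] with interior mutability.
+///
+/// ```ignore
+/// use std::sync::{Arc, Mutex};
+///
+/// struct MyHandler;
+///
+/// impl VhostUserFrontendReqHandlerMut for MyHandler {
+///     fn handle_config_change(&mut self) -> HandlerResult<u64> {
+///         Ok(0)
+///     }
+/// }
+///
+/// // `Mutex<MyHandler>` implements `VhostUserFrontendReqHandler`.
+/// let handler = FrontendReqHandler::new(Arc::new(Mutex::new(MyHandler))).unwrap();
+/// ```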
+pub trait VhostUserFrontendReqHandler {
+ /// Handle device configuration change notifications.
+ fn handle_config_change(&self) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ /// Handle shared object add operation
+ fn shared_object_add(&self, _uuid: &VhostUserSharedMsg) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ /// Handle shared object remove operation
+ fn shared_object_remove(&self, _uuid: &VhostUserSharedMsg) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ /// Handle shared object lookup operation
+ fn shared_object_lookup(
+ &self,
+ _uuid: &VhostUserSharedMsg,
+ _fd: &dyn AsRawFd,
+ ) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ // fn handle_iotlb_msg(&mut self, iotlb: VhostUserIotlb);
+ // fn handle_vring_host_notifier(&mut self, area: VhostUserVringArea, fd: &dyn AsRawFd);
+}
+
+/// A helper trait mirroring [VhostUserFrontendReqHandler] but without interior mutability.
+///
+/// [VhostUserFrontendReqHandler]: trait.VhostUserFrontendReqHandler.html
+pub trait VhostUserFrontendReqHandlerMut {
+ /// Handle device configuration change notifications.
+ fn handle_config_change(&mut self) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ /// Handle shared object add operation
+ fn shared_object_add(&mut self, _uuid: &VhostUserSharedMsg) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ /// Handle shared object remove operation
+ fn shared_object_remove(&mut self, _uuid: &VhostUserSharedMsg) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ /// Handle shared object lookup operation
+ fn shared_object_lookup(
+ &mut self,
+ _uuid: &VhostUserSharedMsg,
+ _fd: &dyn AsRawFd,
+ ) -> HandlerResult<u64> {
+ Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
+ }
+
+ // fn handle_iotlb_msg(&mut self, iotlb: VhostUserIotlb);
+ // fn handle_vring_host_notifier(&mut self, area: VhostUserVringArea, fd: RawFd);
+}
+
+impl<S: VhostUserFrontendReqHandlerMut> VhostUserFrontendReqHandler for Mutex<S> {
+ fn handle_config_change(&self) -> HandlerResult<u64> {
+ self.lock().unwrap().handle_config_change()
+ }
+
+ /// Handle shared object add operation
+ fn shared_object_add(&self, uuid: &VhostUserSharedMsg) -> HandlerResult<u64> {
+ self.lock().unwrap().shared_object_add(uuid)
+ }
+
+ /// Handle shared object remove operation
+ fn shared_object_remove(&self, uuid: &VhostUserSharedMsg) -> HandlerResult<u64> {
+ self.lock().unwrap().shared_object_remove(uuid)
+ }
+
+ /// Handle shared object lookup operation
+ fn shared_object_lookup(
+ &self,
+ uuid: &VhostUserSharedMsg,
+ fd: &dyn AsRawFd,
+ ) -> HandlerResult<u64> {
+ self.lock().unwrap().shared_object_lookup(uuid, fd)
+ }
+}
+
+/// Server to handle service requests from backends over the backend communication channel.
+///
+/// The [FrontendReqHandler] acts as a server on the frontend side, to handle service requests from
+/// backends on the backend communication channel. It's actually a proxy invoking the registered
+/// handler implementing [VhostUserFrontendReqHandler] to do the real work.
+///
+/// [FrontendReqHandler]: struct.FrontendReqHandler.html
+/// [VhostUserFrontendReqHandler]: trait.VhostUserFrontendReqHandler.html
+pub struct FrontendReqHandler<S: VhostUserFrontendReqHandler> {
+ // underlying Unix domain socket for communication
+ sub_sock: Endpoint<VhostUserMsgHeader<BackendReq>>,
+ tx_sock: UnixStream,
+ // Protocol feature VHOST_USER_PROTOCOL_F_REPLY_ACK has been negotiated.
+ reply_ack_negotiated: bool,
+ // the VirtIO backend device object
+ backend: Arc<S>,
+ // whether the endpoint has encountered any failure
+ error: Option<i32>,
+}
+
+impl<S: VhostUserFrontendReqHandler> FrontendReqHandler<S> {
+ /// Create a server to handle service requests from backends on the backend communication channel.
+ ///
+ /// This opens a pair of connected anonymous sockets to form the backend communication channel.
+ /// The socket fd returned by [Self::get_tx_raw_fd()] should be sent to the backend by
+ /// [VhostUserFrontend::set_backend_request_fd()].
+ ///
+ /// [Self::get_tx_raw_fd()]: struct.FrontendReqHandler.html#method.get_tx_raw_fd
+ /// [VhostUserFrontend::set_backend_request_fd()]: trait.VhostUserFrontend.html#tymethod.set_backend_request_fd
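+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of the wiring, assuming `frontend` is a connected `VhostUserFrontend`
+ /// and `handler` is an `Arc` wrapping a type implementing [VhostUserFrontendReqHandler]:
+ ///
+ /// ```ignore
+ /// let req_handler = FrontendReqHandler::new(handler).unwrap();
+ /// // RawFd implements AsRawFd, so the returned fd can be passed directly.
+ /// frontend
+ ///     .set_backend_request_fd(&req_handler.get_tx_raw_fd())
+ ///     .unwrap();
+ /// ```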
+ pub fn new(backend: Arc<S>) -> Result<Self> {
+ let (tx, rx) = UnixStream::pair().map_err(Error::SocketError)?;
+
+ Ok(FrontendReqHandler {
+ sub_sock: Endpoint::<VhostUserMsgHeader<BackendReq>>::from_stream(rx),
+ tx_sock: tx,
+ reply_ack_negotiated: false,
+ backend,
+ error: None,
+ })
+ }
+
+ /// Get the socket fd for the backend to communicate with the frontend.
+ ///
+ /// The returned fd should be sent to the backend by [VhostUserFrontend::set_backend_request_fd()].
+ ///
+ /// [VhostUserFrontend::set_backend_request_fd()]: trait.VhostUserFrontend.html#tymethod.set_backend_request_fd
+ pub fn get_tx_raw_fd(&self) -> RawFd {
+ self.tx_sock.as_raw_fd()
+ }
+
+ /// Set the negotiation state of the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature.
+ ///
+ /// When the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature has been negotiated,
+ /// the "REPLY_ACK" flag will be set in the message header for every backend to frontend request
+ /// message.
+ pub fn set_reply_ack_flag(&mut self, enable: bool) {
+ self.reply_ack_negotiated = enable;
+ }
+
+ /// Mark endpoint as failed or in normal state.
+ pub fn set_failed(&mut self, error: i32) {
+ if error == 0 {
+ self.error = None;
+ } else {
+ self.error = Some(error);
+ }
+ }
+
+ /// Main entrance to serve backend requests from the backend communication channel.
+ ///
+ /// The caller needs to (see the sketch below):
+ /// - serialize calls to this function
+ /// - decide what to do when an error happens
+ /// - optionally recover from failure
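+ ///
+ /// # Example
+ ///
+ /// A minimal sketch of a serialized dispatch loop, assuming `handler` is a mutable
+ /// `FrontendReqHandler` and errors are treated as fatal:
+ ///
+ /// ```ignore
+ /// loop {
+ ///     if let Err(e) = handler.handle_request() {
+ ///         eprintln!("backend request failed: {e}");
+ ///         break;
+ ///     }
+ /// }
+ /// ```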
+ pub fn handle_request(&mut self) -> Result<u64> {
+ // Return error if the endpoint is already in failed state.
+ self.check_state()?;
+
+ // The underlying communication channel is a Unix domain socket in
+ // stream mode, and recvmsg() is a little tricky here. To successfully
+ // receive attached file descriptors, we need to receive messages and
+ // corresponding attached file descriptors in this way:
+ // . recv message header and optional attached file
+ // . validate message header
+ // . recv optional message body and payload according to the size field
+ // in the message header
+ // . validate message body and optional payload
+ let (hdr, files) = self.sub_sock.recv_header()?;
+ self.check_attached_files(&hdr, &files)?;
+ let (size, buf) = match hdr.get_size() {
+ 0 => (0, vec![0u8; 0]),
+ len => {
+ if len as usize > MAX_MSG_SIZE {
+ return Err(Error::InvalidMessage);
+ }
+ let (size2, rbuf) = self.sub_sock.recv_data(len as usize)?;
+ if size2 != len as usize {
+ return Err(Error::InvalidMessage);
+ }
+ (size2, rbuf)
+ }
+ };
+
+ let res = match hdr.get_code() {
+ Ok(BackendReq::CONFIG_CHANGE_MSG) => {
+ self.check_msg_size(&hdr, size, 0)?;
+ self.backend
+ .handle_config_change()
+ .map_err(Error::ReqHandlerError)
+ }
+ Ok(BackendReq::SHARED_OBJECT_ADD) => {
+ let msg = self.extract_msg_body::<VhostUserSharedMsg>(&hdr, size, &buf)?;
+ self.backend
+ .shared_object_add(&msg)
+ .map_err(Error::ReqHandlerError)
+ }
+ Ok(BackendReq::SHARED_OBJECT_REMOVE) => {
+ let msg = self.extract_msg_body::<VhostUserSharedMsg>(&hdr, size, &buf)?;
+ self.backend
+ .shared_object_remove(&msg)
+ .map_err(Error::ReqHandlerError)
+ }
+ Ok(BackendReq::SHARED_OBJECT_LOOKUP) => {
+ let msg = self.extract_msg_body::<VhostUserSharedMsg>(&hdr, size, &buf)?;
+ self.backend
+ .shared_object_lookup(&msg, &files.unwrap()[0])
+ .map_err(Error::ReqHandlerError)
+ }
+ _ => Err(Error::InvalidMessage),
+ };
+
+ self.send_ack_message(&hdr, &res)?;
+
+ res
+ }
+
+ fn check_state(&self) -> Result<()> {
+ match self.error {
+ Some(e) => Err(Error::SocketBroken(std::io::Error::from_raw_os_error(e))),
+ None => Ok(()),
+ }
+ }
+
+ fn check_msg_size(
+ &self,
+ hdr: &VhostUserMsgHeader<BackendReq>,
+ size: usize,
+ expected: usize,
+ ) -> Result<()> {
+ if hdr.get_size() as usize != expected
+ || hdr.is_reply()
+ || hdr.get_version() != 0x1
+ || size != expected
+ {
+ return Err(Error::InvalidMessage);
+ }
+ Ok(())
+ }
+
+ fn check_attached_files(
+ &self,
+ hdr: &VhostUserMsgHeader<BackendReq>,
+ files: &Option<Vec<File>>,
+ ) -> Result<()> {
+ match hdr.get_code() {
+ Ok(BackendReq::SHARED_OBJECT_LOOKUP) => {
+ // Expect a single file to be passed.
+ match files {
+ Some(files) if files.len() == 1 => Ok(()),
+ _ => Err(Error::InvalidMessage),
+ }
+ }
+ _ if files.is_some() => Err(Error::InvalidMessage),
+ _ => Ok(()),
+ }
+ }
+
+ fn extract_msg_body<T: Sized + VhostUserMsgValidator>(
+ &self,
+ hdr: &VhostUserMsgHeader<BackendReq>,
+ size: usize,
+ buf: &[u8],
+ ) -> Result<T> {
+ self.check_msg_size(hdr, size, mem::size_of::<T>())?;
+ // SAFETY: Safe because we checked that `buf` size is equal to T size.
+ let msg = unsafe { std::ptr::read_unaligned(buf.as_ptr() as *const T) };
+ if !msg.is_valid() {
+ return Err(Error::InvalidMessage);
+ }
+ Ok(msg)
+ }
+
+ fn new_reply_header<T: Sized>(
+ &self,
+ req: &VhostUserMsgHeader<BackendReq>,
+ ) -> Result<VhostUserMsgHeader<BackendReq>> {
+ if mem::size_of::<T>() > MAX_MSG_SIZE {
+ return Err(Error::InvalidParam);
+ }
+ self.check_state()?;
+ Ok(VhostUserMsgHeader::new(
+ req.get_code()?,
+ VhostUserHeaderFlag::REPLY.bits(),
+ mem::size_of::<T>() as u32,
+ ))
+ }
+
+ fn send_ack_message(
+ &mut self,
+ req: &VhostUserMsgHeader<BackendReq>,
+ res: &Result<u64>,
+ ) -> Result<()> {
+ if self.reply_ack_negotiated && req.is_need_reply() {
+ let hdr = self.new_reply_header::<VhostUserU64>(req)?;
+ let def_err = libc::EINVAL;
+ let val = match res {
+ Ok(n) => *n,
+ Err(e) => match e {
+ Error::ReqHandlerError(ioerr) => match ioerr.raw_os_error() {
+ Some(rawerr) => -rawerr as u64,
+ None => -def_err as u64,
+ },
+ _ => -def_err as u64,
+ },
+ };
+ let msg = VhostUserU64::new(val);
+ self.sub_sock.send_message(&hdr, &msg, None)?;
+ }
+ Ok(())
+ }
+}
+
+impl<S: VhostUserFrontendReqHandler> AsRawFd for FrontendReqHandler<S> {
+ fn as_raw_fd(&self) -> RawFd {
+ self.sub_sock.as_raw_fd()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ use std::collections::HashSet;
+
+ use uuid::Uuid;
+
+ #[cfg(feature = "vhost-user-backend")]
+ use crate::vhost_user::Backend;
+ #[cfg(feature = "vhost-user-backend")]
+ use std::os::unix::io::FromRawFd;
+
+ struct MockFrontendReqHandler {
+ shared_objects: HashSet<Uuid>,
+ }
+
+ impl MockFrontendReqHandler {
+ fn new() -> Self {
+ Self {
+ shared_objects: HashSet::new(),
+ }
+ }
+ }
+
+ impl VhostUserFrontendReqHandlerMut for MockFrontendReqHandler {
+ fn shared_object_add(&mut self, uuid: &VhostUserSharedMsg) -> HandlerResult<u64> {
+ Ok(!self.shared_objects.insert(uuid.uuid) as u64)
+ }
+
+ fn shared_object_remove(&mut self, uuid: &VhostUserSharedMsg) -> HandlerResult<u64> {
+ Ok(!self.shared_objects.remove(&uuid.uuid) as u64)
+ }
+
+ fn shared_object_lookup(
+ &mut self,
+ uuid: &VhostUserSharedMsg,
+ _fd: &dyn AsRawFd,
+ ) -> HandlerResult<u64> {
+ if self.shared_objects.contains(&uuid.uuid) {
+ return Ok(0);
+ }
+ Ok(1)
+ }
+ }
+
+ #[test]
+ fn test_new_frontend_req_handler() {
+ let backend = Arc::new(Mutex::new(MockFrontendReqHandler::new()));
+ let mut handler = FrontendReqHandler::new(backend).unwrap();
+
+ assert!(handler.get_tx_raw_fd() >= 0);
+ assert!(handler.as_raw_fd() >= 0);
+ handler.check_state().unwrap();
+
+ assert_eq!(handler.error, None);
+ handler.set_failed(libc::EAGAIN);
+ assert_eq!(handler.error, Some(libc::EAGAIN));
+ handler.check_state().unwrap_err();
+ }
+
+ #[cfg(feature = "vhost-user-backend")]
+ #[test]
+ fn test_frontend_backend_req_handler() {
+ let backend = Arc::new(Mutex::new(MockFrontendReqHandler::new()));
+ let mut handler = FrontendReqHandler::new(backend).unwrap();
+
+ // SAFETY: Safe because `handler` contains valid fds, and we are
+ // checking if `dup` returns a valid fd.
+ let fd = unsafe { libc::dup(handler.get_tx_raw_fd()) };
+ if fd < 0 {
+ panic!("failed to duplicated tx fd!");
+ }
+ // SAFETY: Safe because we checked if fd is valid.
+ let stream = unsafe { UnixStream::from_raw_fd(fd) };
+ let backend = Backend::from_stream(stream);
+
+ let frontend_handler = std::thread::spawn(move || {
+ // Testing shared object messages.
+ assert_eq!(handler.handle_request().unwrap(), 0);
+ assert_eq!(handler.handle_request().unwrap(), 1);
+ assert_eq!(handler.handle_request().unwrap(), 0);
+ assert_eq!(handler.handle_request().unwrap(), 1);
+ assert_eq!(handler.handle_request().unwrap(), 0);
+ assert_eq!(handler.handle_request().unwrap(), 1);
+ });
+
+ backend.set_shared_object_flag(true);
+
+ let shobj_msg = VhostUserSharedMsg {
+ uuid: Uuid::new_v4(),
+ };
+ assert!(backend.shared_object_add(&shobj_msg).is_ok());
+ assert!(backend.shared_object_add(&shobj_msg).is_ok());
+ assert!(backend.shared_object_lookup(&shobj_msg, &fd).is_ok());
+ assert!(backend
+ .shared_object_lookup(
+ &VhostUserSharedMsg {
+ uuid: Uuid::new_v4(),
+ },
+ &fd,
+ )
+ .is_ok());
+ assert!(backend.shared_object_remove(&shobj_msg).is_ok());
+ assert!(backend.shared_object_remove(&shobj_msg).is_ok());
+ // Ensure that the handler thread did not panic.
+ assert!(frontend_handler.join().is_ok());
+ }
+
+ #[cfg(feature = "vhost-user-backend")]
+ #[test]
+ fn test_frontend_backend_req_handler_with_ack() {
+ let backend = Arc::new(Mutex::new(MockFrontendReqHandler::new()));
+ let mut handler = FrontendReqHandler::new(backend).unwrap();
+ handler.set_reply_ack_flag(true);
+
+ // SAFETY: Safe because `handler` contains valid fds, and we are
+ // checking if `dup` returns a valid fd.
+ let fd = unsafe { libc::dup(handler.get_tx_raw_fd()) };
+ if fd < 0 {
+ panic!("failed to duplicated tx fd!");
+ }
+ // SAFETY: Safe because we checked if fd is valid.
+ let stream = unsafe { UnixStream::from_raw_fd(fd) };
+ let backend = Backend::from_stream(stream);
+
+ let frontend_handler = std::thread::spawn(move || {
+ // Testing shared object messages.
+ assert_eq!(handler.handle_request().unwrap(), 0);
+ assert_eq!(handler.handle_request().unwrap(), 1);
+ assert_eq!(handler.handle_request().unwrap(), 0);
+ assert_eq!(handler.handle_request().unwrap(), 1);
+ assert_eq!(handler.handle_request().unwrap(), 0);
+ assert_eq!(handler.handle_request().unwrap(), 1);
+ });
+
+ backend.set_reply_ack_flag(true);
+ backend.set_shared_object_flag(true);
+
+ let shobj_msg = VhostUserSharedMsg {
+ uuid: Uuid::new_v4(),
+ };
+ assert!(backend.shared_object_add(&shobj_msg).is_ok());
+ assert!(backend.shared_object_add(&shobj_msg).is_err());
+ assert!(backend.shared_object_lookup(&shobj_msg, &fd).is_ok());
+ assert!(backend
+ .shared_object_lookup(
+ &VhostUserSharedMsg {
+ uuid: Uuid::new_v4(),
+ },
+ &fd,
+ )
+ .is_err());
+ assert!(backend.shared_object_remove(&shobj_msg).is_ok());
+ assert!(backend.shared_object_remove(&shobj_msg).is_err());
+ // Ensure that the handler thread did not panic.
+ assert!(frontend_handler.join().is_ok());
+ }
+}
diff --git a/crates/vhost/src/vhost_user/gpu_backend_req.rs b/crates/vhost/src/vhost_user/gpu_backend_req.rs
new file mode 100644
index 0000000..776e94d
--- /dev/null
+++ b/crates/vhost/src/vhost_user/gpu_backend_req.rs
@@ -0,0 +1,654 @@
+// Copyright (C) 2024 Red Hat, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::os::fd::{AsRawFd, RawFd};
+use std::os::unix::net::UnixStream;
+use std::sync::{Arc, Mutex, MutexGuard};
+use std::{io, mem, slice};
+
+use vm_memory::ByteValued;
+
+use crate::vhost_user;
+use crate::vhost_user::connection::Endpoint;
+use crate::vhost_user::gpu_message::*;
+use crate::vhost_user::message::{VhostUserEmpty, VhostUserMsgValidator, VhostUserU64};
+use crate::vhost_user::Error;
+
+struct BackendInternal {
+ sock: Endpoint<VhostUserGpuMsgHeader<GpuBackendReq>>,
+ // whether the endpoint has encountered any failure
+ error: Option<i32>,
+}
+
+fn io_err_convert_fn(info: &str) -> impl Fn(vhost_user::Error) -> io::Error + '_ {
+ move |e| io::Error::new(io::ErrorKind::Other, format!("{info}: {e}"))
+}
+
+impl BackendInternal {
+ fn check_state(&self) -> io::Result<u64> {
+ match self.error {
+ Some(e) => Err(io_err_convert_fn("check_state")(Error::SocketBroken(
+ io::Error::from_raw_os_error(e),
+ ))),
+ None => Ok(0),
+ }
+ }
+
+ fn send_header(
+ &mut self,
+ request: GpuBackendReq,
+ fds: Option<&[RawFd]>,
+ ) -> io::Result<VhostUserGpuMsgHeader<GpuBackendReq>> {
+ self.check_state()?;
+ let hdr = VhostUserGpuMsgHeader::new(request, 0, 0);
+ self.sock
+ .send_header(&hdr, fds)
+ .map_err(io_err_convert_fn("send_header"))?;
+ Ok(hdr)
+ }
+
+ fn send_message<T: ByteValued>(
+ &mut self,
+ request: GpuBackendReq,
+ body: &T,
+ fds: Option<&[RawFd]>,
+ ) -> io::Result<VhostUserGpuMsgHeader<GpuBackendReq>> {
+ self.check_state()?;
+
+ let len = mem::size_of::<T>();
+ let hdr = VhostUserGpuMsgHeader::new(request, 0, len as u32);
+ self.sock
+ .send_message(&hdr, body, fds)
+ .map_err(io_err_convert_fn("send_message"))?;
+ Ok(hdr)
+ }
+
+ fn send_message_with_payload<T: ByteValued>(
+ &mut self,
+ request: GpuBackendReq,
+ body: &T,
+ data: &[u8],
+ fds: Option<&[RawFd]>,
+ ) -> io::Result<VhostUserGpuMsgHeader<GpuBackendReq>> {
+ self.check_state()?;
+
+ let len = mem::size_of::<T>() + data.len();
+ let hdr = VhostUserGpuMsgHeader::new(request, 0, len as u32);
+ self.sock
+ .send_message_with_payload(&hdr, body, data, fds)
+ .map_err(io_err_convert_fn("send_message_with_payload"))?;
+ Ok(hdr)
+ }
+
+ // Note that there is no VHOST_USER_PROTOCOL_F_REPLY_ACK for this protocol; some messages
+ // always expect a reply/ack while others never do.
+ fn recv_reply<V: ByteValued + Sized + Default + VhostUserMsgValidator>(
+ &mut self,
+ hdr: &VhostUserGpuMsgHeader<GpuBackendReq>,
+ ) -> io::Result<V> {
+ self.check_state()?;
+ let (reply, body, rfds) = self
+ .sock
+ .recv_body::<V>()
+ .map_err(io_err_convert_fn("recv_body"))?;
+ if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() {
+ return Err(io_err_convert_fn("Unexpected reply")(Error::InvalidMessage));
+ }
+ Ok(body)
+ }
+}
+
+/// Proxy for sending messages from the backend to the frontend
+/// over the socket obtained from VHOST_USER_GPU_SET_SOCKET.
+/// The protocol is documented here: https://www.qemu.org/docs/master/interop/vhost-user-gpu.html
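+///
+/// # Example
+///
+/// A minimal sketch, assuming `stream` is the `UnixStream` received via
+/// VHOST_USER_GPU_SET_SOCKET:
+///
+/// ```ignore
+/// let gpu_backend = GpuBackend::from_stream(stream);
+/// let display_info = gpu_backend.get_display_info().unwrap();
+/// println!("preferred display configuration: {:?}", display_info);
+/// ```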
+#[derive(Clone)]
+pub struct GpuBackend {
+ // underlying Unix domain socket for communication
+ node: Arc<Mutex<BackendInternal>>,
+}
+
+impl GpuBackend {
+ fn new(ep: Endpoint<VhostUserGpuMsgHeader<GpuBackendReq>>) -> Self {
+ Self {
+ node: Arc::new(Mutex::new(BackendInternal {
+ sock: ep,
+ error: None,
+ })),
+ }
+ }
+
+ fn node(&self) -> MutexGuard<BackendInternal> {
+ self.node.lock().unwrap()
+ }
+
+ /// Send the VHOST_USER_GPU_GET_PROTOCOL_FEATURES message to the frontend and wait for a reply.
+ /// Get the supported protocol features bitmask.
+ pub fn get_protocol_features(&self) -> io::Result<VhostUserU64> {
+ let mut node = self.node();
+
+ let hdr = node.send_header(GpuBackendReq::GET_PROTOCOL_FEATURES, None)?;
+ node.recv_reply(&hdr)
+ }
+
+ /// Send the VHOST_USER_GPU_SET_PROTOCOL_FEATURES message to the frontend. Doesn't wait for
+ /// a reply.
+ /// Enable protocol features using a bitmask.
+ pub fn set_protocol_features(&self, msg: &VhostUserU64) -> io::Result<()> {
+ let mut node = self.node();
+
+ node.send_message(GpuBackendReq::SET_PROTOCOL_FEATURES, msg, None)?;
+ Ok(())
+ }
+
+ /// Send the VHOST_USER_GPU_GET_DISPLAY_INFO message to the frontend and wait for a reply.
+ /// Get the preferred display configuration.
+ pub fn get_display_info(&self) -> io::Result<VirtioGpuRespDisplayInfo> {
+ let mut node = self.node();
+
+ let hdr = node.send_header(GpuBackendReq::GET_DISPLAY_INFO, None)?;
+ node.recv_reply(&hdr)
+ }
+
+ /// Send the VHOST_USER_GPU_GET_EDID message to the frontend and wait for a reply.
+ /// Retrieve the EDID data for a given scanout.
+ /// This message requires the VHOST_USER_GPU_PROTOCOL_F_EDID protocol feature to be supported.
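+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, assuming `gpu_backend` is a connected `GpuBackend` and the frontend
+ /// supports VHOST_USER_GPU_PROTOCOL_F_EDID:
+ ///
+ /// ```ignore
+ /// let edid = gpu_backend
+ ///     .get_edid(&VhostUserGpuEdidRequest { scanout_id: 0 })
+ ///     .unwrap();
+ /// ```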
+ pub fn get_edid(&self, get_edid: &VhostUserGpuEdidRequest) -> io::Result<VirtioGpuRespGetEdid> {
+ let mut node = self.node();
+
+ let hdr = node.send_message(GpuBackendReq::GET_EDID, get_edid, None)?;
+ node.recv_reply(&hdr)
+ }
+
+ /// Send the VHOST_USER_GPU_SCANOUT message to the frontend. Doesn't wait for a reply.
+ /// Set the scanout resolution. To disable a scanout, the dimensions width/height are set to 0.
+ pub fn set_scanout(&self, scanout: &VhostUserGpuScanout) -> io::Result<()> {
+ let mut node = self.node();
+
+ node.send_message(GpuBackendReq::SCANOUT, scanout, None)?;
+ Ok(())
+ }
+
+ /// Sends the VHOST_USER_GPU_UPDATE message to the frontend. Doesn't wait for a reply.
+ /// Updates the scanout content. The data payload contains the graphical bits.
+ /// The display should be flushed and presented.
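+ ///
+ /// # Example
+ ///
+ /// A minimal sketch that pushes a 10x10 all-zero region (4 bytes per pixel), assuming
+ /// `gpu_backend` is a connected `GpuBackend`:
+ ///
+ /// ```ignore
+ /// let update = VhostUserGpuUpdate { scanout_id: 0, x: 0, y: 0, width: 10, height: 10 };
+ /// let pixels = vec![0u8; 4 * 10 * 10];
+ /// gpu_backend.update_scanout(&update, &pixels).unwrap();
+ /// ```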
+ pub fn update_scanout(&self, update: &VhostUserGpuUpdate, data: &[u8]) -> io::Result<()> {
+ let mut node = self.node();
+
+ node.send_message_with_payload(GpuBackendReq::UPDATE, update, data, None)?;
+ Ok(())
+ }
+
+ /// Send the VHOST_USER_GPU_DMABUF_SCANOUT message to the frontend. Doesn't wait for a reply.
+ /// Set the scanout resolution/configuration, and share a DMABUF file descriptor for the scanout
+ /// content, which is passed as ancillary data. To disable a scanout, the dimensions
+ /// width/height are set to 0 and no file descriptor is passed.
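+ ///
+ /// # Example
+ ///
+ /// A minimal sketch that disables scanout 0, assuming `gpu_backend` is a connected
+ /// `GpuBackend`:
+ ///
+ /// ```ignore
+ /// let scanout = VhostUserGpuDMABUFScanout {
+ ///     scanout_id: 0,
+ ///     x: 0,
+ ///     y: 0,
+ ///     width: 0,
+ ///     height: 0,
+ ///     fd_width: 0,
+ ///     fd_height: 0,
+ ///     fd_stride: 0,
+ ///     fd_flags: 0,
+ ///     fd_drm_fourcc: 0,
+ /// };
+ /// gpu_backend
+ ///     .set_dmabuf_scanout(&scanout, None::<&std::fs::File>)
+ ///     .unwrap();
+ /// ```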
+ pub fn set_dmabuf_scanout(
+ &self,
+ scanout: &VhostUserGpuDMABUFScanout,
+ fd: Option<&impl AsRawFd>,
+ ) -> io::Result<()> {
+ let mut node = self.node();
+
+ let fd = fd.map(AsRawFd::as_raw_fd);
+ let fd = fd.as_ref().map(slice::from_ref);
+ node.send_message(GpuBackendReq::DMABUF_SCANOUT, scanout, fd)?;
+ Ok(())
+ }
+
+ /// Send the VHOST_USER_GPU_DMABUF_SCANOUT2 message to the frontend. Doesn't wait for a reply.
+ /// Same as `set_dmabuf_scanout` (VHOST_USER_GPU_DMABUF_SCANOUT), but also sends the dmabuf
+ /// modifiers appended to the message, which were not provided in the other message. This
+ /// message requires the VhostUserGpuProtocolFeatures::DMABUF2
+ /// (VHOST_USER_GPU_PROTOCOL_F_DMABUF2) protocol feature to be supported.
+ pub fn set_dmabuf_scanout2(
+ &self,
+ scanout: &VhostUserGpuDMABUFScanout2,
+ fd: Option<&impl AsRawFd>,
+ ) -> io::Result<()> {
+ let mut node = self.node();
+
+ let fd = fd.map(AsRawFd::as_raw_fd);
+ let fd = fd.as_ref().map(slice::from_ref);
+ node.send_message(GpuBackendReq::DMABUF_SCANOUT2, scanout, fd)?;
+ Ok(())
+ }
+
+ /// Send the VHOST_USER_GPU_DMABUF_UPDATE message to the frontend and wait for acknowledgment.
+ /// The display should be flushed and presented according to the updated region
+ /// from VhostUserGpuUpdate.
+ pub fn update_dmabuf_scanout(&self, update: &VhostUserGpuUpdate) -> io::Result<()> {
+ let mut node = self.node();
+
+ let hdr = node.send_message(GpuBackendReq::DMABUF_UPDATE, update, None)?;
+ let _: VhostUserEmpty = node.recv_reply(&hdr)?;
+ Ok(())
+ }
+
+ /// Send the VHOST_USER_GPU_CURSOR_POS message to the frontend. Doesn't wait for a reply.
+ /// Set/show the cursor position.
+ pub fn cursor_pos(&self, cursor_pos: &VhostUserGpuCursorPos) -> io::Result<()> {
+ let mut node = self.node();
+
+ node.send_message(GpuBackendReq::CURSOR_POS, cursor_pos, None)?;
+ Ok(())
+ }
+
+ /// Send the VHOST_USER_GPU_CURSOR_POS_HIDE message to the frontend. Doesn't wait for a reply.
+ /// Set/hide the cursor.
+ pub fn cursor_pos_hide(&self, cursor_pos: &VhostUserGpuCursorPos) -> io::Result<()> {
+ let mut node = self.node();
+
+ node.send_message(GpuBackendReq::CURSOR_POS_HIDE, cursor_pos, None)?;
+ Ok(())
+ }
+
+ /// Send the VHOST_USER_GPU_CURSOR_UPDATE message to the frontend. Doesn't wait for a reply.
+ /// Update the cursor shape and location.
+ /// `data` represents a 64*64 cursor image (PIXMAN_x8r8g8b8 format).
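+ ///
+ /// # Example
+ ///
+ /// A minimal sketch that uploads an all-zero 64x64 cursor image, assuming `gpu_backend`
+ /// is a connected `GpuBackend` and `cursor_update` is a prepared `VhostUserGpuCursorUpdate`:
+ ///
+ /// ```ignore
+ /// let pixels = [0u8; 4 * 64 * 64]; // PIXMAN_x8r8g8b8, all zeroes
+ /// gpu_backend.cursor_update(&cursor_update, &pixels).unwrap();
+ /// ```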
+ pub fn cursor_update(
+ &self,
+ cursor_update: &VhostUserGpuCursorUpdate,
+ data: &[u8; 4 * 64 * 64],
+ ) -> io::Result<()> {
+ let mut node = self.node();
+
+ node.send_message_with_payload(GpuBackendReq::CURSOR_UPDATE, cursor_update, data, None)?;
+ Ok(())
+ }
+
+ /// Create a new instance from a `UnixStream` object.
+ pub fn from_stream(sock: UnixStream) -> Self {
+ Self::new(Endpoint::<VhostUserGpuMsgHeader<GpuBackendReq>>::from_stream(sock))
+ }
+
+ /// Mark endpoint as failed with specified error code.
+ pub fn set_failed(&self, error: i32) {
+ self.node().error = Some(error);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use libc::STDOUT_FILENO;
+ use std::mem::{size_of, size_of_val};
+ use std::thread;
+ use std::time::Duration;
+
+ const TEST_DMABUF_SCANOUT_REQUEST: VhostUserGpuDMABUFScanout = VhostUserGpuDMABUFScanout {
+ scanout_id: 1,
+ x: 0,
+ y: 0,
+ width: 1920,
+ height: 1080,
+ fd_width: 1920,
+ fd_height: 1080,
+ fd_stride: 0,
+ fd_flags: 0,
+ fd_drm_fourcc: 0,
+ };
+ const TEST_CURSOR_POS_REQUEST: VhostUserGpuCursorPos = VhostUserGpuCursorPos {
+ scanout_id: 1,
+ x: 31,
+ y: 102,
+ };
+
+ fn frontend_backend_pair() -> (Endpoint<VhostUserGpuMsgHeader<GpuBackendReq>>, GpuBackend) {
+ let (backend, frontend) = UnixStream::pair().unwrap();
+ let backend = GpuBackend::from_stream(backend);
+ let frontend = Endpoint::from_stream(frontend);
+
+ (frontend, backend)
+ }
+
+ fn assert_hdr(
+ hdr: &VhostUserGpuMsgHeader<GpuBackendReq>,
+ expected_req_code: GpuBackendReq,
+ expected_size: usize,
+ ) {
+ let size: u32 = expected_size.try_into().unwrap();
+ assert_eq!(hdr, &VhostUserGpuMsgHeader::new(expected_req_code, 0, size));
+ }
+
+ fn reply_with_msg<R>(
+ frontend: &mut Endpoint<VhostUserGpuMsgHeader<GpuBackendReq>>,
+ req_hdr: &VhostUserGpuMsgHeader<GpuBackendReq>,
+ reply_body: &R,
+ ) where
+ R: ByteValued,
+ {
+ let response_hdr = VhostUserGpuMsgHeader::new(
+ req_hdr.get_code().unwrap(),
+ VhostUserGpuHeaderFlag::REPLY.bits(),
+ size_of::<R>() as u32,
+ );
+
+ frontend
+ .send_message(&response_hdr, reply_body, None)
+ .unwrap();
+ }
+
+ #[test]
+ fn test_gpu_backend_req_set_failed() {
+ let (p1, _p2) = UnixStream::pair().unwrap();
+ let backend = GpuBackend::from_stream(p1);
+ assert!(backend.node().error.is_none());
+ backend.set_failed(libc::EAGAIN);
+ assert_eq!(backend.node().error, Some(libc::EAGAIN));
+ }
+
+ #[test]
+ fn test_get_display_info() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let expected_response = {
+ let mut resp = VirtioGpuRespDisplayInfo {
+ hdr: Default::default(),
+ pmodes: Default::default(),
+ };
+ resp.pmodes[0] = VirtioGpuDisplayOne {
+ r: VirtioGpuRect {
+ x: 0,
+ y: 0,
+ width: 640,
+ height: 480,
+ },
+ enabled: 1,
+ flags: 0,
+ };
+ resp
+ };
+
+ let sender_thread = thread::spawn(move || {
+ let response = backend.get_display_info().unwrap();
+ assert_eq!(response, expected_response);
+ });
+
+ let (hdr, fds) = frontend.recv_header().unwrap();
+ assert!(fds.is_none());
+ assert_hdr(&hdr, GpuBackendReq::GET_DISPLAY_INFO, 0);
+
+ reply_with_msg(&mut frontend, &hdr, &expected_response);
+ sender_thread.join().expect("Failed to send!");
+ }
+
+ #[test]
+ fn test_get_edid_info() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let expected_response = VirtioGpuRespGetEdid {
+ hdr: Default::default(),
+ size: 512,
+ padding: 0,
+ edid: [1u8; 1024],
+ };
+ let request = VhostUserGpuEdidRequest { scanout_id: 1 };
+
+ let sender_thread = thread::spawn(move || {
+ let response = backend.get_edid(&request).unwrap();
+ assert_eq!(response, expected_response);
+ });
+
+ let (hdr, req_body, fds) = frontend.recv_body::<VhostUserGpuEdidRequest>().unwrap();
+ assert!(fds.is_none());
+ assert_hdr(&hdr, GpuBackendReq::GET_EDID, size_of_val(&request));
+ assert_eq!(req_body, request);
+
+ reply_with_msg(&mut frontend, &hdr, &expected_response);
+ sender_thread.join().expect("Failed to send!");
+ }
+
+ #[test]
+ fn test_set_scanout() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let request = VhostUserGpuScanout {
+ scanout_id: 1,
+ width: 1920,
+ height: 1080,
+ };
+
+ let sender_thread = thread::spawn(move || {
+ let _: () = backend.set_scanout(&request).unwrap();
+ });
+
+ let (hdr, req_body, fds) = frontend.recv_body::<VhostUserGpuScanout>().unwrap();
+ assert!(fds.is_none());
+ assert_hdr(&hdr, GpuBackendReq::SCANOUT, size_of_val(&request));
+ assert_eq!(req_body, request);
+
+ sender_thread.join().expect("Failed to send!");
+ }
+
+ #[test]
+ fn test_update_scanout() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let request = VhostUserGpuUpdate {
+ scanout_id: 1,
+ x: 30,
+ y: 40,
+ width: 10,
+ height: 10,
+ };
+ let payload = [1u8; 4 * 10 * 10];
+
+ let sender_thread = thread::spawn(move || {
+ let _: () = backend.update_scanout(&request, &payload).unwrap();
+ });
+
+ let mut recv_buf = [0u8; 4096];
+ let (hdr, req_body, recv_buf_len, fds) = frontend
+ .recv_payload_into_buf::<VhostUserGpuUpdate>(&mut recv_buf)
+ .unwrap();
+ assert!(fds.is_none());
+ assert_hdr(
+ &hdr,
+ GpuBackendReq::UPDATE,
+ size_of_val(&request) + payload.len(),
+ );
+ assert_eq!(req_body, request);
+
+ assert_eq!(&payload[..], &recv_buf[..recv_buf_len]);
+
+ sender_thread.join().expect("Failed to send!");
+ }
+
+ #[test]
+ fn test_set_dmabuf_scanout() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let request = TEST_DMABUF_SCANOUT_REQUEST;
+
+ let fd: RawFd = STDOUT_FILENO;
+
+ let sender_thread = thread::spawn(move || {
+ let _: () = backend.set_dmabuf_scanout(&request, Some(&fd)).unwrap();
+ });
+
+ let (hdr, req_body, fds) = frontend.recv_body::<VhostUserGpuDMABUFScanout>().unwrap();
+
+ assert!(fds.is_some_and(|fds| fds.len() == 1));
+ assert_hdr(&hdr, GpuBackendReq::DMABUF_SCANOUT, size_of_val(&request));
+ assert_eq!(req_body, request);
+
+ sender_thread.join().expect("Failed to send!");
+ }
+
+ #[test]
+ fn test_update_dmabuf_scanout() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let request = VhostUserGpuUpdate {
+ scanout_id: 1,
+ x: 30,
+ y: 40,
+ width: 10,
+ height: 10,
+ };
+
+ let sender_thread = thread::spawn(move || {
+ let _: () = backend.update_dmabuf_scanout(&request).unwrap();
+ });
+
+ let (hdr, req_body, fds) = frontend.recv_body::<VhostUserGpuUpdate>().unwrap();
+ assert!(fds.is_none());
+ assert_hdr(&hdr, GpuBackendReq::DMABUF_UPDATE, size_of_val(&request));
+ assert_eq!(req_body, request);
+
+ // Check that update_dmabuf_scanout blocks until it receives a reply.
+ // 100ms should be enough for the thread to write to the socket and exit.
+ // (Worst case on a slow machine: this test passes even though it should have failed.)
+ thread::sleep(Duration::from_millis(100));
+ assert!(
+ !sender_thread.is_finished(),
+ "update_dmabuf_scanout is supposed to block until it receives an empty reply"
+ );
+
+ // send ack
+ reply_with_msg(&mut frontend, &hdr, &VhostUserEmpty);
+
+ sender_thread.join().expect("Failed to send!");
+ }
+
+ #[test]
+ fn test_get_protocol_features() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let expected_value = VhostUserU64::new(
+ (VhostUserGpuProtocolFeatures::DMABUF2 | VhostUserGpuProtocolFeatures::EDID).bits(),
+ );
+
+ let sender_thread = thread::spawn(move || {
+ let response: VhostUserU64 = backend.get_protocol_features().unwrap();
+ assert_eq!(response.value, expected_value.value)
+ });
+
+ let (hdr, fds) = frontend.recv_header().unwrap();
+ assert!(fds.is_none());
+ assert_hdr(&hdr, GpuBackendReq::GET_PROTOCOL_FEATURES, 0);
+
+ reply_with_msg(&mut frontend, &hdr, &expected_value);
+ sender_thread.join().expect("Failed to send!");
+ }
+
+ #[test]
+ fn test_set_protocol_features() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let expected_value = VhostUserU64::new(
+ (VhostUserGpuProtocolFeatures::DMABUF2 | VhostUserGpuProtocolFeatures::EDID).bits(),
+ );
+
+ let sender_thread = thread::spawn(move || {
+ let _: () = backend.set_protocol_features(&expected_value).unwrap();
+ });
+
+ let (hdr, req_body, fds) = frontend.recv_body::<VhostUserU64>().unwrap();
+ assert!(fds.is_none());
+ assert_hdr(
+ &hdr,
+ GpuBackendReq::SET_PROTOCOL_FEATURES,
+ size_of_val(&expected_value),
+ );
+ assert_eq!(req_body.value, expected_value.value);
+
+ sender_thread.join().expect("Failed to send!");
+ }
+
+ #[test]
+ fn test_set_cursor_pos() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let sender_thread = thread::spawn(move || {
+ let _: () = backend.cursor_pos(&TEST_CURSOR_POS_REQUEST).unwrap();
+ });
+
+ let (hdr, req_body, fds) = frontend.recv_body::<VhostUserGpuCursorPos>().unwrap();
+ assert!(fds.is_none());
+ assert_hdr(
+ &hdr,
+ GpuBackendReq::CURSOR_POS,
+ size_of_val(&TEST_CURSOR_POS_REQUEST),
+ );
+ assert_eq!(req_body, TEST_CURSOR_POS_REQUEST);
+
+ sender_thread.join().expect("Failed to send!");
+ }
+
+ #[test]
+ fn test_set_cursor_pos_hide() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let sender_thread = thread::spawn(move || {
+ let _: () = backend.cursor_pos_hide(&TEST_CURSOR_POS_REQUEST).unwrap();
+ });
+
+ let (hdr, req_body, fds) = frontend.recv_body::<VhostUserGpuCursorPos>().unwrap();
+ assert!(fds.is_none());
+ assert_hdr(
+ &hdr,
+ GpuBackendReq::CURSOR_POS_HIDE,
+ size_of_val(&TEST_CURSOR_POS_REQUEST),
+ );
+ assert_eq!(req_body, TEST_CURSOR_POS_REQUEST);
+
+ sender_thread.join().expect("Failed to send!");
+ }
+
+ #[test]
+ fn test_cursor_update() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let request = VhostUserGpuCursorUpdate {
+ pos: TEST_CURSOR_POS_REQUEST,
+ hot_x: 30,
+ hot_y: 30,
+ };
+ let payload = [2u8; 4 * 64 * 64];
+
+ let sender_thread = thread::spawn(move || {
+ let _: () = backend.cursor_update(&request, &payload).unwrap();
+ });
+
+ let mut recv_buf = vec![0u8; 1 + size_of_val(&payload)];
+ let (hdr, req_body, recv_buf_len, fds) = frontend
+ .recv_payload_into_buf::<VhostUserGpuCursorUpdate>(&mut recv_buf)
+ .unwrap();
+ assert!(fds.is_none());
+ assert_hdr(
+ &hdr,
+ GpuBackendReq::CURSOR_UPDATE,
+ size_of_val(&request) + payload.len(),
+ );
+ assert_eq!(req_body, request);
+
+ assert_eq!(&payload[..], &recv_buf[..recv_buf_len]);
+
+ sender_thread.join().expect("Failed to send!");
+ }
+
+ #[test]
+ fn test_set_dmabuf_scanout2() {
+ let (mut frontend, backend) = frontend_backend_pair();
+
+ let request = VhostUserGpuDMABUFScanout2 {
+ dmabuf_scanout: TEST_DMABUF_SCANOUT_REQUEST,
+ modifier: 13,
+ };
+
+ let fd: RawFd = STDOUT_FILENO;
+
+ let sender_thread = thread::spawn(move || {
+ let _: () = backend.set_dmabuf_scanout2(&request, Some(&fd)).unwrap();
+ });
+
+ let (hdr, req_body, fds) = frontend.recv_body::<VhostUserGpuDMABUFScanout2>().unwrap();
+
+ assert!(fds.is_some_and(|fds| fds.len() == 1));
+ assert_hdr(&hdr, GpuBackendReq::DMABUF_SCANOUT2, size_of_val(&request));
+ assert_eq!(req_body, request);
+
+ sender_thread.join().expect("Failed to send!");
+ }
+}
diff --git a/crates/vhost/src/vhost_user/gpu_message.rs b/crates/vhost/src/vhost_user/gpu_message.rs
new file mode 100644
index 0000000..7aa1726
--- /dev/null
+++ b/crates/vhost/src/vhost_user/gpu_message.rs
@@ -0,0 +1,423 @@
+// Copyright (C) 2024 Red Hat, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+//! Implementation of the protocol spoken over the socket provided by VHOST_USER_GPU_SET_SOCKET.
+//! See: https://www.qemu.org/docs/master/interop/vhost-user-gpu.html
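+//!
+//! A rough sketch of the wire format handled here (all fields in native byte order),
+//! based on the header struct defined below and the spec linked above:
+//!
+//! ```text
+//! request: u32 | flags: u32 | size: u32 | payload: size bytes (message-specific)
+//! ```
+//!
+//! File descriptors (e.g. a DMABUF fd) are passed as ancillary data on the socket.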
+
+use super::enum_value;
+use crate::vhost_user::message::{MsgHeader, Req, VhostUserMsgValidator};
+use crate::vhost_user::Error;
+
+use std::fmt::Debug;
+use std::marker::PhantomData;
+use vm_memory::ByteValued;
+
+enum_value! {
+ /// Type of requests sent from GPU backends to GPU frontends.
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+ #[allow(non_camel_case_types, clippy::upper_case_acronyms)]
+ pub enum GpuBackendReq: u32 {
+ /// Get the supported protocol features bitmask.
+ GET_PROTOCOL_FEATURES = 1,
+ /// Enable protocol features using a bitmask.
+ SET_PROTOCOL_FEATURES = 2,
+ /// Get the preferred display configuration.
+ GET_DISPLAY_INFO = 3,
+ /// Set/show the cursor position.
+ CURSOR_POS = 4,
+ /// Set/hide the cursor.
+ CURSOR_POS_HIDE = 5,
+ /// Update the cursor shape and location.
+ CURSOR_UPDATE = 6,
+ /// Set the scanout resolution.
+ /// To disable a scanout, the dimensions width/height are set to 0.
+ SCANOUT = 7,
+ /// Update the scanout content. The data payload contains the graphical bits.
+ /// The display should be flushed and presented.
+ UPDATE = 8,
+ /// Set the scanout resolution/configuration, and share a DMABUF file descriptor for the
+ /// scanout content, which is passed as ancillary data.
+ /// To disable a scanout, the dimensions width/height are set to 0 and no file
+ /// descriptor is passed.
+ DMABUF_SCANOUT = 9,
+ /// The display should be flushed and presented according to updated region from
+ /// VhostUserGpuUpdate.
+ /// Note: there is no data payload, since the scanout is shared thanks to DMABUF,
+ /// which must have been set previously with VHOST_USER_GPU_DMABUF_SCANOUT.
+ DMABUF_UPDATE = 10,
+ /// Retrieve the EDID data for a given scanout.
+ /// This message requires the VHOST_USER_GPU_PROTOCOL_F_EDID protocol feature to be
+ /// supported.
+ GET_EDID = 11,
+ /// Same as DMABUF_SCANOUT, but also sends the dmabuf modifiers appended to the message,
+ /// which were not provided in the other message.
+ /// This message requires the VHOST_USER_GPU_PROTOCOL_F_DMABUF2 protocol feature to be
+ /// supported.
+ DMABUF_SCANOUT2 = 12,
+ }
+}
+
+impl Req for GpuBackendReq {}
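+
+// Note (added here for clarity, not in the upstream file): `enum_value!` also generates the
+// `TryFrom<u32>` impl that `Req` requires, so an unknown request code coming off the wire is
+// reported as an error by `VhostUserGpuMsgHeader::get_code` instead of being transmuted.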
+
+// Bit mask for common message flags.
+bitflags! {
+ /// Common message flags for vhost-user requests and replies.
+ pub struct VhostUserGpuHeaderFlag: u32 {
+ /// Mark message as reply.
+ const REPLY = 0x4;
+ }
+}
+
+/// A vhost-user message consists of 3 header fields and an optional payload. All numbers are in the
+/// machine native byte order.
+#[repr(C, packed)]
+#[derive(Clone, Copy)]
+pub(super) struct VhostUserGpuMsgHeader<R: Req> {
+ request: u32,
+ flags: u32,
+ size: u32,
+ _r: PhantomData<R>,
+}
+
+impl<R: Req> Debug for VhostUserGpuMsgHeader<R> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("VhostUserGpuMsgHeader")
+ .field("request", &{ self.request })
+ .field("flags", &{ self.flags })
+ .field("size", &{ self.size })
+ .finish()
+ }
+}
+
+impl<R: Req> PartialEq for VhostUserGpuMsgHeader<R> {
+ fn eq(&self, other: &Self) -> bool {
+ self.request == other.request && self.flags == other.flags && self.size == other.size
+ }
+}
+
+#[allow(dead_code)]
+impl<R: Req> VhostUserGpuMsgHeader<R> {
+ /// Create a new instance of `VhostUserGpuMsgHeader`.
+ pub fn new(request: R, flags: u32, size: u32) -> Self {
+ VhostUserGpuMsgHeader {
+ request: request.into(),
+ flags,
+ size,
+ _r: PhantomData,
+ }
+ }
+
+ /// Get message type.
+ pub fn get_code(&self) -> crate::vhost_user::Result<R> {
+ R::try_from(self.request).map_err(|_| Error::InvalidMessage)
+ }
+
+ /// Check whether it's a reply message.
+ pub fn is_reply(&self) -> bool {
+ (self.flags & VhostUserGpuHeaderFlag::REPLY.bits()) != 0
+ }
+
+ /// Mark message as reply.
+ pub fn set_reply(&mut self, is_reply: bool) {
+ if is_reply {
+ self.flags |= VhostUserGpuHeaderFlag::REPLY.bits();
+ } else {
+ self.flags &= !VhostUserGpuHeaderFlag::REPLY.bits();
+ }
+ }
+
+ /// Check whether it's the reply message for the request `req`.
+ pub fn is_reply_for(&self, req: &VhostUserGpuMsgHeader<R>) -> bool {
+ if let (Ok(code1), Ok(code2)) = (self.get_code(), req.get_code()) {
+ self.is_reply() && !req.is_reply() && code1 == code2
+ } else {
+ false
+ }
+ }
+
+ /// Get message size.
+ pub fn get_size(&self) -> u32 {
+ self.size
+ }
+
+ /// Set message size.
+ pub fn set_size(&mut self, size: u32) {
+ self.size = size;
+ }
+}
+
+impl<R: Req> Default for VhostUserGpuMsgHeader<R> {
+ fn default() -> Self {
+ VhostUserGpuMsgHeader {
+ request: 0,
+ flags: 0,
+ size: 0,
+ _r: PhantomData,
+ }
+ }
+}
+
+// SAFETY: Safe because all fields of VhostUserGpuMsgHeader are POD.
+unsafe impl<R: Req> ByteValued for VhostUserGpuMsgHeader<R> {}
+
+impl<T: Req> VhostUserMsgValidator for VhostUserGpuMsgHeader<T> {
+ fn is_valid(&self) -> bool {
+ self.get_code().is_ok() && VhostUserGpuHeaderFlag::from_bits(self.flags).is_some()
+ }
+}
+
+impl<R: Req> MsgHeader for VhostUserGpuMsgHeader<R> {
+ type Request = R;
+ const MAX_MSG_SIZE: usize = u32::MAX as usize;
+}
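+
+// Note (added for clarity, not in the upstream file): unlike the much smaller MAX_MSG_SIZE used
+// for regular vhost-user headers, GPU messages such as UPDATE and CURSOR_UPDATE carry framebuffer
+// data (roughly width * height * 4 bytes), so this header type allows the full u32 size range.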
+
+// Bit mask for vhost-user-gpu protocol feature flags.
+bitflags! {
+ #[derive(Copy, Clone, Debug, Eq, PartialEq)]
+ /// Vhost-user-gpu protocol feature flags from the vhost-user-gpu specification.
+ pub struct VhostUserGpuProtocolFeatures: u64 {
+ /// Frontend support for EDID
+ const EDID = 0;
+ /// Frontend support for DMABUF_SCANOUT2
+ const DMABUF2 = 1;
+ }
+}
+
+/// The virtio_gpu_ctrl_hdr from virtio specification
+/// Defined here because some GpuBackend commands return virtio structs, which contain this header.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+#[repr(C)]
+pub struct VirtioGpuCtrlHdr {
+ /// Specifies the type of the driver request (VIRTIO_GPU_CMD_*)
+ /// or device response (VIRTIO_GPU_RESP_*).
+ pub type_: u32,
+ /// Request / response flags.
+ pub flags: u32,
+ /// Fence ID; used together with the VIRTIO_GPU_FLAG_FENCE bit in `flags`.
+ pub fence_id: u64,
+ /// Rendering context (used in 3D mode only).
+ pub ctx_id: u32,
+ /// ring_idx indicates the value of a context-specific ring index.
+ /// The minimum value is 0 and maximum value is 63 (inclusive).
+ pub ring_idx: u8,
+ /// padding of the structure
+ pub padding: [u8; 3],
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VirtioGpuCtrlHdr {}
+
+/// The virtio_gpu_rect struct from virtio specification.
+/// Part of the reply for GpuBackend::get_display_info
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+#[repr(C)]
+pub struct VirtioGpuRect {
+ /// The position field x describes how the displays are arranged
+ pub x: u32,
+ /// The position field y describes how the displays are arranged
+ pub y: u32,
+ /// Display resolution width
+ pub width: u32,
+ /// Display resolution height
+ pub height: u32,
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VirtioGpuRect {}
+
+/// The virtio_gpu_display_one struct from virtio specification.
+/// Part of the reply for GpuBackend::get_display_info
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+#[repr(C)]
+pub struct VirtioGpuDisplayOne {
+ /// Preferred display resolutions and display positions relative to each other
+ pub r: VirtioGpuRect,
+ /// The enabled field is set when the user enabled the display.
+ pub enabled: u32,
+ /// The display flags
+ pub flags: u32,
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VirtioGpuDisplayOne {}
+
+/// Constant for maximum number of scanouts, defined in the virtio specification.
+pub const VIRTIO_GPU_MAX_SCANOUTS: usize = 16;
+
+/// The virtio_gpu_resp_display_info from the virtio specification.
+/// This is the reply to GpuBackend::get_display_info.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+#[repr(C)]
+pub struct VirtioGpuRespDisplayInfo {
+ /// The fixed header struct
+ pub hdr: VirtioGpuCtrlHdr,
+ /// pmodes reports, for each scanout, whether it is enabled and what its
+ /// preferred position and size are
+ pub pmodes: [VirtioGpuDisplayOne; VIRTIO_GPU_MAX_SCANOUTS],
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VirtioGpuRespDisplayInfo {}
+
+impl VhostUserMsgValidator for VirtioGpuRespDisplayInfo {}
+
+/// The VhostUserGpuEdidRequest from the vhost-user-gpu specification.
+#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
+#[repr(C)]
+pub struct VhostUserGpuEdidRequest {
+ /// The id of the scanout to retrieve EDID data for
+ pub scanout_id: u32,
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VhostUserGpuEdidRequest {}
+
+impl VhostUserMsgValidator for VhostUserGpuEdidRequest {}
+
+/// The VhostUserGpuUpdate from the vhost-user-gpu specification.
+#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
+#[repr(C)]
+pub struct VhostUserGpuUpdate {
+ /// The id of the scanout that is being updated
+ pub scanout_id: u32,
+ /// The x coordinate of the region to update
+ pub x: u32,
+ /// The y coordinate of the region to update
+ pub y: u32,
+ /// The width of the region to update
+ pub width: u32,
+ /// The height of the region to update
+ pub height: u32,
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VhostUserGpuUpdate {}
+
+impl VhostUserMsgValidator for VhostUserGpuUpdate {}
+
+/// The VhostUserGpuDMABUFScanout from the vhost-user-gpu specification.
+#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
+#[repr(C)]
+pub struct VhostUserGpuDMABUFScanout {
+ /// The id of the scanout to update
+ pub scanout_id: u32,
+ /// The position field x of the scanout within the DMABUF
+ pub x: u32,
+ /// The position field y of the scanout within the DMABUF
+ pub y: u32,
+ /// Scanout width size
+ pub width: u32,
+ /// Scanout height size
+ pub height: u32,
+ /// The DMABUF width
+ pub fd_width: u32,
+ /// The DMABUF height
+ pub fd_height: u32,
+ /// The DMABUF stride
+ pub fd_stride: u32,
+ /// The DMABUF flags
+ pub fd_flags: u32,
+ /// The DMABUF fourcc
+ pub fd_drm_fourcc: u32,
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VhostUserGpuDMABUFScanout {}
+
+impl VhostUserMsgValidator for VhostUserGpuDMABUFScanout {}
+
+/// The VhostUserGpuDMABUFScanout2 from the vhost-user-gpu specification.
+#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
+#[repr(C, packed)]
+pub struct VhostUserGpuDMABUFScanout2 {
+ /// The dmabuf scanout parameters
+ pub dmabuf_scanout: VhostUserGpuDMABUFScanout,
+ /// The DMABUF modifier
+ pub modifier: u64,
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VhostUserGpuDMABUFScanout2 {}
+
+impl VhostUserMsgValidator for VhostUserGpuDMABUFScanout2 {}
+
+/// The VhostUserGpuCursorPos from the vhost-user-gpu specification.
+#[derive(Default, Copy, Clone, Debug, Eq, PartialEq)]
+#[repr(C)]
+pub struct VhostUserGpuCursorPos {
+ /// The scanout where the cursor is located
+ pub scanout_id: u32,
+ /// The cursor position field x
+ pub x: u32,
+ /// The cursor position field y
+ pub y: u32,
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VhostUserGpuCursorPos {}
+
+impl VhostUserMsgValidator for VhostUserGpuCursorPos {}
+
+/// The VhostUserGpuCursorUpdate from the vhost-user-gpu specification.
+#[derive(Default, Copy, Clone, Debug, Eq, PartialEq)]
+#[repr(C)]
+pub struct VhostUserGpuCursorUpdate {
+ /// The cursor location
+ pub pos: VhostUserGpuCursorPos,
+ /// The cursor hot location x
+ pub hot_x: u32,
+ /// The cursor hot location y
+ pub hot_y: u32,
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VhostUserGpuCursorUpdate {}
+
+impl VhostUserMsgValidator for VhostUserGpuCursorUpdate {}
+
+/// The virtio_gpu_resp_edid struct from the virtio specification.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+#[repr(C)]
+pub struct VirtioGpuRespGetEdid {
+ /// The fixed header struct
+ pub hdr: VirtioGpuCtrlHdr,
+ /// The actual size of the `edid` field.
+ pub size: u32,
+ /// Padding of the structure
+ pub padding: u32,
+ /// The EDID display data blob (as specified by VESA) for the scanout.
+ pub edid: [u8; 1024],
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VirtioGpuRespGetEdid {}
+
+impl Default for VirtioGpuRespGetEdid {
+ fn default() -> Self {
+ VirtioGpuRespGetEdid {
+ hdr: VirtioGpuCtrlHdr::default(),
+ size: u32::default(),
+ padding: u32::default(),
+ edid: [0; 1024], // Default value for the edid array (filled with zeros)
+ }
+ }
+}
+
+impl VhostUserMsgValidator for VirtioGpuRespGetEdid {}
+
+/// The VhostUserGpuScanout from the vhost-user-gpu specification.
+#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
+#[repr(C)]
+pub struct VhostUserGpuScanout {
+ /// The id of the scanout
+ pub scanout_id: u32,
+ /// The scanout width
+ pub width: u32,
+ /// The scanout height
+ pub height: u32,
+}
+
+// SAFETY: Safe because all fields are POD.
+unsafe impl ByteValued for VhostUserGpuScanout {}
+
+impl VhostUserMsgValidator for VhostUserGpuScanout {}
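+
+// Illustrative test sketch (added in this write-up, not part of the upstream file): exercises the
+// REPLY flag handling that GpuBackend and the frontend endpoint rely on to match replies to
+// requests.
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn gpu_msg_header_reply_flag_roundtrip() {
+ let req = VhostUserGpuMsgHeader::new(GpuBackendReq::GET_DISPLAY_INFO, 0, 0);
+ assert!(!req.is_reply());
+ assert!(req.is_valid());
+
+ let mut resp = VhostUserGpuMsgHeader::new(
+ GpuBackendReq::GET_DISPLAY_INFO,
+ VhostUserGpuHeaderFlag::REPLY.bits(),
+ 0,
+ );
+ assert!(resp.is_reply());
+ assert!(resp.is_reply_for(&req));
+
+ resp.set_reply(false);
+ assert!(!resp.is_reply_for(&req));
+ }
+}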
diff --git a/crates/vhost/src/vhost_user/master_req_handler.rs b/crates/vhost/src/vhost_user/master_req_handler.rs
deleted file mode 100644
index c9c528b..0000000
--- a/crates/vhost/src/vhost_user/master_req_handler.rs
+++ /dev/null
@@ -1,466 +0,0 @@
-// Copyright (C) 2019-2021 Alibaba Cloud. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-use std::fs::File;
-use std::mem;
-use std::os::unix::io::{AsRawFd, RawFd};
-use std::os::unix::net::UnixStream;
-use std::sync::{Arc, Mutex};
-
-use super::connection::Endpoint;
-use super::message::*;
-use super::{Error, HandlerResult, Result};
-
-/// Define services provided by masters for the slave communication channel.
-///
-/// The vhost-user specification defines a slave communication channel, by which slaves could
-/// request services from masters. The [VhostUserMasterReqHandler] trait defines services provided
-/// by masters, and it's used both on the master side and slave side.
-/// - on the slave side, a stub forwarder implementing [VhostUserMasterReqHandler] will proxy
-/// service requests to masters. The [Slave] is an example stub forwarder.
-/// - on the master side, the [MasterReqHandler] will forward service requests to a handler
-/// implementing [VhostUserMasterReqHandler].
-///
-/// The [VhostUserMasterReqHandler] trait is design with interior mutability to improve performance
-/// for multi-threading.
-///
-/// [VhostUserMasterReqHandler]: trait.VhostUserMasterReqHandler.html
-/// [MasterReqHandler]: struct.MasterReqHandler.html
-/// [Slave]: struct.Slave.html
-pub trait VhostUserMasterReqHandler {
- /// Handle device configuration change notifications.
- fn handle_config_change(&self) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs map file requests.
- fn fs_slave_map(&self, _fs: &VhostUserFSSlaveMsg, _fd: &dyn AsRawFd) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs unmap file requests.
- fn fs_slave_unmap(&self, _fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs sync file requests.
- fn fs_slave_sync(&self, _fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs file IO requests.
- fn fs_slave_io(&self, _fs: &VhostUserFSSlaveMsg, _fd: &dyn AsRawFd) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- // fn handle_iotlb_msg(&mut self, iotlb: VhostUserIotlb);
- // fn handle_vring_host_notifier(&mut self, area: VhostUserVringArea, fd: &dyn AsRawFd);
-}
-
-/// A helper trait mirroring [VhostUserMasterReqHandler] but without interior mutability.
-///
-/// [VhostUserMasterReqHandler]: trait.VhostUserMasterReqHandler.html
-pub trait VhostUserMasterReqHandlerMut {
- /// Handle device configuration change notifications.
- fn handle_config_change(&mut self) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs map file requests.
- fn fs_slave_map(&mut self, _fs: &VhostUserFSSlaveMsg, _fd: &dyn AsRawFd) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs unmap file requests.
- fn fs_slave_unmap(&mut self, _fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs sync file requests.
- fn fs_slave_sync(&mut self, _fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- /// Handle virtio-fs file IO requests.
- fn fs_slave_io(&mut self, _fs: &VhostUserFSSlaveMsg, _fd: &dyn AsRawFd) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
-
- // fn handle_iotlb_msg(&mut self, iotlb: VhostUserIotlb);
- // fn handle_vring_host_notifier(&mut self, area: VhostUserVringArea, fd: RawFd);
-}
-
-impl<S: VhostUserMasterReqHandlerMut> VhostUserMasterReqHandler for Mutex<S> {
- fn handle_config_change(&self) -> HandlerResult<u64> {
- self.lock().unwrap().handle_config_change()
- }
-
- fn fs_slave_map(&self, fs: &VhostUserFSSlaveMsg, fd: &dyn AsRawFd) -> HandlerResult<u64> {
- self.lock().unwrap().fs_slave_map(fs, fd)
- }
-
- fn fs_slave_unmap(&self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- self.lock().unwrap().fs_slave_unmap(fs)
- }
-
- fn fs_slave_sync(&self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- self.lock().unwrap().fs_slave_sync(fs)
- }
-
- fn fs_slave_io(&self, fs: &VhostUserFSSlaveMsg, fd: &dyn AsRawFd) -> HandlerResult<u64> {
- self.lock().unwrap().fs_slave_io(fs, fd)
- }
-}
-
-/// Server to handle service requests from slaves from the slave communication channel.
-///
-/// The [MasterReqHandler] acts as a server on the master side, to handle service requests from
-/// slaves on the slave communication channel. It's actually a proxy invoking the registered
-/// handler implementing [VhostUserMasterReqHandler] to do the real work.
-///
-/// [MasterReqHandler]: struct.MasterReqHandler.html
-/// [VhostUserMasterReqHandler]: trait.VhostUserMasterReqHandler.html
-pub struct MasterReqHandler<S: VhostUserMasterReqHandler> {
- // underlying Unix domain socket for communication
- sub_sock: Endpoint<SlaveReq>,
- tx_sock: UnixStream,
- // Protocol feature VHOST_USER_PROTOCOL_F_REPLY_ACK has been negotiated.
- reply_ack_negotiated: bool,
- // the VirtIO backend device object
- backend: Arc<S>,
- // whether the endpoint has encountered any failure
- error: Option<i32>,
-}
-
-impl<S: VhostUserMasterReqHandler> MasterReqHandler<S> {
- /// Create a server to handle service requests from slaves on the slave communication channel.
- ///
- /// This opens a pair of connected anonymous sockets to form the slave communication channel.
- /// The socket fd returned by [Self::get_tx_raw_fd()] should be sent to the slave by
- /// [VhostUserMaster::set_slave_request_fd()].
- ///
- /// [Self::get_tx_raw_fd()]: struct.MasterReqHandler.html#method.get_tx_raw_fd
- /// [VhostUserMaster::set_slave_request_fd()]: trait.VhostUserMaster.html#tymethod.set_slave_request_fd
- pub fn new(backend: Arc<S>) -> Result<Self> {
- let (tx, rx) = UnixStream::pair().map_err(Error::SocketError)?;
-
- Ok(MasterReqHandler {
- sub_sock: Endpoint::<SlaveReq>::from_stream(rx),
- tx_sock: tx,
- reply_ack_negotiated: false,
- backend,
- error: None,
- })
- }
-
- /// Get the socket fd for the slave to communication with the master.
- ///
- /// The returned fd should be sent to the slave by [VhostUserMaster::set_slave_request_fd()].
- ///
- /// [VhostUserMaster::set_slave_request_fd()]: trait.VhostUserMaster.html#tymethod.set_slave_request_fd
- pub fn get_tx_raw_fd(&self) -> RawFd {
- self.tx_sock.as_raw_fd()
- }
-
- /// Set the negotiation state of the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature.
- ///
- /// When the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature has been negotiated,
- /// the "REPLY_ACK" flag will be set in the message header for every slave to master request
- /// message.
- pub fn set_reply_ack_flag(&mut self, enable: bool) {
- self.reply_ack_negotiated = enable;
- }
-
- /// Mark endpoint as failed or in normal state.
- pub fn set_failed(&mut self, error: i32) {
- if error == 0 {
- self.error = None;
- } else {
- self.error = Some(error);
- }
- }
-
- /// Main entrance to server slave request from the slave communication channel.
- ///
- /// The caller needs to:
- /// - serialize calls to this function
- /// - decide what to do when errer happens
- /// - optional recover from failure
- pub fn handle_request(&mut self) -> Result<u64> {
- // Return error if the endpoint is already in failed state.
- self.check_state()?;
-
- // The underlying communication channel is a Unix domain socket in
- // stream mode, and recvmsg() is a little tricky here. To successfully
- // receive attached file descriptors, we need to receive messages and
- // corresponding attached file descriptors in this way:
- // . recv messsage header and optional attached file
- // . validate message header
- // . recv optional message body and payload according size field in
- // message header
- // . validate message body and optional payload
- let (hdr, files) = self.sub_sock.recv_header()?;
- self.check_attached_files(&hdr, &files)?;
- let (size, buf) = match hdr.get_size() {
- 0 => (0, vec![0u8; 0]),
- len => {
- if len as usize > MAX_MSG_SIZE {
- return Err(Error::InvalidMessage);
- }
- let (size2, rbuf) = self.sub_sock.recv_data(len as usize)?;
- if size2 != len as usize {
- return Err(Error::InvalidMessage);
- }
- (size2, rbuf)
- }
- };
-
- let res = match hdr.get_code() {
- Ok(SlaveReq::CONFIG_CHANGE_MSG) => {
- self.check_msg_size(&hdr, size, 0)?;
- self.backend
- .handle_config_change()
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::FS_MAP) => {
- let msg = self.extract_msg_body::<VhostUserFSSlaveMsg>(&hdr, size, &buf)?;
- // check_attached_files() has validated files
- self.backend
- .fs_slave_map(&msg, &files.unwrap()[0])
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::FS_UNMAP) => {
- let msg = self.extract_msg_body::<VhostUserFSSlaveMsg>(&hdr, size, &buf)?;
- self.backend
- .fs_slave_unmap(&msg)
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::FS_SYNC) => {
- let msg = self.extract_msg_body::<VhostUserFSSlaveMsg>(&hdr, size, &buf)?;
- self.backend
- .fs_slave_sync(&msg)
- .map_err(Error::ReqHandlerError)
- }
- Ok(SlaveReq::FS_IO) => {
- let msg = self.extract_msg_body::<VhostUserFSSlaveMsg>(&hdr, size, &buf)?;
- // check_attached_files() has validated files
- self.backend
- .fs_slave_io(&msg, &files.unwrap()[0])
- .map_err(Error::ReqHandlerError)
- }
- _ => Err(Error::InvalidMessage),
- };
-
- self.send_ack_message(&hdr, &res)?;
-
- res
- }
-
- fn check_state(&self) -> Result<()> {
- match self.error {
- Some(e) => Err(Error::SocketBroken(std::io::Error::from_raw_os_error(e))),
- None => Ok(()),
- }
- }
-
- fn check_msg_size(
- &self,
- hdr: &VhostUserMsgHeader<SlaveReq>,
- size: usize,
- expected: usize,
- ) -> Result<()> {
- if hdr.get_size() as usize != expected
- || hdr.is_reply()
- || hdr.get_version() != 0x1
- || size != expected
- {
- return Err(Error::InvalidMessage);
- }
- Ok(())
- }
-
- fn check_attached_files(
- &self,
- hdr: &VhostUserMsgHeader<SlaveReq>,
- files: &Option<Vec<File>>,
- ) -> Result<()> {
- match hdr.get_code() {
- Ok(SlaveReq::FS_MAP | SlaveReq::FS_IO) => {
- // Expect a single file is passed.
- match files {
- Some(files) if files.len() == 1 => Ok(()),
- _ => Err(Error::InvalidMessage),
- }
- }
- _ if files.is_some() => Err(Error::InvalidMessage),
- _ => Ok(()),
- }
- }
-
- fn extract_msg_body<T: Sized + VhostUserMsgValidator>(
- &self,
- hdr: &VhostUserMsgHeader<SlaveReq>,
- size: usize,
- buf: &[u8],
- ) -> Result<T> {
- self.check_msg_size(hdr, size, mem::size_of::<T>())?;
- // SAFETY: Safe because we checked that `buf` size is equal to T size.
- let msg = unsafe { std::ptr::read_unaligned(buf.as_ptr() as *const T) };
- if !msg.is_valid() {
- return Err(Error::InvalidMessage);
- }
- Ok(msg)
- }
-
- fn new_reply_header<T: Sized>(
- &self,
- req: &VhostUserMsgHeader<SlaveReq>,
- ) -> Result<VhostUserMsgHeader<SlaveReq>> {
- if mem::size_of::<T>() > MAX_MSG_SIZE {
- return Err(Error::InvalidParam);
- }
- self.check_state()?;
- Ok(VhostUserMsgHeader::new(
- req.get_code()?,
- VhostUserHeaderFlag::REPLY.bits(),
- mem::size_of::<T>() as u32,
- ))
- }
-
- fn send_ack_message(
- &mut self,
- req: &VhostUserMsgHeader<SlaveReq>,
- res: &Result<u64>,
- ) -> Result<()> {
- if self.reply_ack_negotiated && req.is_need_reply() {
- let hdr = self.new_reply_header::<VhostUserU64>(req)?;
- let def_err = libc::EINVAL;
- let val = match res {
- Ok(n) => *n,
- Err(e) => match e {
- Error::ReqHandlerError(ioerr) => match ioerr.raw_os_error() {
- Some(rawerr) => -rawerr as u64,
- None => -def_err as u64,
- },
- _ => -def_err as u64,
- },
- };
- let msg = VhostUserU64::new(val);
- self.sub_sock.send_message(&hdr, &msg, None)?;
- }
- Ok(())
- }
-}
-
-impl<S: VhostUserMasterReqHandler> AsRawFd for MasterReqHandler<S> {
- fn as_raw_fd(&self) -> RawFd {
- self.sub_sock.as_raw_fd()
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[cfg(feature = "vhost-user-slave")]
- use crate::vhost_user::Slave;
- #[cfg(feature = "vhost-user-slave")]
- use std::os::unix::io::FromRawFd;
-
- struct MockMasterReqHandler {}
-
- impl VhostUserMasterReqHandlerMut for MockMasterReqHandler {
- /// Handle virtio-fs map file requests from the slave.
- fn fs_slave_map(
- &mut self,
- _fs: &VhostUserFSSlaveMsg,
- _fd: &dyn AsRawFd,
- ) -> HandlerResult<u64> {
- Ok(0)
- }
-
- /// Handle virtio-fs unmap file requests from the slave.
- fn fs_slave_unmap(&mut self, _fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- Err(std::io::Error::from_raw_os_error(libc::ENOSYS))
- }
- }
-
- #[test]
- fn test_new_master_req_handler() {
- let backend = Arc::new(Mutex::new(MockMasterReqHandler {}));
- let mut handler = MasterReqHandler::new(backend).unwrap();
-
- assert!(handler.get_tx_raw_fd() >= 0);
- assert!(handler.as_raw_fd() >= 0);
- handler.check_state().unwrap();
-
- assert_eq!(handler.error, None);
- handler.set_failed(libc::EAGAIN);
- assert_eq!(handler.error, Some(libc::EAGAIN));
- handler.check_state().unwrap_err();
- }
-
- #[cfg(feature = "vhost-user-slave")]
- #[test]
- fn test_master_slave_req_handler() {
- let backend = Arc::new(Mutex::new(MockMasterReqHandler {}));
- let mut handler = MasterReqHandler::new(backend).unwrap();
-
- // SAFETY: Safe because `handler` contains valid fds, and we are
- // checking if `dup` returns a valid fd.
- let fd = unsafe { libc::dup(handler.get_tx_raw_fd()) };
- if fd < 0 {
- panic!("failed to duplicated tx fd!");
- }
- // SAFETY: Safe because we checked if fd is valid.
- let stream = unsafe { UnixStream::from_raw_fd(fd) };
- let slave = Slave::from_stream(stream);
-
- std::thread::spawn(move || {
- let res = handler.handle_request().unwrap();
- assert_eq!(res, 0);
- handler.handle_request().unwrap_err();
- });
-
- slave
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &fd)
- .unwrap();
- // When REPLY_ACK has not been negotiated, the master has no way to detect failure from
- // slave side.
- slave
- .fs_slave_unmap(&VhostUserFSSlaveMsg::default())
- .unwrap();
- }
-
- #[cfg(feature = "vhost-user-slave")]
- #[test]
- fn test_master_slave_req_handler_with_ack() {
- let backend = Arc::new(Mutex::new(MockMasterReqHandler {}));
- let mut handler = MasterReqHandler::new(backend).unwrap();
- handler.set_reply_ack_flag(true);
-
- // SAFETY: Safe because `handler` contains valid fds, and we are
- // checking if `dup` returns a valid fd.
- let fd = unsafe { libc::dup(handler.get_tx_raw_fd()) };
- if fd < 0 {
- panic!("failed to duplicated tx fd!");
- }
- // SAFETY: Safe because we checked if fd is valid.
- let stream = unsafe { UnixStream::from_raw_fd(fd) };
- let slave = Slave::from_stream(stream);
-
- std::thread::spawn(move || {
- let res = handler.handle_request().unwrap();
- assert_eq!(res, 0);
- handler.handle_request().unwrap_err();
- });
-
- slave.set_reply_ack_flag(true);
- slave
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &fd)
- .unwrap();
- slave
- .fs_slave_unmap(&VhostUserFSSlaveMsg::default())
- .unwrap_err();
- }
-}
diff --git a/crates/vhost/src/vhost_user/message.rs b/crates/vhost/src/vhost_user/message.rs
index bbd8eb9..c66bd44 100644
--- a/crates/vhost/src/vhost_user/message.rs
+++ b/crates/vhost/src/vhost_user/message.rs
@@ -15,14 +15,25 @@
use std::marker::PhantomData;
use std::ops::Deref;
+use uuid::Uuid;
+
use vm_memory::{mmap::NewBitmap, ByteValued, Error as MmapError, FileOffset, MmapRegion};
#[cfg(feature = "xen")]
use vm_memory::{GuestAddress, MmapRange, MmapXenFlags};
-use super::{Error, Result};
+use super::{enum_value, Error, Result};
use crate::VringConfigData;
+/*
+TODO: Consider deprecating this. We don't actually have any preallocated buffers except in tests,
+so we should be able to support u32::MAX normally.
+Also, this doesn't need to be public API: since Endpoint is private anyway, it doesn't seem
+useful for consumers of this crate.
+
+There are GPU-specific messages (GpuBackendReq::UPDATE and CURSOR_UPDATE) that are larger than 4K.
+We can use MsgHeader::MAX_MSG_SIZE if we want to support larger messages only for GPU headers.
+*/
/// The vhost-user specification uses a field of u32 to store message length.
/// On the other hand, preallocated buffers are needed to receive messages from the Unix domain
/// socket. To preallocating a 4GB buffer for each vhost-user message is really just an overhead.
@@ -50,160 +61,146 @@
pub const VHOST_USER_MAX_VRINGS: u64 = 0x8000u64;
pub(super) trait Req:
- Clone + Copy + Debug + PartialEq + Eq + PartialOrd + Ord + Send + Sync + Into<u32>
+ Clone + Copy + Debug + PartialEq + Eq + PartialOrd + Ord + Send + Sync + Into<u32> + TryFrom<u32>
{
- fn is_valid(value: u32) -> bool;
}
-/// Type of requests sending from masters to slaves.
-#[repr(u32)]
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub enum MasterReq {
- /// Null operation.
- NOOP = 0,
- /// Get from the underlying vhost implementation the features bit mask.
- GET_FEATURES = 1,
- /// Enable features in the underlying vhost implementation using a bit mask.
- SET_FEATURES = 2,
- /// Set the current Master as an owner of the session.
- SET_OWNER = 3,
- /// No longer used.
- RESET_OWNER = 4,
- /// Set the memory map regions on the slave so it can translate the vring addresses.
- SET_MEM_TABLE = 5,
- /// Set logging shared memory space.
- SET_LOG_BASE = 6,
- /// Set the logging file descriptor, which is passed as ancillary data.
- SET_LOG_FD = 7,
- /// Set the size of the queue.
- SET_VRING_NUM = 8,
- /// Set the addresses of the different aspects of the vring.
- SET_VRING_ADDR = 9,
- /// Set the base offset in the available vring.
- SET_VRING_BASE = 10,
- /// Get the available vring base offset.
- GET_VRING_BASE = 11,
- /// Set the event file descriptor for adding buffers to the vring.
- SET_VRING_KICK = 12,
- /// Set the event file descriptor to signal when buffers are used.
- SET_VRING_CALL = 13,
- /// Set the event file descriptor to signal when error occurs.
- SET_VRING_ERR = 14,
- /// Get the protocol feature bit mask from the underlying vhost implementation.
- GET_PROTOCOL_FEATURES = 15,
- /// Enable protocol features in the underlying vhost implementation.
- SET_PROTOCOL_FEATURES = 16,
- /// Query how many queues the backend supports.
- GET_QUEUE_NUM = 17,
- /// Signal slave to enable or disable corresponding vring.
- SET_VRING_ENABLE = 18,
- /// Ask vhost user backend to broadcast a fake RARP to notify the migration is terminated
- /// for guest that does not support GUEST_ANNOUNCE.
- SEND_RARP = 19,
- /// Set host MTU value exposed to the guest.
- NET_SET_MTU = 20,
- /// Set the socket file descriptor for slave initiated requests.
- SET_SLAVE_REQ_FD = 21,
- /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
- IOTLB_MSG = 22,
- /// Set the endianness of a VQ for legacy devices.
- SET_VRING_ENDIAN = 23,
- /// Fetch the contents of the virtio device configuration space.
- GET_CONFIG = 24,
- /// Change the contents of the virtio device configuration space.
- SET_CONFIG = 25,
- /// Create a session for crypto operation.
- CREATE_CRYPTO_SESSION = 26,
- /// Close a session for crypto operation.
- CLOSE_CRYPTO_SESSION = 27,
- /// Advise slave that a migration with postcopy enabled is underway.
- POSTCOPY_ADVISE = 28,
- /// Advise slave that a transition to postcopy mode has happened.
- POSTCOPY_LISTEN = 29,
- /// Advise that postcopy migration has now completed.
- POSTCOPY_END = 30,
- /// Get a shared buffer from slave.
- GET_INFLIGHT_FD = 31,
- /// Send the shared inflight buffer back to slave.
- SET_INFLIGHT_FD = 32,
- /// Sets the GPU protocol socket file descriptor.
- GPU_SET_SOCKET = 33,
- /// Ask the vhost user backend to disable all rings and reset all internal
- /// device state to the initial state.
- RESET_DEVICE = 34,
- /// Indicate that a buffer was added to the vring instead of signalling it
- /// using the vring’s kick file descriptor.
- VRING_KICK = 35,
- /// Return a u64 payload containing the maximum number of memory slots.
- GET_MAX_MEM_SLOTS = 36,
- /// Update the memory tables by adding the region described.
- ADD_MEM_REG = 37,
- /// Update the memory tables by removing the region described.
- REM_MEM_REG = 38,
- /// Notify the backend with updated device status as defined in the VIRTIO
- /// specification.
- SET_STATUS = 39,
- /// Query the backend for its device status as defined in the VIRTIO
- /// specification.
- GET_STATUS = 40,
- /// Upper bound of valid commands.
- MAX_CMD = 41,
+pub(super) trait MsgHeader: ByteValued + Copy + Default + VhostUserMsgValidator {
+ type Request: Req;
+
+ /// The maximum size of a message that can be encapsulated by this MsgHeader.
+ const MAX_MSG_SIZE: usize;
}
-impl From<MasterReq> for u32 {
- fn from(req: MasterReq) -> u32 {
- req as u32
+enum_value! {
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+ /// Type of requests sent from frontends to backends.
+ pub enum FrontendReq: u32 {
+ /// Get from the underlying vhost implementation the features bit mask.
+ GET_FEATURES = 1,
+ /// Enable features in the underlying vhost implementation using a bit mask.
+ SET_FEATURES = 2,
+ /// Set the current Frontend as an owner of the session.
+ SET_OWNER = 3,
+ /// No longer used.
+ RESET_OWNER = 4,
+ /// Set the memory map regions on the backend so it can translate the vring addresses.
+ SET_MEM_TABLE = 5,
+ /// Set logging shared memory space.
+ SET_LOG_BASE = 6,
+ /// Set the logging file descriptor, which is passed as ancillary data.
+ SET_LOG_FD = 7,
+ /// Set the size of the queue.
+ SET_VRING_NUM = 8,
+ /// Set the addresses of the different aspects of the vring.
+ SET_VRING_ADDR = 9,
+ /// Set the base offset in the available vring.
+ SET_VRING_BASE = 10,
+ /// Get the available vring base offset.
+ GET_VRING_BASE = 11,
+ /// Set the event file descriptor for adding buffers to the vring.
+ SET_VRING_KICK = 12,
+ /// Set the event file descriptor to signal when buffers are used.
+ SET_VRING_CALL = 13,
+ /// Set the event file descriptor to signal when error occurs.
+ SET_VRING_ERR = 14,
+ /// Get the protocol feature bit mask from the underlying vhost implementation.
+ GET_PROTOCOL_FEATURES = 15,
+ /// Enable protocol features in the underlying vhost implementation.
+ SET_PROTOCOL_FEATURES = 16,
+ /// Query how many queues the backend supports.
+ GET_QUEUE_NUM = 17,
+ /// Signal backend to enable or disable corresponding vring.
+ SET_VRING_ENABLE = 18,
+ /// Ask the vhost-user backend to broadcast a fake RARP to notify that migration has
+ /// terminated, for guests that do not support GUEST_ANNOUNCE.
+ SEND_RARP = 19,
+ /// Set host MTU value exposed to the guest.
+ NET_SET_MTU = 20,
+ /// Set the socket file descriptor for backend initiated requests.
+ SET_BACKEND_REQ_FD = 21,
+ /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
+ IOTLB_MSG = 22,
+ /// Set the endianness of a VQ for legacy devices.
+ SET_VRING_ENDIAN = 23,
+ /// Fetch the contents of the virtio device configuration space.
+ GET_CONFIG = 24,
+ /// Change the contents of the virtio device configuration space.
+ SET_CONFIG = 25,
+ /// Create a session for crypto operation.
+ CREATE_CRYPTO_SESSION = 26,
+ /// Close a session for crypto operation.
+ CLOSE_CRYPTO_SESSION = 27,
+ /// Advise backend that a migration with postcopy enabled is underway.
+ POSTCOPY_ADVISE = 28,
+ /// Advise backend that a transition to postcopy mode has happened.
+ POSTCOPY_LISTEN = 29,
+ /// Advise that postcopy migration has now completed.
+ POSTCOPY_END = 30,
+ /// Get a shared buffer from backend.
+ GET_INFLIGHT_FD = 31,
+ /// Send the shared inflight buffer back to backend.
+ SET_INFLIGHT_FD = 32,
+ /// Sets the GPU protocol socket file descriptor.
+ GPU_SET_SOCKET = 33,
+ /// Ask the vhost user backend to disable all rings and reset all internal
+ /// device state to the initial state.
+ RESET_DEVICE = 34,
+ /// Indicate that a buffer was added to the vring instead of signalling it
+ /// using the vring’s kick file descriptor.
+ VRING_KICK = 35,
+ /// Return a u64 payload containing the maximum number of memory slots.
+ GET_MAX_MEM_SLOTS = 36,
+ /// Update the memory tables by adding the region described.
+ ADD_MEM_REG = 37,
+ /// Update the memory tables by removing the region described.
+ REM_MEM_REG = 38,
+ /// Notify the backend with updated device status as defined in the VIRTIO
+ /// specification.
+ SET_STATUS = 39,
+ /// Query the backend for its device status as defined in the VIRTIO
+ /// specification.
+ GET_STATUS = 40,
+ /// Retrieve a shared object from the device.
+ GET_SHARED_OBJECT = 41,
+ /// Begin transfer of internal state to/from the backend for migration
+ /// purposes.
+ SET_DEVICE_STATE_FD = 42,
+ /// After transferring state, check the backend for any errors that may have
+ /// occurred during the transfer
+ CHECK_DEVICE_STATE = 43,
}
}
-impl Req for MasterReq {
- fn is_valid(value: u32) -> bool {
- (value > MasterReq::NOOP as u32) && (value < MasterReq::MAX_CMD as u32)
+impl Req for FrontendReq {}
+
+enum_value! {
+ /// Type of requests sent from backends to frontends.
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+ pub enum BackendReq: u32 {
+ /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
+ IOTLB_MSG = 1,
+ /// Notify that the virtio device's configuration space has changed.
+ CONFIG_CHANGE_MSG = 2,
+ /// Set host notifier for a specified queue.
+ VRING_HOST_NOTIFIER_MSG = 3,
+ /// Indicate that a buffer was used from the vring.
+ VRING_CALL = 4,
+ /// Indicate that an error occurred on the specific vring.
+ VRING_ERR = 5,
+ /// Add a virtio shared object.
+ SHARED_OBJECT_ADD = 6,
+ /// Remove a virtio shared object.
+ SHARED_OBJECT_REMOVE = 7,
+ /// Lookup for a virtio shared object.
+ SHARED_OBJECT_LOOKUP = 8,
}
}
-/// Type of requests sending from slaves to masters.
-#[repr(u32)]
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub enum SlaveReq {
- /// Null operation.
- NOOP = 0,
- /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
- IOTLB_MSG = 1,
- /// Notify that the virtio device's configuration space has changed.
- CONFIG_CHANGE_MSG = 2,
- /// Set host notifier for a specified queue.
- VRING_HOST_NOTIFIER_MSG = 3,
- /// Indicate that a buffer was used from the vring.
- VRING_CALL = 4,
- /// Indicate that an error occurred on the specific vring.
- VRING_ERR = 5,
- /// Virtio-fs draft: map file content into the window.
- FS_MAP = 6,
- /// Virtio-fs draft: unmap file content from the window.
- FS_UNMAP = 7,
- /// Virtio-fs draft: sync file content.
- FS_SYNC = 8,
- /// Virtio-fs draft: perform a read/write from an fd directly to GPA.
- FS_IO = 9,
- /// Upper bound of valid commands.
- MAX_CMD = 10,
-}
-
-impl From<SlaveReq> for u32 {
- fn from(req: SlaveReq) -> u32 {
- req as u32
- }
-}
-
-impl Req for SlaveReq {
- fn is_valid(value: u32) -> bool {
- (value > SlaveReq::NOOP as u32) && (value < SlaveReq::MAX_CMD as u32)
- }
-}
+impl Req for BackendReq {}
/// Vhost message Validator.
-pub trait VhostUserMsgValidator {
+pub trait VhostUserMsgValidator: ByteValued {
/// Validate message syntax only.
/// It doesn't validate message semantics such as protocol version number and dependency
/// on feature flags etc.
@@ -232,7 +229,7 @@
/// Common message header for vhost-user requests and replies.
/// A vhost-user message consists of 3 header fields and an optional payload. All numbers are in the
/// machine native byte order.
-#[repr(packed)]
+#[repr(C, packed)]
#[derive(Copy)]
pub(super) struct VhostUserMsgHeader<R: Req> {
request: u32,
@@ -241,6 +238,11 @@
_r: PhantomData<R>,
}
+impl<R: Req> MsgHeader for VhostUserMsgHeader<R> {
+ type Request = R;
+ const MAX_MSG_SIZE: usize = MAX_MSG_SIZE;
+}
+
impl<R: Req> Debug for VhostUserMsgHeader<R> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("VhostUserMsgHeader")
@@ -278,12 +280,7 @@
/// Get message type.
pub fn get_code(&self) -> Result<R> {
- if R::is_valid(self.request) {
- // SAFETY: It's safe because R is marked as repr(u32), and the value is valid.
- Ok(unsafe { std::mem::transmute_copy::<u32, R>(&{ self.request }) })
- } else {
- Err(Error::InvalidMessage)
- }
+ R::try_from(self.request).map_err(|_| Error::InvalidMessage)
}
/// Set message type.
@@ -382,8 +379,11 @@
// Bit mask for transport specific flags in VirtIO feature set defined by vhost-user.
bitflags! {
+ #[derive(Copy, Clone, Debug, Eq, PartialEq)]
/// Transport specific flags in VirtIO feature set defined by vhost-user.
pub struct VhostUserVirtioFeatures: u64 {
+ /// Log dirtied shared memory pages.
+ const LOG_ALL = 0x400_0000;
/// Feature flag for the protocol feature.
const PROTOCOL_FEATURES = 0x4000_0000;
}
@@ -391,6 +391,7 @@
// Bit mask for vhost-user protocol feature flags.
bitflags! {
+ #[derive(Copy, Clone, Debug, Eq, PartialEq)]
/// Vhost-user protocol feature flags.
pub struct VhostUserProtocolFeatures: u64 {
/// Support multiple queues.
@@ -403,19 +404,19 @@
const REPLY_ACK = 0x0000_0008;
/// Support setting MTU for virtio-net devices.
const MTU = 0x0000_0010;
- /// Allow the slave to send requests to the master by an optional communication channel.
- const SLAVE_REQ = 0x0000_0020;
- /// Support setting slave endian by SET_VRING_ENDIAN.
+ /// Allow the backend to send requests to the frontend by an optional communication channel.
+ const BACKEND_REQ = 0x0000_0020;
+ /// Support setting backend endian by SET_VRING_ENDIAN.
const CROSS_ENDIAN = 0x0000_0040;
/// Support crypto operations.
const CRYPTO_SESSION = 0x0000_0080;
- /// Support sending userfault_fd from slaves to masters.
+ /// Support sending userfault_fd from backends to frontends.
const PAGEFAULT = 0x0000_0100;
/// Support Virtio device configuration.
const CONFIG = 0x0000_0200;
- /// Allow the slave to send fds (at most 8 descriptors in each message) to the master.
- const SLAVE_SEND_FD = 0x0000_0400;
- /// Allow the slave to register a host notifier.
+ /// Allow the backend to send fds (at most 8 descriptors in each message) to the frontend.
+ const BACKEND_SEND_FD = 0x0000_0400;
+ /// Allow the backend to register a host notifier.
const HOST_NOTIFIER = 0x0000_0800;
/// Support inflight shmfd.
const INFLIGHT_SHMFD = 0x0000_1000;
@@ -429,11 +430,24 @@
const STATUS = 0x0001_0000;
/// Support Xen mmap.
const XEN_MMAP = 0x0002_0000;
+ /// Support shared objects.
+ const SHARED_OBJECT = 0x0004_0000;
+ /// Support transferring internal device state.
+ const DEVICE_STATE = 0x0008_0000;
}
}
+/// An empty message.
+#[derive(Copy, Clone, Default)]
+pub struct VhostUserEmpty;
+
+// SAFETY: Safe because type is zero size.
+unsafe impl ByteValued for VhostUserEmpty {}
+
+impl VhostUserMsgValidator for VhostUserEmpty {}
+
/// A generic message to encapsulate a 64-bit value.
-#[repr(packed)]
+#[repr(transparent)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserU64 {
/// The encapsulated 64-bit common value.
@@ -453,7 +467,7 @@
impl VhostUserMsgValidator for VhostUserU64 {}
/// Memory region descriptor for the SET_MEM_TABLE request.
-#[repr(packed)]
+#[repr(C, packed)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserMemory {
/// Number of memory regions in the payload.
@@ -488,7 +502,7 @@
}
/// Memory region descriptors as payload for the SET_MEM_TABLE request.
-#[repr(packed)]
+#[repr(C, packed)]
#[derive(Default, Clone, Copy)]
pub struct VhostUserMemoryRegion {
/// Guest physical address of the memory region.
@@ -594,6 +608,9 @@
}
}
+// SAFETY: Safe because all fields of VhostUserMemoryRegion are POD.
+unsafe impl ByteValued for VhostUserMemoryRegion {}
+
impl VhostUserMsgValidator for VhostUserMemoryRegion {
fn is_valid(&self) -> bool {
self.is_valid()
@@ -668,7 +685,7 @@
impl VhostUserMsgValidator for VhostUserSingleMemoryRegion {}
/// Vring state descriptor.
-#[repr(packed)]
+#[repr(C, packed)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserVringState {
/// Vring index.
@@ -700,7 +717,7 @@
}
/// Vring address descriptor.
-#[repr(packed)]
+#[repr(C, packed)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserVringAddr {
/// Vring index.
@@ -738,7 +755,7 @@
}
/// Create a new instance from `VringConfigData`.
- #[cfg_attr(feature = "cargo-clippy", allow(clippy::useless_conversion))]
+ #[allow(clippy::useless_conversion)]
pub fn from_config_data(index: u32, config_data: &VringConfigData) -> Self {
let log_addr = config_data.log_addr.unwrap_or(0);
VhostUserVringAddr {
@@ -773,17 +790,18 @@
// Bit mask for the vhost-user device configuration message.
bitflags! {
+ #[derive(Copy, Clone, Debug, Eq, PartialEq)]
/// Flags for the device configuration message.
pub struct VhostUserConfigFlags: u32 {
- /// Vhost master messages used for writeable fields.
+ /// Vhost frontend messages used for writeable fields.
const WRITABLE = 0x1;
- /// Vhost master messages used for live migration.
+ /// Vhost frontend messages used for live migration.
const LIVE_MIGRATION = 0x2;
}
}
/// Message to read/write device configuration space.
-#[repr(packed)]
+#[repr(C, packed)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserConfig {
/// Offset of virtio device's configuration space.
@@ -898,82 +916,82 @@
}
}
-/*
- * TODO: support dirty log, live migration and IOTLB operations.
-#[repr(packed)]
-pub struct VhostUserVringArea {
- pub index: u32,
- pub flags: u32,
- pub size: u64,
- pub offset: u64,
-}
-
-#[repr(packed)]
-pub struct VhostUserLog {
- pub size: u64,
- pub offset: u64,
-}
-
-#[repr(packed)]
-pub struct VhostUserIotlb {
- pub iova: u64,
- pub size: u64,
- pub user_addr: u64,
- pub permission: u8,
- pub optype: u8,
-}
-*/
-
-// Bit mask for flags in virtio-fs slave messages
-bitflags! {
- #[derive(Default)]
- /// Flags for virtio-fs slave messages.
- pub struct VhostUserFSSlaveMsgFlags: u64 {
- /// Empty permission.
- const EMPTY = 0x0;
- /// Read permission.
- const MAP_R = 0x1;
- /// Write permission.
- const MAP_W = 0x2;
+enum_value! {
+ /// Direction of state transfer for migration
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+ pub enum VhostTransferStateDirection: u32 {
+ /// Outgoing migration: Transfer state from back-end to front-end
+ SAVE = 0,
+ /// Incoming migration: Transfer state from front-end to back-end
+ LOAD = 1,
}
}
-/// Max entries in one virtio-fs slave request.
-pub const VHOST_USER_FS_SLAVE_ENTRIES: usize = 8;
-
-/// Slave request message to update the MMIO window.
-#[repr(packed)]
-#[derive(Copy, Clone, Default)]
-pub struct VhostUserFSSlaveMsg {
- /// File offset.
- pub fd_offset: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
- /// Offset into the DAX window.
- pub cache_offset: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
- /// Size of region to map.
- pub len: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
- /// Flags for the mmap operation
- pub flags: [VhostUserFSSlaveMsgFlags; VHOST_USER_FS_SLAVE_ENTRIES],
+enum_value! {
+ /// Migration phases during which state transfer can occur
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+ pub enum VhostTransferStatePhase: u32 {
+ /// The device (and all its vrings) are stopped
+ STOPPED = 0,
+ }
}
-// SAFETY: Safe because all fields of VhostUserFSSlaveMsg are POD.
-unsafe impl ByteValued for VhostUserFSSlaveMsg {}
+/// Contains UUID to interact with associated virtio shared object.
+#[repr(C)]
+#[derive(Clone, Copy, Default)]
+pub struct VhostUserSharedMsg {
+ /// UUID of the shared object
+ pub uuid: Uuid,
+}
-impl VhostUserMsgValidator for VhostUserFSSlaveMsg {
+// SAFETY: Safe because VhostUserSharedMsg is a
+// fixed-size array internally and there is no
+// compiler-inserted padding.
+unsafe impl ByteValued for VhostUserSharedMsg {}
+
+impl VhostUserMsgValidator for VhostUserSharedMsg {
fn is_valid(&self) -> bool {
- for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
- if ({ self.flags[i] }.bits() & !VhostUserFSSlaveMsgFlags::all().bits()) != 0
- || self.fd_offset[i].checked_add(self.len[i]).is_none()
- || self.cache_offset[i].checked_add(self.len[i]).is_none()
- {
- return false;
- }
+ !(self.uuid.is_nil() || self.uuid.is_max())
+ }
+}
+
+/// Query/send virtio-fs migration state
+// Note: the spec does not define this struct as `packed`. Marking it `packed` is not
+// strictly necessary (the struct has no padding), but it makes it easy to verify the
+// absence of compiler-inserted padding, which is required for implementing `ByteValued`.
+#[repr(C, packed)]
+#[derive(Clone, Copy, Default)]
+pub struct VhostUserTransferDeviceState {
+ /// Direction of state transfer (save/load)
+ pub direction: u32,
+ /// Migration phase during which the transfer takes place
+ pub phase: u32,
+}
+
+// SAFETY: Safe because VhostUserTransferDeviceState is a POD
+// (i.e., none of its fields are references or raw pointers),
+// and there is no compiler-inserted padding.
+unsafe impl ByteValued for VhostUserTransferDeviceState {}
+
+impl VhostUserTransferDeviceState {
+ /// Create a new instance.
+ pub fn new(direction: VhostTransferStateDirection, phase: VhostTransferStatePhase) -> Self {
+ VhostUserTransferDeviceState {
+ direction: direction as u32,
+ phase: phase as u32,
}
- true
+ }
+}
+
+impl VhostUserMsgValidator for VhostUserTransferDeviceState {
+ fn is_valid(&self) -> bool {
+ VhostTransferStateDirection::try_from(self.direction).is_ok()
+ && VhostTransferStatePhase::try_from(self.phase).is_ok()
}
}
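
As a hedged sketch (not part of the patch, and assuming `VhostUserMsgValidator` is in scope), a frontend preparing an outgoing migration would build and validate this payload as follows:

```rust
// Sketch: build the device-state-transfer payload for an outgoing migration.
let msg = VhostUserTransferDeviceState::new(
    VhostTransferStateDirection::SAVE, // back-end -> front-end
    VhostTransferStatePhase::STOPPED,  // the only phase defined so far
);
assert!(msg.is_valid());

// Raw values outside the known enums are rejected on receipt.
let bogus = VhostUserTransferDeviceState { direction: 7, phase: 0 };
assert!(!bogus.is_valid());
```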
/// Inflight I/O descriptor state for split virtqueues
-#[repr(packed)]
+#[repr(C, packed)]
#[derive(Clone, Copy, Default)]
pub struct DescStateSplit {
/// Indicate whether this descriptor (only head) is inflight or not.
@@ -994,7 +1012,7 @@
}
/// Inflight I/O queue region for split virtqueues
-#[repr(packed)]
+#[repr(C, packed)]
pub struct QueueRegionSplit {
/// Features flags of this region
pub features: u64,
@@ -1025,7 +1043,7 @@
}
/// Inflight I/O descriptor state for packed virtqueues
-#[repr(packed)]
+#[repr(C, packed)]
#[derive(Clone, Copy, Default)]
pub struct DescStatePacked {
/// Indicate whether this descriptor (only head) is inflight or not.
@@ -1058,7 +1076,7 @@
}
/// Inflight I/O queue region for packed virtqueues
-#[repr(packed)]
+#[repr(C, packed)]
pub struct QueueRegionPacked {
/// Features flags of this region
pub features: u64,
@@ -1123,33 +1141,49 @@
}
#[test]
- fn check_master_request_code() {
- assert!(!MasterReq::is_valid(MasterReq::NOOP as _));
- assert!(!MasterReq::is_valid(MasterReq::MAX_CMD as _));
- assert!(MasterReq::MAX_CMD > MasterReq::NOOP);
- let code = MasterReq::GET_FEATURES;
- assert!(MasterReq::is_valid(code as _));
- assert_eq!(code, code.clone());
- assert!(!MasterReq::is_valid(10000));
+ fn check_transfer_state_direction_code() {
+ let load_code: u32 = VhostTransferStateDirection::LOAD.into();
+ assert!(VhostTransferStateDirection::try_from(load_code).is_ok());
+ assert_eq!(load_code, load_code.clone());
+
+ let save_code: u32 = VhostTransferStateDirection::SAVE.into();
+ assert!(VhostTransferStateDirection::try_from(save_code).is_ok());
+ assert_eq!(save_code, save_code.clone());
+
+ assert!(VhostTransferStateDirection::try_from(3).is_err());
}
#[test]
- fn check_slave_request_code() {
- assert!(!SlaveReq::is_valid(SlaveReq::NOOP as _));
- assert!(!SlaveReq::is_valid(SlaveReq::MAX_CMD as _));
- assert!(SlaveReq::MAX_CMD > SlaveReq::NOOP);
- let code = SlaveReq::CONFIG_CHANGE_MSG;
- assert!(SlaveReq::is_valid(code as _));
+ fn check_transfer_state_phase_code() {
+ let code: u32 = VhostTransferStatePhase::STOPPED.into();
+ assert!(VhostTransferStatePhase::try_from(code).is_ok());
assert_eq!(code, code.clone());
- assert!(!SlaveReq::is_valid(10000));
+
+ assert!(VhostTransferStatePhase::try_from(1).is_err());
+ }
+
+ #[test]
+ fn check_frontend_request_code() {
+ let code: u32 = FrontendReq::GET_FEATURES.into();
+ assert!(FrontendReq::try_from(code).is_ok());
+ assert_eq!(code, code.clone());
+ assert!(FrontendReq::try_from(10000).is_err());
+ }
+
+ #[test]
+ fn check_backend_request_code() {
+ let code: u32 = BackendReq::CONFIG_CHANGE_MSG.into();
+ assert!(BackendReq::try_from(code).is_ok());
+ assert_eq!(code, code.clone());
+ assert!(BackendReq::try_from(10000).is_err());
}
#[test]
fn msg_header_ops() {
- let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0, 0x100);
- assert_eq!(hdr.get_code().unwrap(), MasterReq::GET_FEATURES);
- hdr.set_code(MasterReq::SET_FEATURES);
- assert_eq!(hdr.get_code().unwrap(), MasterReq::SET_FEATURES);
+ let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0, 0x100);
+ assert_eq!(hdr.get_code().unwrap(), FrontendReq::GET_FEATURES);
+ hdr.set_code(FrontendReq::SET_FEATURES);
+ assert_eq!(hdr.get_code().unwrap(), FrontendReq::SET_FEATURES);
assert_eq!(hdr.get_version(), 0x1);
@@ -1178,7 +1212,7 @@
hdr.set_size(0x100);
assert_eq!(hdr.get_size(), 0x100);
assert!(hdr.is_valid());
- hdr.set_size((MAX_MSG_SIZE - mem::size_of::<VhostUserMsgHeader<MasterReq>>()) as u32);
+ hdr.set_size((MAX_MSG_SIZE - mem::size_of::<VhostUserMsgHeader<FrontendReq>>()) as u32);
assert!(hdr.is_valid());
hdr.set_size(0x0);
assert!(hdr.is_valid());
@@ -1313,7 +1347,7 @@
let config = VringConfigData {
queue_max_size: 256,
queue_size: 128,
- flags: VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits,
+ flags: VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits(),
desc_table_addr: 0x1000,
used_ring_addr: 0x2000,
avail_ring_addr: 0x3000,
@@ -1383,21 +1417,4 @@
msg.flags |= 0x4;
assert!(!msg.is_valid());
}
-
- #[test]
- fn test_vhost_user_fs_slave() {
- let mut fs_slave = VhostUserFSSlaveMsg::default();
-
- assert!(fs_slave.is_valid());
-
- fs_slave.fd_offset[0] = 0xffff_ffff_ffff_ffff;
- fs_slave.len[0] = 0x1;
- assert!(!fs_slave.is_valid());
-
- assert_ne!(
- VhostUserFSSlaveMsgFlags::MAP_R,
- VhostUserFSSlaveMsgFlags::MAP_W
- );
- assert_eq!(VhostUserFSSlaveMsgFlags::EMPTY.bits(), 0);
- }
}
diff --git a/crates/vhost/src/vhost_user/mod.rs b/crates/vhost/src/vhost_user/mod.rs
index 7df51f6..fd76932 100644
--- a/crates/vhost/src/vhost_user/mod.rs
+++ b/crates/vhost/src/vhost_user/mod.rs
@@ -2,18 +2,18 @@
// SPDX-License-Identifier: Apache-2.0
//! The protocol for vhost-user is based on the existing implementation of vhost for the Linux
-//! Kernel. The protocol defines two sides of the communication, master and slave. Master is
-//! the application that shares its virtqueues. Slave is the consumer of the virtqueues.
+//! Kernel. The protocol defines two sides of the communication, frontend and backend. Frontend is
+//! the application that shares its virtqueues. Backend is the consumer of the virtqueues.
//!
-//! The communication channel between the master and the slave includes two sub channels. One is
-//! used to send requests from the master to the slave and optional replies from the slave to the
-//! master. This sub channel is created on master startup by connecting to the slave service
-//! endpoint. The other is used to send requests from the slave to the master and optional replies
-//! from the master to the slave. This sub channel is created by the master issuing a
-//! VHOST_USER_SET_SLAVE_REQ_FD request to the slave with an auxiliary file descriptor.
+//! The communication channel between the frontend and the backend includes two sub channels. One is
+//! used to send requests from the frontend to the backend and optional replies from the backend to the
+//! frontend. This sub channel is created on frontend startup by connecting to the backend service
+//! endpoint. The other is used to send requests from the backend to the frontend and optional replies
+//! from the frontend to the backend. This sub channel is created by the frontend issuing a
+//! VHOST_USER_SET_BACKEND_REQ_FD request to the backend with an auxiliary file descriptor.
//!
-//! Unix domain socket is used as the underlying communication channel because the master needs to
-//! send file descriptors to the slave.
+//! Unix domain socket is used as the underlying communication channel because the frontend needs to
+//! send file descriptors to the backend.
//!
//! Most messages that can be sent via the Unix domain socket implementing vhost-user have an
//! equivalent ioctl to the kernel implementation.
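
To make the renamed roles concrete, the crate's own tests (further down in this diff) wire the two sides together roughly as below. This is a condensed sketch, not new functionality; `handler` stands for any `Arc`-wrapped type implementing `VhostUserBackendReqHandler`, such as the `DummyBackendReqHandler` used in the tests.

```rust
// Back-end side: listen on a Unix domain socket and hand the accepted
// connection to a request handler.
let listener = Listener::new("/tmp/vhost-user-example.sock", true).unwrap();
let mut backend_listener = BackendListener::new(listener, handler).unwrap();

// Front-end side: connect to the same socket, declaring one virtqueue.
let frontend = Frontend::connect("/tmp/vhost-user-example.sock", 1).unwrap();
let mut backend = backend_listener.accept().unwrap().unwrap();

// The front-end now issues requests and the back-end services them.
frontend.set_owner().unwrap();
backend.handle_request().unwrap();
```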
@@ -27,31 +27,34 @@
mod connection;
pub use self::connection::Listener;
-#[cfg(feature = "vhost-user-master")]
-mod master;
-#[cfg(feature = "vhost-user-master")]
-pub use self::master::{Master, VhostUserMaster};
+#[cfg(feature = "vhost-user-frontend")]
+mod frontend;
+#[cfg(feature = "vhost-user-frontend")]
+pub use self::frontend::{Frontend, VhostUserFrontend};
#[cfg(feature = "vhost-user")]
-mod master_req_handler;
+mod frontend_req_handler;
#[cfg(feature = "vhost-user")]
-pub use self::master_req_handler::{
- MasterReqHandler, VhostUserMasterReqHandler, VhostUserMasterReqHandlerMut,
+pub use self::frontend_req_handler::{
+ FrontendReqHandler, VhostUserFrontendReqHandler, VhostUserFrontendReqHandlerMut,
};
-#[cfg(feature = "vhost-user-slave")]
-mod slave;
-#[cfg(feature = "vhost-user-slave")]
-pub use self::slave::SlaveListener;
-#[cfg(feature = "vhost-user-slave")]
-mod slave_req_handler;
-#[cfg(feature = "vhost-user-slave")]
-pub use self::slave_req_handler::{
- SlaveReqHandler, VhostUserSlaveReqHandler, VhostUserSlaveReqHandlerMut,
+#[cfg(feature = "vhost-user-backend")]
+mod backend;
+#[cfg(feature = "vhost-user-backend")]
+pub use self::backend::BackendListener;
+#[cfg(feature = "vhost-user-backend")]
+mod backend_req_handler;
+#[cfg(feature = "vhost-user-backend")]
+pub use self::backend_req_handler::{
+ BackendReqHandler, VhostUserBackendReqHandler, VhostUserBackendReqHandlerMut,
};
-#[cfg(feature = "vhost-user-slave")]
-mod slave_req;
-#[cfg(feature = "vhost-user-slave")]
-pub use self::slave_req::Slave;
+#[cfg(feature = "vhost-user-backend")]
+mod backend_req;
+#[cfg(feature = "vhost-user-backend")]
+pub use self::backend_req::Backend;
+mod gpu_backend_req;
+pub mod gpu_message;
+pub use self::gpu_backend_req::GpuBackend;
/// Errors for vhost-user operations
#[derive(Debug)]
@@ -82,10 +85,10 @@
SocketBroken(std::io::Error),
/// Should retry the socket operation again.
SocketRetry(std::io::Error),
- /// Failure from the slave side.
- SlaveInternalError,
- /// Failure from the master side.
- MasterInternalError,
+ /// Failure from the backend side.
+ BackendInternalError,
+ /// Failure from the frontend side.
+ FrontendInternalError,
/// Virtio/protocol features mismatch.
FeatureMismatch,
/// Error from request handler
@@ -116,8 +119,8 @@
Error::SocketConnect(e) => write!(f, "can't connect to peer: {}", e),
Error::SocketBroken(e) => write!(f, "socket is broken: {}", e),
Error::SocketRetry(e) => write!(f, "temporary socket error: {}", e),
- Error::SlaveInternalError => write!(f, "slave internal error"),
- Error::MasterInternalError => write!(f, "Master internal error"),
+ Error::BackendInternalError => write!(f, "backend internal error"),
+ Error::FrontendInternalError => write!(f, "Frontend internal error"),
Error::FeatureMismatch => write!(f, "virtio/protocol features mismatch"),
Error::ReqHandlerError(e) => write!(f, "handler failed to handle request: {}", e),
Error::MemFdCreateError => {
@@ -144,10 +147,10 @@
Error::PartialMessage => true,
// Should reconnect because the underlying socket is broken.
Error::SocketBroken(_) => true,
- // Slave internal error, hope it recovers on reconnect.
- Error::SlaveInternalError => true,
- // Master internal error, hope it recovers on reconnect.
- Error::MasterInternalError => true,
+ // Backend internal error, hope it recovers on reconnect.
+ Error::BackendInternalError => true,
+ // Frontend internal error, hope it recovers on reconnect.
+ Error::FrontendInternalError => true,
// Should just retry the IO operation instead of rebuilding the underlying connection.
Error::SocketRetry(_) => false,
// Looks like the peer deliberately disconnected the socket.
@@ -218,10 +221,48 @@
Some(files.swap_remove(0))
}
-#[cfg(all(test, feature = "vhost-user-slave"))]
-mod dummy_slave;
+// Utility to generate `TryFrom` and `From` implementations for enums
+macro_rules! enum_value {
+ (
+ $(#[$meta:meta])*
+ $vis:vis enum $enum:ident: $T:tt {
+ $(
+ $(#[$variant_meta:meta])*
+ $variant:ident $(= $val:expr)?,
+ )*
+ }
+ ) => {
+ #[repr($T)]
+ $(#[$meta])*
+ $vis enum $enum {
+ $($(#[$variant_meta])* $variant $(= $val)?,)*
+ }
-#[cfg(all(test, feature = "vhost-user-master", feature = "vhost-user-slave"))]
+ impl std::convert::TryFrom<$T> for $enum {
+ type Error = ();
+
+ fn try_from(v: $T) -> std::result::Result<Self, Self::Error> {
+ match v {
+ $(v if v == $enum::$variant as $T => Ok($enum::$variant),)*
+ _ => Err(()),
+ }
+ }
+ }
+
+ impl std::convert::From<$enum> for $T {
+ fn from(v: $enum) -> $T {
+ v as $T
+ }
+ }
+ }
+}
+
+use enum_value;
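
A hedged usage sketch of the macro; the `ExampleReq` enum below is purely illustrative and not part of the crate:

```rust
enum_value! {
    /// Example request codes (illustrative only).
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum ExampleReq: u32 {
        NOOP = 0,
        PING = 1,
    }
}

// The macro emits `#[repr(u32)]`, an infallible `From<ExampleReq> for u32`,
// and a fallible `TryFrom<u32>` that rejects unknown discriminants.
let raw: u32 = ExampleReq::PING.into();
assert_eq!(ExampleReq::try_from(raw), Ok(ExampleReq::PING));
assert!(ExampleReq::try_from(42).is_err());
```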
+
+#[cfg(all(test, feature = "vhost-user-backend"))]
+mod dummy_backend;
+
+#[cfg(all(test, feature = "vhost-user-frontend", feature = "vhost-user-backend"))]
mod tests {
use std::fs::File;
use std::os::unix::io::AsRawFd;
@@ -231,7 +272,7 @@
use vmm_sys_util::rand::rand_alphanumerics;
use vmm_sys_util::tempfile::TempFile;
- use super::dummy_slave::{DummySlaveReqHandler, VIRTIO_FEATURES};
+ use super::dummy_backend::{DummyBackendReqHandler, VIRTIO_FEATURES};
use super::message::*;
use super::*;
use crate::backend::VhostBackend;
@@ -244,38 +285,38 @@
))
}
- fn create_slave<P, S>(path: P, backend: Arc<S>) -> (Master, SlaveReqHandler<S>)
+ fn create_backend<P, S>(path: P, backend: Arc<S>) -> (Frontend, BackendReqHandler<S>)
where
P: AsRef<Path>,
- S: VhostUserSlaveReqHandler,
+ S: VhostUserBackendReqHandler,
{
let listener = Listener::new(&path, true).unwrap();
- let mut slave_listener = SlaveListener::new(listener, backend).unwrap();
- let master = Master::connect(&path, 1).unwrap();
- (master, slave_listener.accept().unwrap().unwrap())
+ let mut backend_listener = BackendListener::new(listener, backend).unwrap();
+ let frontend = Frontend::connect(&path, 1).unwrap();
+ (frontend, backend_listener.accept().unwrap().unwrap())
}
#[test]
- fn create_dummy_slave() {
- let slave = Arc::new(Mutex::new(DummySlaveReqHandler::new()));
+ fn create_dummy_backend() {
+ let backend = Arc::new(Mutex::new(DummyBackendReqHandler::new()));
- slave.set_owner().unwrap();
- assert!(slave.set_owner().is_err());
+ backend.set_owner().unwrap();
+ assert!(backend.set_owner().is_err());
}
#[test]
fn test_set_owner() {
- let slave_be = Arc::new(Mutex::new(DummySlaveReqHandler::new()));
+ let backend_be = Arc::new(Mutex::new(DummyBackendReqHandler::new()));
let path = temp_path();
- let (master, mut slave) = create_slave(path, slave_be.clone());
+ let (frontend, mut backend) = create_backend(path, backend_be.clone());
- assert!(!slave_be.lock().unwrap().owned);
- master.set_owner().unwrap();
- slave.handle_request().unwrap();
- assert!(slave_be.lock().unwrap().owned);
- master.set_owner().unwrap();
- assert!(slave.handle_request().is_err());
- assert!(slave_be.lock().unwrap().owned);
+ assert!(!backend_be.lock().unwrap().owned);
+ frontend.set_owner().unwrap();
+ backend.handle_request().unwrap();
+ assert!(backend_be.lock().unwrap().owned);
+ frontend.set_owner().unwrap();
+ assert!(backend.handle_request().is_err());
+ assert!(backend_be.lock().unwrap().owned);
}
#[test]
@@ -283,68 +324,68 @@
let mbar = Arc::new(Barrier::new(2));
let sbar = mbar.clone();
let path = temp_path();
- let slave_be = Arc::new(Mutex::new(DummySlaveReqHandler::new()));
- let (mut master, mut slave) = create_slave(path, slave_be.clone());
+ let backend_be = Arc::new(Mutex::new(DummyBackendReqHandler::new()));
+ let (mut frontend, mut backend) = create_backend(path, backend_be.clone());
thread::spawn(move || {
- slave.handle_request().unwrap();
- assert!(slave_be.lock().unwrap().owned);
+ backend.handle_request().unwrap();
+ assert!(backend_be.lock().unwrap().owned);
- slave.handle_request().unwrap();
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
+ backend.handle_request().unwrap();
assert_eq!(
- slave_be.lock().unwrap().acked_features,
+ backend_be.lock().unwrap().acked_features,
VIRTIO_FEATURES & !0x1
);
- slave.handle_request().unwrap();
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
+ backend.handle_request().unwrap();
assert_eq!(
- slave_be.lock().unwrap().acked_protocol_features,
+ backend_be.lock().unwrap().acked_protocol_features,
VhostUserProtocolFeatures::all().bits()
);
sbar.wait();
});
- master.set_owner().unwrap();
+ frontend.set_owner().unwrap();
// set virtio features
- let features = master.get_features().unwrap();
+ let features = frontend.get_features().unwrap();
assert_eq!(features, VIRTIO_FEATURES);
- master.set_features(VIRTIO_FEATURES & !0x1).unwrap();
+ frontend.set_features(VIRTIO_FEATURES & !0x1).unwrap();
// set vhost protocol features
- let features = master.get_protocol_features().unwrap();
+ let features = frontend.get_protocol_features().unwrap();
assert_eq!(features.bits(), VhostUserProtocolFeatures::all().bits());
- master.set_protocol_features(features).unwrap();
+ frontend.set_protocol_features(features).unwrap();
mbar.wait();
}
#[test]
- fn test_master_slave_process() {
+ fn test_frontend_backend_process() {
let mbar = Arc::new(Barrier::new(2));
let sbar = mbar.clone();
let path = temp_path();
- let slave_be = Arc::new(Mutex::new(DummySlaveReqHandler::new()));
- let (mut master, mut slave) = create_slave(path, slave_be.clone());
+ let backend_be = Arc::new(Mutex::new(DummyBackendReqHandler::new()));
+ let (mut frontend, mut backend) = create_backend(path, backend_be.clone());
thread::spawn(move || {
// set_own()
- slave.handle_request().unwrap();
- assert!(slave_be.lock().unwrap().owned);
+ backend.handle_request().unwrap();
+ assert!(backend_be.lock().unwrap().owned);
// get/set_features()
- slave.handle_request().unwrap();
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
+ backend.handle_request().unwrap();
assert_eq!(
- slave_be.lock().unwrap().acked_features,
+ backend_be.lock().unwrap().acked_features,
VIRTIO_FEATURES & !0x1
);
- slave.handle_request().unwrap();
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
+ backend.handle_request().unwrap();
let mut features = VhostUserProtocolFeatures::all();
@@ -354,64 +395,64 @@
}
assert_eq!(
- slave_be.lock().unwrap().acked_protocol_features,
+ backend_be.lock().unwrap().acked_protocol_features,
features.bits()
);
// get_inflight_fd()
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
// set_inflight_fd()
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
// get_queue_num()
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
// set_mem_table()
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
// get/set_config()
- slave.handle_request().unwrap();
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
+ backend.handle_request().unwrap();
- // set_slave_request_fd
- slave.handle_request().unwrap();
+ // set_backend_request_fd
+ backend.handle_request().unwrap();
// set_vring_enable
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
// set_log_base,set_log_fd()
- slave.handle_request().unwrap_err();
- slave.handle_request().unwrap_err();
+ backend.handle_request().unwrap_err();
+ backend.handle_request().unwrap_err();
// set_vring_xxx
- slave.handle_request().unwrap();
- slave.handle_request().unwrap();
- slave.handle_request().unwrap();
- slave.handle_request().unwrap();
- slave.handle_request().unwrap();
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
+ backend.handle_request().unwrap();
+ backend.handle_request().unwrap();
+ backend.handle_request().unwrap();
+ backend.handle_request().unwrap();
+ backend.handle_request().unwrap();
// get_max_mem_slots()
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
// add_mem_region()
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
// remove_mem_region()
- slave.handle_request().unwrap();
+ backend.handle_request().unwrap();
sbar.wait();
});
- master.set_owner().unwrap();
+ frontend.set_owner().unwrap();
// set virtio features
- let features = master.get_features().unwrap();
+ let features = frontend.get_features().unwrap();
assert_eq!(features, VIRTIO_FEATURES);
- master.set_features(VIRTIO_FEATURES & !0x1).unwrap();
+ frontend.set_features(VIRTIO_FEATURES & !0x1).unwrap();
// set vhost protocol features
- let mut features = master.get_protocol_features().unwrap();
+ let mut features = frontend.get_protocol_features().unwrap();
assert_eq!(features.bits(), VhostUserProtocolFeatures::all().bits());
// Disable Xen mmap feature.
@@ -419,10 +460,10 @@
features.remove(VhostUserProtocolFeatures::XEN_MMAP);
}
- master.set_protocol_features(features).unwrap();
+ frontend.set_protocol_features(features).unwrap();
// Retrieve inflight I/O tracking information
- let (inflight_info, inflight_file) = master
+ let (inflight_info, inflight_file) = frontend
.get_inflight_fd(&VhostUserInflight {
num_queues: 2,
queue_size: 256,
@@ -430,11 +471,11 @@
})
.unwrap();
// Set the buffer back to the backend
- master
+ frontend
.set_inflight_fd(&inflight_info, inflight_file.as_raw_fd())
.unwrap();
- let num = master.get_queue_num().unwrap();
+ let num = frontend.get_queue_num().unwrap();
assert_eq!(num, 2);
let eventfd = vmm_sys_util::eventfd::EventFd::new(0).unwrap();
@@ -445,23 +486,23 @@
0,
eventfd.as_raw_fd(),
)];
- master.set_mem_table(&mem).unwrap();
+ frontend.set_mem_table(&mem).unwrap();
- master
+ frontend
.set_config(0x100, VhostUserConfigFlags::WRITABLE, &[0xa5u8; 4])
.unwrap();
let buf = [0x0u8; 4];
- let (reply_body, reply_payload) = master
+ let (reply_body, reply_payload) = frontend
.get_config(0x100, 4, VhostUserConfigFlags::empty(), &buf)
.unwrap();
let offset = reply_body.offset;
assert_eq!(offset, 0x100);
assert_eq!(&reply_payload, &[0xa5; 4]);
- master.set_slave_request_fd(&eventfd).unwrap();
- master.set_vring_enable(0, true).unwrap();
+ frontend.set_backend_request_fd(&eventfd).unwrap();
+ frontend.set_vring_enable(0, true).unwrap();
- master
+ frontend
.set_log_base(
0,
Some(VhostUserDirtyLogRegion {
@@ -471,10 +512,10 @@
}),
)
.unwrap();
- master.set_log_fd(eventfd.as_raw_fd()).unwrap();
+ frontend.set_log_fd(eventfd.as_raw_fd()).unwrap();
- master.set_vring_num(0, 256).unwrap();
- master.set_vring_base(0, 0).unwrap();
+ frontend.set_vring_num(0, 256).unwrap();
+ frontend.set_vring_base(0, 0).unwrap();
let config = VringConfigData {
queue_max_size: 256,
queue_size: 128,
@@ -484,20 +525,20 @@
avail_ring_addr: 0x3000,
log_addr: Some(0x4000),
};
- master.set_vring_addr(0, &config).unwrap();
- master.set_vring_call(0, &eventfd).unwrap();
- master.set_vring_kick(0, &eventfd).unwrap();
- master.set_vring_err(0, &eventfd).unwrap();
+ frontend.set_vring_addr(0, &config).unwrap();
+ frontend.set_vring_call(0, &eventfd).unwrap();
+ frontend.set_vring_kick(0, &eventfd).unwrap();
+ frontend.set_vring_err(0, &eventfd).unwrap();
- let max_mem_slots = master.get_max_mem_slots().unwrap();
- assert_eq!(max_mem_slots, 32);
+ let max_mem_slots = frontend.get_max_mem_slots().unwrap();
+ assert_eq!(max_mem_slots, 509);
let region_file: File = TempFile::new().unwrap().into_file();
let region =
VhostUserMemoryRegionInfo::new(0x10_0000, 0x10_0000, 0, 0, region_file.as_raw_fd());
- master.add_mem_region(®ion).unwrap();
+ frontend.add_mem_region(®ion).unwrap();
- master.remove_mem_region(®ion).unwrap();
+ frontend.remove_mem_region(®ion).unwrap();
mbar.wait();
}
@@ -514,8 +555,8 @@
#[test]
fn test_should_reconnect() {
assert!(Error::PartialMessage.should_reconnect());
- assert!(Error::SlaveInternalError.should_reconnect());
- assert!(Error::MasterInternalError.should_reconnect());
+ assert!(Error::BackendInternalError.should_reconnect());
+ assert!(Error::FrontendInternalError.should_reconnect());
assert!(!Error::InvalidParam.should_reconnect());
assert!(!Error::InvalidOperation("reason").should_reconnect());
assert!(
diff --git a/crates/vhost/src/vhost_user/slave.rs b/crates/vhost/src/vhost_user/slave.rs
deleted file mode 100644
index fb65c41..0000000
--- a/crates/vhost/src/vhost_user/slave.rs
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-//! Traits and Structs for vhost-user slave.
-
-use std::sync::Arc;
-
-use super::connection::{Endpoint, Listener};
-use super::message::*;
-use super::{Result, SlaveReqHandler, VhostUserSlaveReqHandler};
-
-/// Vhost-user slave side connection listener.
-pub struct SlaveListener<S: VhostUserSlaveReqHandler> {
- listener: Listener,
- backend: Option<Arc<S>>,
-}
-
-/// Sets up a listener for incoming master connections, and handles construction
-/// of a Slave on success.
-impl<S: VhostUserSlaveReqHandler> SlaveListener<S> {
- /// Create a unix domain socket for incoming master connections.
- pub fn new(listener: Listener, backend: Arc<S>) -> Result<Self> {
- Ok(SlaveListener {
- listener,
- backend: Some(backend),
- })
- }
-
- /// Accept an incoming connection from the master, returning Some(Slave) on
- /// success, or None if the socket is nonblocking and no incoming connection
- /// was detected
- pub fn accept(&mut self) -> Result<Option<SlaveReqHandler<S>>> {
- if let Some(fd) = self.listener.accept()? {
- return Ok(Some(SlaveReqHandler::new(
- Endpoint::<MasterReq>::from_stream(fd),
- self.backend.take().unwrap(),
- )));
- }
- Ok(None)
- }
-
- /// Change blocking status on the listener.
- pub fn set_nonblocking(&self, block: bool) -> Result<()> {
- self.listener.set_nonblocking(block)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::sync::Mutex;
-
- use super::*;
- use crate::vhost_user::dummy_slave::DummySlaveReqHandler;
-
- #[test]
- fn test_slave_listener_set_nonblocking() {
- let backend = Arc::new(Mutex::new(DummySlaveReqHandler::new()));
- let listener =
- Listener::new("/tmp/vhost_user_lib_unit_test_slave_nonblocking", true).unwrap();
- let slave_listener = SlaveListener::new(listener, backend).unwrap();
-
- slave_listener.set_nonblocking(true).unwrap();
- slave_listener.set_nonblocking(false).unwrap();
- slave_listener.set_nonblocking(false).unwrap();
- slave_listener.set_nonblocking(true).unwrap();
- slave_listener.set_nonblocking(true).unwrap();
- }
-
- #[cfg(feature = "vhost-user-master")]
- #[test]
- fn test_slave_listener_accept() {
- use super::super::Master;
-
- let path = "/tmp/vhost_user_lib_unit_test_slave_accept";
- let backend = Arc::new(Mutex::new(DummySlaveReqHandler::new()));
- let listener = Listener::new(path, true).unwrap();
- let mut slave_listener = SlaveListener::new(listener, backend).unwrap();
-
- slave_listener.set_nonblocking(true).unwrap();
- assert!(slave_listener.accept().unwrap().is_none());
- assert!(slave_listener.accept().unwrap().is_none());
-
- let _master = Master::connect(path, 1).unwrap();
- let _slave = slave_listener.accept().unwrap().unwrap();
- }
-}
diff --git a/crates/vhost/src/vhost_user/slave_req.rs b/crates/vhost/src/vhost_user/slave_req.rs
deleted file mode 100644
index ade1e91..0000000
--- a/crates/vhost/src/vhost_user/slave_req.rs
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright (C) 2020 Alibaba Cloud. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-use std::io;
-use std::mem;
-use std::os::unix::io::{AsRawFd, RawFd};
-use std::os::unix::net::UnixStream;
-use std::sync::{Arc, Mutex, MutexGuard};
-
-use super::connection::Endpoint;
-use super::message::*;
-use super::{Error, HandlerResult, Result, VhostUserMasterReqHandler};
-
-use vm_memory::ByteValued;
-
-struct SlaveInternal {
- sock: Endpoint<SlaveReq>,
-
- // Protocol feature VHOST_USER_PROTOCOL_F_REPLY_ACK has been negotiated.
- reply_ack_negotiated: bool,
-
- // whether the endpoint has encountered any failure
- error: Option<i32>,
-}
-
-impl SlaveInternal {
- fn check_state(&self) -> Result<u64> {
- match self.error {
- Some(e) => Err(Error::SocketBroken(std::io::Error::from_raw_os_error(e))),
- None => Ok(0),
- }
- }
-
- fn send_message<T: ByteValued>(
- &mut self,
- request: SlaveReq,
- body: &T,
- fds: Option<&[RawFd]>,
- ) -> Result<u64> {
- self.check_state()?;
-
- let len = mem::size_of::<T>();
- let mut hdr = VhostUserMsgHeader::new(request, 0, len as u32);
- if self.reply_ack_negotiated {
- hdr.set_need_reply(true);
- }
- self.sock.send_message(&hdr, body, fds)?;
-
- self.wait_for_ack(&hdr)
- }
-
- fn wait_for_ack(&mut self, hdr: &VhostUserMsgHeader<SlaveReq>) -> Result<u64> {
- self.check_state()?;
- if !self.reply_ack_negotiated {
- return Ok(0);
- }
-
- let (reply, body, rfds) = self.sock.recv_body::<VhostUserU64>()?;
- if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() {
- return Err(Error::InvalidMessage);
- }
- if body.value != 0 {
- return Err(Error::MasterInternalError);
- }
-
- Ok(body.value)
- }
-}
-
-/// Request proxy to send vhost-user slave requests to the master through the slave
-/// communication channel.
-///
-/// The [Slave] acts as a message proxy to forward vhost-user slave requests to the
-/// master through the vhost-user slave communication channel. The forwarded messages will be
-/// handled by the [MasterReqHandler] server.
-///
-/// [Slave]: struct.Slave.html
-/// [MasterReqHandler]: struct.MasterReqHandler.html
-#[derive(Clone)]
-pub struct Slave {
- // underlying Unix domain socket for communication
- node: Arc<Mutex<SlaveInternal>>,
-}
-
-impl Slave {
- fn new(ep: Endpoint<SlaveReq>) -> Self {
- Slave {
- node: Arc::new(Mutex::new(SlaveInternal {
- sock: ep,
- reply_ack_negotiated: false,
- error: None,
- })),
- }
- }
-
- fn node(&self) -> MutexGuard<SlaveInternal> {
- self.node.lock().unwrap()
- }
-
- fn send_message<T: ByteValued>(
- &self,
- request: SlaveReq,
- body: &T,
- fds: Option<&[RawFd]>,
- ) -> io::Result<u64> {
- self.node()
- .send_message(request, body, fds)
- .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{}", e)))
- }
-
- /// Create a new instance from a `UnixStream` object.
- pub fn from_stream(sock: UnixStream) -> Self {
- Self::new(Endpoint::<SlaveReq>::from_stream(sock))
- }
-
- /// Set the negotiation state of the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature.
- ///
- /// When the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature has been negotiated,
- /// the "REPLY_ACK" flag will be set in the message header for every slave to master request
- /// message.
- pub fn set_reply_ack_flag(&self, enable: bool) {
- self.node().reply_ack_negotiated = enable;
- }
-
- /// Mark endpoint as failed with specified error code.
- pub fn set_failed(&self, error: i32) {
- self.node().error = Some(error);
- }
-}
-
-impl VhostUserMasterReqHandler for Slave {
- /// Forward vhost-user-fs map file requests to the slave.
- fn fs_slave_map(&self, fs: &VhostUserFSSlaveMsg, fd: &dyn AsRawFd) -> HandlerResult<u64> {
- self.send_message(SlaveReq::FS_MAP, fs, Some(&[fd.as_raw_fd()]))
- }
-
- /// Forward vhost-user-fs unmap file requests to the master.
- fn fs_slave_unmap(&self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
- self.send_message(SlaveReq::FS_UNMAP, fs, None)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::os::unix::io::AsRawFd;
-
- use super::*;
-
- #[test]
- fn test_slave_req_set_failed() {
- let (p1, _p2) = UnixStream::pair().unwrap();
- let slave = Slave::from_stream(p1);
-
- assert!(slave.node().error.is_none());
- slave.set_failed(libc::EAGAIN);
- assert_eq!(slave.node().error, Some(libc::EAGAIN));
- }
-
- #[test]
- fn test_slave_req_send_failure() {
- let (p1, p2) = UnixStream::pair().unwrap();
- let slave = Slave::from_stream(p1);
-
- slave.set_failed(libc::ECONNRESET);
- slave
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &p2)
- .unwrap_err();
- slave
- .fs_slave_unmap(&VhostUserFSSlaveMsg::default())
- .unwrap_err();
- slave.node().error = None;
- }
-
- #[test]
- fn test_slave_req_recv_negative() {
- let (p1, p2) = UnixStream::pair().unwrap();
- let slave = Slave::from_stream(p1);
- let mut master = Endpoint::<SlaveReq>::from_stream(p2);
-
- let len = mem::size_of::<VhostUserFSSlaveMsg>();
- let mut hdr = VhostUserMsgHeader::new(
- SlaveReq::FS_MAP,
- VhostUserHeaderFlag::REPLY.bits(),
- len as u32,
- );
- let body = VhostUserU64::new(0);
-
- master
- .send_message(&hdr, &body, Some(&[master.as_raw_fd()]))
- .unwrap();
- slave
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &master)
- .unwrap();
-
- slave.set_reply_ack_flag(true);
- slave
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &master)
- .unwrap_err();
-
- hdr.set_code(SlaveReq::FS_UNMAP);
- master.send_message(&hdr, &body, None).unwrap();
- slave
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &master)
- .unwrap_err();
- hdr.set_code(SlaveReq::FS_MAP);
-
- let body = VhostUserU64::new(1);
- master.send_message(&hdr, &body, None).unwrap();
- slave
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &master)
- .unwrap_err();
-
- let body = VhostUserU64::new(0);
- master.send_message(&hdr, &body, None).unwrap();
- slave
- .fs_slave_map(&VhostUserFSSlaveMsg::default(), &master)
- .unwrap();
- }
-}
diff --git a/crates/virtio-bindings/.cargo-checksum.json b/crates/virtio-bindings/.cargo-checksum.json
index e26b258..5053f73 100644
--- a/crates/virtio-bindings/.cargo-checksum.json
+++ b/crates/virtio-bindings/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"e247a960964314db0fdd18f7068f28000920c45b27a9445d76353400c2594efe","CONTRIBUTING.md":"fa97c8841aed048d1627ea56db1843dcd9f6d8ca509a127355fff431fb7d0d09","Cargo.toml":"542cfa5352cc5d0536d675fad152bb72f5987dd14329ccd37000e2d077bdaefb","README.md":"d46437bae9584b47d75f5cdb063dbc91cb7300f7a1340c463e6bb5836b90b396","src/lib.rs":"0b6eeb75110444a241bfc19675d9015478bbdac4a49ee4b0c8f65db077bbaf24","src/virtio_blk.rs":"6734c389ce0c8e8e23c36c7ab66090309ca8a8dae574c6ccc0551e42051075a3","src/virtio_config.rs":"87bd875b098cc97b4fe36993798d5a00b02293a735f5a96831b1ca7cddef864c","src/virtio_gpu.rs":"96de76ab5478efb8ed1cef9eb703116b28cd988c55ac36bbbc1a808b2d9e3d11","src/virtio_ids.rs":"b8f30fcd98a3f006368a87114543dfd4ffb086727c543e99a0fe42852f658ee3","src/virtio_mmio.rs":"3919c3028a1781d0021c248353b80a7b2390d82c499c18445034a54eda8cae50","src/virtio_net/generated.rs":"29b042df5bee5144a322a5c036ae0b696142ef359dbf9dc88d1c4bb9ed9f6332","src/virtio_net/mod.rs":"dd4366b3193e6dcf4eda0fb6dd222e091d6c8ff6384089fd8a3f02cfb0fb335a","src/virtio_ring.rs":"4799d74d4a6f2d4177266b2de9a00932321a21c073c41fb58527a8a248a84571","src/virtio_scsi.rs":"ffda7350cd8484187f39bfc5e57405bc2e44e13bf0c0d336ed116ccee80ec586"},"package":"878bcb1b2812a10c30d53b0ed054999de3d98f25ece91fc173973f9c57aaae86"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"711f5179c2832320f758791aa6473377a6088b2ecdb92399c924b9e781fbcde7","CONTRIBUTING.md":"7f2fea3834a6dca9d6bb526768a1b475b1ccc734ddfca7e4d5ef35be62f8ecb3","Cargo.toml":"69876bf79999c782ad4f1a0f6dbadc69a6d2b57202924bb81e5cbc1c330dd9c2","README.md":"d46437bae9584b47d75f5cdb063dbc91cb7300f7a1340c463e6bb5836b90b396","src/lib.rs":"11b8df8a63eb99c31010b72243b4375dcb73263ece20c93cc90c31c3df434bf9","src/virtio_blk.rs":"234c8f64afadabc0802ba04d9d38e7d036ba03e8e6d3d1458ee71ecee490c98a","src/virtio_config.rs":"1ca8557d5f007c58d87df5136da69253336b70459fc1adc842c3f78769fe89a0","src/virtio_gpu.rs":"e2be1c46f9b0df8a6683955a1e4e931fd52eb8a0dcaaf5d60567a60b3c124322","src/virtio_ids.rs":"352f9f832d92aae51b9ca762fab4b45f4ca61dbe61e17c6d007bfe1963d76c6b","src/virtio_input.rs":"b0ed8f51bc63afa7448458f015ab9d615bd05c0661f890dfcf732c5a0432c608","src/virtio_mmio.rs":"23d1ffe1e8662f3345df8d36ad2c021ac4d08ce2d50b5cd08d72e61912838ad9","src/virtio_net/generated.rs":"241101f04d960a547fa0261171501253a4ffec881301941d07c6325bea4e317e","src/virtio_net/mod.rs":"dd4366b3193e6dcf4eda0fb6dd222e091d6c8ff6384089fd8a3f02cfb0fb335a","src/virtio_ring.rs":"3098db40d6e39e5ec5184659e9322016fc507490641ccdc55babe6509fd52878","src/virtio_scsi.rs":"f48d9a6f2251f8a59b25dbd76da8daea46ce055544a6efc4753dac2c158a3e34"},"package":"1711e61c00f8cb450bd15368152a1e37a12ef195008ddc7d0f4812f9e2b30a68"}
\ No newline at end of file
diff --git a/crates/virtio-bindings/Android.bp b/crates/virtio-bindings/Android.bp
index e4fb095..1f4eb3b 100644
--- a/crates/virtio-bindings/Android.bp
+++ b/crates/virtio-bindings/Android.bp
@@ -17,7 +17,16 @@
name: "libvirtio_bindings",
crate_name: "virtio_bindings",
cargo_env_compat: true,
- cargo_pkg_version: "0.2.2",
+ cargo_pkg_version: "0.2.4",
crate_root: "src/lib.rs",
edition: "2021",
+ compile_multilib: "first",
+ arch: {
+ arm64: {
+ enabled: true,
+ },
+ x86_64: {
+ enabled: true,
+ },
+ },
}
diff --git a/crates/virtio-bindings/CHANGELOG.md b/crates/virtio-bindings/CHANGELOG.md
index 8157c27..50a7657 100644
--- a/crates/virtio-bindings/CHANGELOG.md
+++ b/crates/virtio-bindings/CHANGELOG.md
@@ -1,5 +1,19 @@
# Upcoming Release
+# v0.2.4
+
+## Changed
+
+- Regenerate bindings with bindgen 0.70.1.
+
+# v0.2.3
+
+## Added
+
+- Exposed virtio_ids.h bindings as a public module.
+- Regenerate bindings with Linux 6.10.
+- Added virtio_input.h bindings.
+
# v0.2.2
## Added
diff --git a/crates/virtio-bindings/CONTRIBUTING.md b/crates/virtio-bindings/CONTRIBUTING.md
index b0d98ee..c1f1005 100644
--- a/crates/virtio-bindings/CONTRIBUTING.md
+++ b/crates/virtio-bindings/CONTRIBUTING.md
@@ -4,9 +4,9 @@
### Bindgen
The bindings are currently generated using
-[bindgen](https://rust-lang.github.io/rust-bindgen/) version 0.63.0:
+[bindgen](https://rust-lang.github.io/rust-bindgen/) version 0.70.1:
```bash
-cargo install bindgen-cli --vers 0.63.0
+cargo install bindgen-cli --vers 0.70.1
```
### Linux Kernel
@@ -39,6 +39,7 @@
virtio_config \
virtio_gpu \
virtio_ids \
+ virtio_input \
virtio_mmio \
virtio_net \
virtio_ring \
@@ -53,6 +54,6 @@
cd ~
# Step 6: Copy the generated files to the new version module.
-cp linux/v5_0_headers/*.rs vm-virtio/crates/virtio-bindings/src
-mv vm-virtio/crates/virtio-bindings/src/virtio_net.rs vm-virtio/crates/virtio-bindings/src/virtio_net/generated.rs
+cp linux/v5_0_headers/*.rs vm-virtio/virtio-bindings/src
+mv vm-virtio/virtio-bindings/src/virtio_net.rs vm-virtio/virtio-bindings/src/virtio_net/generated.rs
```
diff --git a/crates/virtio-bindings/Cargo.toml b/crates/virtio-bindings/Cargo.toml
index e9117c1..056f5d7 100644
--- a/crates/virtio-bindings/Cargo.toml
+++ b/crates/virtio-bindings/Cargo.toml
@@ -12,14 +12,22 @@
[package]
edition = "2021"
name = "virtio-bindings"
-version = "0.2.2"
+version = "0.2.4"
authors = ["Sergio Lopez <slp@redhat.com>"]
+build = false
+autobins = false
+autoexamples = false
+autotests = false
+autobenches = false
description = "Rust FFI bindings to virtio generated using bindgen."
readme = "README.md"
keywords = ["virtio"]
license = "BSD-3-Clause OR Apache-2.0"
repository = "https://github.com/rust-vmm/vm-virtio"
-resolver = "1"
+
+[lib]
+name = "virtio_bindings"
+path = "src/lib.rs"
[dependencies]
diff --git a/crates/virtio-bindings/METADATA b/crates/virtio-bindings/METADATA
index c37f983..726cf66 100644
--- a/crates/virtio-bindings/METADATA
+++ b/crates/virtio-bindings/METADATA
@@ -1,17 +1,17 @@
name: "virtio-bindings"
description: "Rust FFI bindings to virtio generated using bindgen."
third_party {
- version: "0.2.2"
+ version: "0.2.4"
license_type: NOTICE
last_upgrade_date {
year: 2024
- month: 2
- day: 6
+ month: 11
+ day: 21
}
homepage: "https://crates.io/crates/virtio-bindings"
identifier {
type: "Archive"
- value: "https://static.crates.io/crates/virtio-bindings/virtio-bindings-0.2.2.crate"
- version: "0.2.2"
+ value: "https://static.crates.io/crates/virtio-bindings/virtio-bindings-0.2.4.crate"
+ version: "0.2.4"
}
}
diff --git a/crates/virtio-bindings/cargo_embargo.json b/crates/virtio-bindings/cargo_embargo.json
index 253af3c..e40812d 100644
--- a/crates/virtio-bindings/cargo_embargo.json
+++ b/crates/virtio-bindings/cargo_embargo.json
@@ -1,7 +1,16 @@
{
"package": {
"virtio-bindings": {
- "device_supported": false
+ "device_supported": false,
+ "compile_multilib": "first",
+ "arch": {
+ "arm64": {
+ "enabled": true
+ },
+ "x86_64": {
+ "enabled": true
+ }
+ }
}
},
"run_cargo": false
diff --git a/crates/virtio-bindings/src/lib.rs b/crates/virtio-bindings/src/lib.rs
index 8040e4e..be0bf2e 100644
--- a/crates/virtio-bindings/src/lib.rs
+++ b/crates/virtio-bindings/src/lib.rs
@@ -10,6 +10,8 @@
pub mod virtio_blk;
pub mod virtio_config;
pub mod virtio_gpu;
+pub mod virtio_ids;
+pub mod virtio_input;
pub mod virtio_mmio;
pub mod virtio_net;
pub mod virtio_ring;
diff --git a/crates/virtio-bindings/src/virtio_blk.rs b/crates/virtio-bindings/src/virtio_blk.rs
index a2e5fb2..1ba7faf 100644
--- a/crates/virtio-bindings/src/virtio_blk.rs
+++ b/crates/virtio-bindings/src/virtio_blk.rs
@@ -1,4 +1,4 @@
-/* automatically generated by rust-bindgen 0.63.0 */
+/* automatically generated by rust-bindgen 0.70.1 */
#[repr(C)]
#[derive(Default)]
@@ -128,58 +128,19 @@
pub heads: __u8,
pub sectors: __u8,
}
-#[test]
-fn bindgen_test_layout_virtio_blk_config_virtio_blk_geometry() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_blk_config_virtio_blk_geometry> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_blk_config_virtio_blk_geometry>(),
- 4usize,
- concat!(
- "Size of: ",
- stringify!(virtio_blk_config_virtio_blk_geometry)
- )
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_blk_config_virtio_blk_geometry>(),
- 2usize,
- concat!(
- "Alignment of ",
- stringify!(virtio_blk_config_virtio_blk_geometry)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).cylinders) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config_virtio_blk_geometry),
- "::",
- stringify!(cylinders)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).heads) as usize - ptr as usize },
- 2usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config_virtio_blk_geometry),
- "::",
- stringify!(heads)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).sectors) as usize - ptr as usize },
- 3usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config_virtio_blk_geometry),
- "::",
- stringify!(sectors)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_blk_config_virtio_blk_geometry"]
+ [::std::mem::size_of::<virtio_blk_config_virtio_blk_geometry>() - 4usize];
+ ["Alignment of virtio_blk_config_virtio_blk_geometry"]
+ [::std::mem::align_of::<virtio_blk_config_virtio_blk_geometry>() - 2usize];
+ ["Offset of field: virtio_blk_config_virtio_blk_geometry::cylinders"]
+ [::std::mem::offset_of!(virtio_blk_config_virtio_blk_geometry, cylinders) - 0usize];
+ ["Offset of field: virtio_blk_config_virtio_blk_geometry::heads"]
+ [::std::mem::offset_of!(virtio_blk_config_virtio_blk_geometry, heads) - 2usize];
+ ["Offset of field: virtio_blk_config_virtio_blk_geometry::sectors"]
+ [::std::mem::offset_of!(virtio_blk_config_virtio_blk_geometry, sectors) - 3usize];
+};
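
The regenerated bindings replace the old per-struct `#[test] fn bindgen_test_layout_*` functions with anonymous `const` blocks: each check indexes a one-element array, so any size, alignment, or field-offset mismatch produces an out-of-bounds (or underflowing) index and the crate fails to compile instead of failing at test time. A stripped-down sketch of the idiom, where `ExampleStruct` is hypothetical and not part of these bindings:

```rust
#[repr(C)]
struct ExampleStruct {
    a: u32,
    b: u16, // followed by 2 bytes of tail padding
}

const _: () = {
    // Indexing a one-element array is only in bounds when the index is 0, so
    // any layout mismatch becomes a compile-time error.
    ["Size of ExampleStruct"][::std::mem::size_of::<ExampleStruct>() - 8usize];
    ["Alignment of ExampleStruct"][::std::mem::align_of::<ExampleStruct>() - 4usize];
    ["Offset of field: ExampleStruct::b"][::std::mem::offset_of!(ExampleStruct, b) - 4usize];
};
```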
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_blk_config_virtio_blk_zoned_characteristics {
@@ -191,345 +152,97 @@
pub model: __u8,
pub unused2: [__u8; 3usize],
}
-#[test]
-fn bindgen_test_layout_virtio_blk_config_virtio_blk_zoned_characteristics() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_blk_config_virtio_blk_zoned_characteristics> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_blk_config_virtio_blk_zoned_characteristics>(),
- 24usize,
- concat!(
- "Size of: ",
- stringify!(virtio_blk_config_virtio_blk_zoned_characteristics)
- )
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_blk_config_virtio_blk_zoned_characteristics>(),
- 4usize,
- concat!(
- "Alignment of ",
- stringify!(virtio_blk_config_virtio_blk_zoned_characteristics)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).zone_sectors) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config_virtio_blk_zoned_characteristics),
- "::",
- stringify!(zone_sectors)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_open_zones) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config_virtio_blk_zoned_characteristics),
- "::",
- stringify!(max_open_zones)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_active_zones) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config_virtio_blk_zoned_characteristics),
- "::",
- stringify!(max_active_zones)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_append_sectors) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config_virtio_blk_zoned_characteristics),
- "::",
- stringify!(max_append_sectors)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).write_granularity) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config_virtio_blk_zoned_characteristics),
- "::",
- stringify!(write_granularity)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).model) as usize - ptr as usize },
- 20usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config_virtio_blk_zoned_characteristics),
- "::",
- stringify!(model)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).unused2) as usize - ptr as usize },
- 21usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config_virtio_blk_zoned_characteristics),
- "::",
- stringify!(unused2)
- )
- );
-}
-#[test]
-fn bindgen_test_layout_virtio_blk_config() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_blk_config> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_blk_config>(),
- 96usize,
- concat!("Size of: ", stringify!(virtio_blk_config))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_blk_config>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_blk_config))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).capacity) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(capacity)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).size_max) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(size_max)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).seg_max) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(seg_max)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).geometry) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(geometry)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).blk_size) as usize - ptr as usize },
- 20usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(blk_size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).physical_block_exp) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(physical_block_exp)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).alignment_offset) as usize - ptr as usize },
- 25usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(alignment_offset)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).min_io_size) as usize - ptr as usize },
- 26usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(min_io_size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).opt_io_size) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(opt_io_size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).wce) as usize - ptr as usize },
- 32usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(wce)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).unused) as usize - ptr as usize },
- 33usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(unused)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).num_queues) as usize - ptr as usize },
- 34usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(num_queues)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_discard_sectors) as usize - ptr as usize },
- 36usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(max_discard_sectors)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_discard_seg) as usize - ptr as usize },
- 40usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(max_discard_seg)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).discard_sector_alignment) as usize - ptr as usize },
- 44usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(discard_sector_alignment)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_write_zeroes_sectors) as usize - ptr as usize },
- 48usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(max_write_zeroes_sectors)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_write_zeroes_seg) as usize - ptr as usize },
- 52usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(max_write_zeroes_seg)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).write_zeroes_may_unmap) as usize - ptr as usize },
- 56usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(write_zeroes_may_unmap)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).unused1) as usize - ptr as usize },
- 57usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(unused1)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_secure_erase_sectors) as usize - ptr as usize },
- 60usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(max_secure_erase_sectors)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_secure_erase_seg) as usize - ptr as usize },
- 64usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(max_secure_erase_seg)
- )
- );
- assert_eq!(
- unsafe {
- ::std::ptr::addr_of!((*ptr).secure_erase_sector_alignment) as usize - ptr as usize
- },
- 68usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(secure_erase_sector_alignment)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).zoned) as usize - ptr as usize },
- 72usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_config),
- "::",
- stringify!(zoned)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_blk_config_virtio_blk_zoned_characteristics"]
+ [::std::mem::size_of::<virtio_blk_config_virtio_blk_zoned_characteristics>() - 24usize];
+ ["Alignment of virtio_blk_config_virtio_blk_zoned_characteristics"]
+ [::std::mem::align_of::<virtio_blk_config_virtio_blk_zoned_characteristics>() - 4usize];
+ ["Offset of field: virtio_blk_config_virtio_blk_zoned_characteristics::zone_sectors"][::std::mem::offset_of!(
+ virtio_blk_config_virtio_blk_zoned_characteristics,
+ zone_sectors
+ )
+ - 0usize];
+ ["Offset of field: virtio_blk_config_virtio_blk_zoned_characteristics::max_open_zones"][::std::mem::offset_of!(
+ virtio_blk_config_virtio_blk_zoned_characteristics,
+ max_open_zones
+ )
+ - 4usize];
+ ["Offset of field: virtio_blk_config_virtio_blk_zoned_characteristics::max_active_zones"][::std::mem::offset_of!(
+ virtio_blk_config_virtio_blk_zoned_characteristics,
+ max_active_zones
+ )
+ - 8usize];
+ ["Offset of field: virtio_blk_config_virtio_blk_zoned_characteristics::max_append_sectors"][::std::mem::offset_of!(
+ virtio_blk_config_virtio_blk_zoned_characteristics,
+ max_append_sectors
+ )
+ - 12usize];
+ ["Offset of field: virtio_blk_config_virtio_blk_zoned_characteristics::write_granularity"][::std::mem::offset_of!(
+ virtio_blk_config_virtio_blk_zoned_characteristics,
+ write_granularity
+ )
+ - 16usize];
+ ["Offset of field: virtio_blk_config_virtio_blk_zoned_characteristics::model"][::std::mem::offset_of!(
+ virtio_blk_config_virtio_blk_zoned_characteristics,
+ model
+ ) - 20usize];
+ ["Offset of field: virtio_blk_config_virtio_blk_zoned_characteristics::unused2"][::std::mem::offset_of!(
+ virtio_blk_config_virtio_blk_zoned_characteristics,
+ unused2
+ ) - 21usize];
+};
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_blk_config"][::std::mem::size_of::<virtio_blk_config>() - 96usize];
+ ["Alignment of virtio_blk_config"][::std::mem::align_of::<virtio_blk_config>() - 1usize];
+ ["Offset of field: virtio_blk_config::capacity"]
+ [::std::mem::offset_of!(virtio_blk_config, capacity) - 0usize];
+ ["Offset of field: virtio_blk_config::size_max"]
+ [::std::mem::offset_of!(virtio_blk_config, size_max) - 8usize];
+ ["Offset of field: virtio_blk_config::seg_max"]
+ [::std::mem::offset_of!(virtio_blk_config, seg_max) - 12usize];
+ ["Offset of field: virtio_blk_config::geometry"]
+ [::std::mem::offset_of!(virtio_blk_config, geometry) - 16usize];
+ ["Offset of field: virtio_blk_config::blk_size"]
+ [::std::mem::offset_of!(virtio_blk_config, blk_size) - 20usize];
+ ["Offset of field: virtio_blk_config::physical_block_exp"]
+ [::std::mem::offset_of!(virtio_blk_config, physical_block_exp) - 24usize];
+ ["Offset of field: virtio_blk_config::alignment_offset"]
+ [::std::mem::offset_of!(virtio_blk_config, alignment_offset) - 25usize];
+ ["Offset of field: virtio_blk_config::min_io_size"]
+ [::std::mem::offset_of!(virtio_blk_config, min_io_size) - 26usize];
+ ["Offset of field: virtio_blk_config::opt_io_size"]
+ [::std::mem::offset_of!(virtio_blk_config, opt_io_size) - 28usize];
+ ["Offset of field: virtio_blk_config::wce"]
+ [::std::mem::offset_of!(virtio_blk_config, wce) - 32usize];
+ ["Offset of field: virtio_blk_config::unused"]
+ [::std::mem::offset_of!(virtio_blk_config, unused) - 33usize];
+ ["Offset of field: virtio_blk_config::num_queues"]
+ [::std::mem::offset_of!(virtio_blk_config, num_queues) - 34usize];
+ ["Offset of field: virtio_blk_config::max_discard_sectors"]
+ [::std::mem::offset_of!(virtio_blk_config, max_discard_sectors) - 36usize];
+ ["Offset of field: virtio_blk_config::max_discard_seg"]
+ [::std::mem::offset_of!(virtio_blk_config, max_discard_seg) - 40usize];
+ ["Offset of field: virtio_blk_config::discard_sector_alignment"]
+ [::std::mem::offset_of!(virtio_blk_config, discard_sector_alignment) - 44usize];
+ ["Offset of field: virtio_blk_config::max_write_zeroes_sectors"]
+ [::std::mem::offset_of!(virtio_blk_config, max_write_zeroes_sectors) - 48usize];
+ ["Offset of field: virtio_blk_config::max_write_zeroes_seg"]
+ [::std::mem::offset_of!(virtio_blk_config, max_write_zeroes_seg) - 52usize];
+ ["Offset of field: virtio_blk_config::write_zeroes_may_unmap"]
+ [::std::mem::offset_of!(virtio_blk_config, write_zeroes_may_unmap) - 56usize];
+ ["Offset of field: virtio_blk_config::unused1"]
+ [::std::mem::offset_of!(virtio_blk_config, unused1) - 57usize];
+ ["Offset of field: virtio_blk_config::max_secure_erase_sectors"]
+ [::std::mem::offset_of!(virtio_blk_config, max_secure_erase_sectors) - 60usize];
+ ["Offset of field: virtio_blk_config::max_secure_erase_seg"]
+ [::std::mem::offset_of!(virtio_blk_config, max_secure_erase_seg) - 64usize];
+ ["Offset of field: virtio_blk_config::secure_erase_sector_alignment"]
+ [::std::mem::offset_of!(virtio_blk_config, secure_erase_sector_alignment) - 68usize];
+ ["Offset of field: virtio_blk_config::zoned"]
+ [::std::mem::offset_of!(virtio_blk_config, zoned) - 72usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_blk_outhdr {
@@ -537,51 +250,17 @@
pub ioprio: __virtio32,
pub sector: __virtio64,
}
-#[test]
-fn bindgen_test_layout_virtio_blk_outhdr() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_blk_outhdr> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_blk_outhdr>(),
- 16usize,
- concat!("Size of: ", stringify!(virtio_blk_outhdr))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_blk_outhdr>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_blk_outhdr))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).type_) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_outhdr),
- "::",
- stringify!(type_)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).ioprio) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_outhdr),
- "::",
- stringify!(ioprio)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).sector) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_outhdr),
- "::",
- stringify!(sector)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_blk_outhdr"][::std::mem::size_of::<virtio_blk_outhdr>() - 16usize];
+ ["Alignment of virtio_blk_outhdr"][::std::mem::align_of::<virtio_blk_outhdr>() - 8usize];
+ ["Offset of field: virtio_blk_outhdr::type_"]
+ [::std::mem::offset_of!(virtio_blk_outhdr, type_) - 0usize];
+ ["Offset of field: virtio_blk_outhdr::ioprio"]
+ [::std::mem::offset_of!(virtio_blk_outhdr, ioprio) - 4usize];
+ ["Offset of field: virtio_blk_outhdr::sector"]
+ [::std::mem::offset_of!(virtio_blk_outhdr, sector) - 8usize];
+};
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct virtio_blk_zone_descriptor {
@@ -592,82 +271,25 @@
pub z_state: __u8,
pub reserved: [__u8; 38usize],
}
-#[test]
-fn bindgen_test_layout_virtio_blk_zone_descriptor() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_blk_zone_descriptor> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_blk_zone_descriptor>(),
- 64usize,
- concat!("Size of: ", stringify!(virtio_blk_zone_descriptor))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_blk_zone_descriptor>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_blk_zone_descriptor))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).z_cap) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_zone_descriptor),
- "::",
- stringify!(z_cap)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).z_start) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_zone_descriptor),
- "::",
- stringify!(z_start)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).z_wp) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_zone_descriptor),
- "::",
- stringify!(z_wp)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).z_type) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_zone_descriptor),
- "::",
- stringify!(z_type)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).z_state) as usize - ptr as usize },
- 25usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_zone_descriptor),
- "::",
- stringify!(z_state)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).reserved) as usize - ptr as usize },
- 26usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_zone_descriptor),
- "::",
- stringify!(reserved)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_blk_zone_descriptor"]
+ [::std::mem::size_of::<virtio_blk_zone_descriptor>() - 64usize];
+ ["Alignment of virtio_blk_zone_descriptor"]
+ [::std::mem::align_of::<virtio_blk_zone_descriptor>() - 8usize];
+ ["Offset of field: virtio_blk_zone_descriptor::z_cap"]
+ [::std::mem::offset_of!(virtio_blk_zone_descriptor, z_cap) - 0usize];
+ ["Offset of field: virtio_blk_zone_descriptor::z_start"]
+ [::std::mem::offset_of!(virtio_blk_zone_descriptor, z_start) - 8usize];
+ ["Offset of field: virtio_blk_zone_descriptor::z_wp"]
+ [::std::mem::offset_of!(virtio_blk_zone_descriptor, z_wp) - 16usize];
+ ["Offset of field: virtio_blk_zone_descriptor::z_type"]
+ [::std::mem::offset_of!(virtio_blk_zone_descriptor, z_type) - 24usize];
+ ["Offset of field: virtio_blk_zone_descriptor::z_state"]
+ [::std::mem::offset_of!(virtio_blk_zone_descriptor, z_state) - 25usize];
+ ["Offset of field: virtio_blk_zone_descriptor::reserved"]
+ [::std::mem::offset_of!(virtio_blk_zone_descriptor, reserved) - 26usize];
+};
impl Default for virtio_blk_zone_descriptor {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
@@ -684,52 +306,18 @@
pub reserved: [__u8; 56usize],
pub zones: __IncompleteArrayField<virtio_blk_zone_descriptor>,
}
-#[test]
-fn bindgen_test_layout_virtio_blk_zone_report() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_blk_zone_report> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_blk_zone_report>(),
- 64usize,
- concat!("Size of: ", stringify!(virtio_blk_zone_report))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_blk_zone_report>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_blk_zone_report))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).nr_zones) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_zone_report),
- "::",
- stringify!(nr_zones)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).reserved) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_zone_report),
- "::",
- stringify!(reserved)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).zones) as usize - ptr as usize },
- 64usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_zone_report),
- "::",
- stringify!(zones)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_blk_zone_report"][::std::mem::size_of::<virtio_blk_zone_report>() - 64usize];
+ ["Alignment of virtio_blk_zone_report"]
+ [::std::mem::align_of::<virtio_blk_zone_report>() - 8usize];
+ ["Offset of field: virtio_blk_zone_report::nr_zones"]
+ [::std::mem::offset_of!(virtio_blk_zone_report, nr_zones) - 0usize];
+ ["Offset of field: virtio_blk_zone_report::reserved"]
+ [::std::mem::offset_of!(virtio_blk_zone_report, reserved) - 8usize];
+ ["Offset of field: virtio_blk_zone_report::zones"]
+ [::std::mem::offset_of!(virtio_blk_zone_report, zones) - 64usize];
+};
impl Default for virtio_blk_zone_report {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
@@ -746,52 +334,19 @@
pub num_sectors: __le32,
pub flags: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_blk_discard_write_zeroes() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_blk_discard_write_zeroes> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_blk_discard_write_zeroes>(),
- 16usize,
- concat!("Size of: ", stringify!(virtio_blk_discard_write_zeroes))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_blk_discard_write_zeroes>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_blk_discard_write_zeroes))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).sector) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_discard_write_zeroes),
- "::",
- stringify!(sector)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).num_sectors) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_discard_write_zeroes),
- "::",
- stringify!(num_sectors)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_blk_discard_write_zeroes),
- "::",
- stringify!(flags)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_blk_discard_write_zeroes"]
+ [::std::mem::size_of::<virtio_blk_discard_write_zeroes>() - 16usize];
+ ["Alignment of virtio_blk_discard_write_zeroes"]
+ [::std::mem::align_of::<virtio_blk_discard_write_zeroes>() - 8usize];
+ ["Offset of field: virtio_blk_discard_write_zeroes::sector"]
+ [::std::mem::offset_of!(virtio_blk_discard_write_zeroes, sector) - 0usize];
+ ["Offset of field: virtio_blk_discard_write_zeroes::num_sectors"]
+ [::std::mem::offset_of!(virtio_blk_discard_write_zeroes, num_sectors) - 8usize];
+ ["Offset of field: virtio_blk_discard_write_zeroes::flags"]
+ [::std::mem::offset_of!(virtio_blk_discard_write_zeroes, flags) - 12usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_scsi_inhdr {
@@ -800,58 +355,16 @@
pub sense_len: __virtio32,
pub residual: __virtio32,
}
-#[test]
-fn bindgen_test_layout_virtio_scsi_inhdr() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_scsi_inhdr> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_scsi_inhdr>(),
- 16usize,
- concat!("Size of: ", stringify!(virtio_scsi_inhdr))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_scsi_inhdr>(),
- 4usize,
- concat!("Alignment of ", stringify!(virtio_scsi_inhdr))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).errors) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_inhdr),
- "::",
- stringify!(errors)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).data_len) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_inhdr),
- "::",
- stringify!(data_len)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).sense_len) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_inhdr),
- "::",
- stringify!(sense_len)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).residual) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_inhdr),
- "::",
- stringify!(residual)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_scsi_inhdr"][::std::mem::size_of::<virtio_scsi_inhdr>() - 16usize];
+ ["Alignment of virtio_scsi_inhdr"][::std::mem::align_of::<virtio_scsi_inhdr>() - 4usize];
+ ["Offset of field: virtio_scsi_inhdr::errors"]
+ [::std::mem::offset_of!(virtio_scsi_inhdr, errors) - 0usize];
+ ["Offset of field: virtio_scsi_inhdr::data_len"]
+ [::std::mem::offset_of!(virtio_scsi_inhdr, data_len) - 4usize];
+ ["Offset of field: virtio_scsi_inhdr::sense_len"]
+ [::std::mem::offset_of!(virtio_scsi_inhdr, sense_len) - 8usize];
+ ["Offset of field: virtio_scsi_inhdr::residual"]
+ [::std::mem::offset_of!(virtio_scsi_inhdr, residual) - 12usize];
+};
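Note on the generated pattern above: rust-bindgen 0.70.x drops the old #[test] fn bindgen_test_layout_* functions (runtime assertions over MaybeUninit raw pointers) in favor of anonymous const _: () blocks built on ::std::mem::offset_of!, which was stabilized in Rust 1.77. Indexing a one-element array with actual - expected is a no-op when the two values match and aborts const evaluation with an index-out-of-bounds error when they do not, so any layout drift now fails the build itself instead of a test run. A minimal, self-contained sketch of the same trick follows; the Example struct and its expected size, alignment, and offsets are invented for illustration and are not part of this patch.

// Illustrative sketch only: Example is a made-up #[repr(C)] struct; the
// expected values below are simply what the C layout rules give it
// (u32 at 0, 4 bytes of padding, u64 at 8, total size 16, alignment 8).
#[repr(C)]
struct Example {
    a: u32,
    b: u64,
}

#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    // Each line evaluates ["label"][actual - expected] at compile time:
    // index 0 is valid, any other index stops const evaluation with an error.
    ["Size of Example"][::std::mem::size_of::<Example>() - 16usize];
    ["Alignment of Example"][::std::mem::align_of::<Example>() - 8usize];
    ["Offset of field: Example::a"][::std::mem::offset_of!(Example, a) - 0usize];
    ["Offset of field: Example::b"][::std::mem::offset_of!(Example, b) - 8usize];
};

fn main() {}

The clippy allow mirrors the generated code: the zero-offset checks (subtracting 0usize) and the discarded indexing expressions would otherwise trip the identity_op and unnecessary_operation lints.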
diff --git a/crates/virtio-bindings/src/virtio_config.rs b/crates/virtio-bindings/src/virtio_config.rs
index 7a6d163..6d4ed5d 100644
--- a/crates/virtio-bindings/src/virtio_config.rs
+++ b/crates/virtio-bindings/src/virtio_config.rs
@@ -1,4 +1,4 @@
-/* automatically generated by rust-bindgen 0.63.0 */
+/* automatically generated by rust-bindgen 0.70.1 */
pub const VIRTIO_CONFIG_S_ACKNOWLEDGE: u32 = 1;
pub const VIRTIO_CONFIG_S_DRIVER: u32 = 2;
@@ -7,7 +7,7 @@
pub const VIRTIO_CONFIG_S_NEEDS_RESET: u32 = 64;
pub const VIRTIO_CONFIG_S_FAILED: u32 = 128;
pub const VIRTIO_TRANSPORT_F_START: u32 = 28;
-pub const VIRTIO_TRANSPORT_F_END: u32 = 41;
+pub const VIRTIO_TRANSPORT_F_END: u32 = 42;
pub const VIRTIO_F_NOTIFY_ON_EMPTY: u32 = 24;
pub const VIRTIO_F_ANY_LAYOUT: u32 = 27;
pub const VIRTIO_F_VERSION_1: u32 = 32;
@@ -18,4 +18,6 @@
pub const VIRTIO_F_ORDER_PLATFORM: u32 = 36;
pub const VIRTIO_F_SR_IOV: u32 = 37;
pub const VIRTIO_F_NOTIFICATION_DATA: u32 = 38;
+pub const VIRTIO_F_NOTIF_CONFIG_DATA: u32 = 39;
pub const VIRTIO_F_RING_RESET: u32 = 40;
+pub const VIRTIO_F_ADMIN_VQ: u32 = 41;
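The constants in virtio_config.rs are feature bit positions rather than masks, which is why VIRTIO_TRANSPORT_F_END moves from 41 to 42 once VIRTIO_F_ADMIN_VQ occupies bit 41. A hedged sketch of how such constants are typically folded into a feature mask during negotiation follows; device_features and supports_ring_reset are invented helper names for this sketch, not APIs from vhost-user-backend or this patch.

// Illustrative sketch only: plain bit arithmetic over the bit-position
// constants, using values shown in the diff above.
const VIRTIO_F_VERSION_1: u32 = 32;
const VIRTIO_F_RING_RESET: u32 = 40;

// Each constant names a bit position, so shift 1 into place to build a mask.
fn device_features() -> u64 {
    (1u64 << VIRTIO_F_VERSION_1) | (1u64 << VIRTIO_F_RING_RESET)
}

// After negotiation, test the same bit to see whether the feature was kept.
fn supports_ring_reset(negotiated: u64) -> bool {
    negotiated & (1u64 << VIRTIO_F_RING_RESET) != 0
}

fn main() {
    let negotiated = device_features();
    assert!(supports_ring_reset(negotiated));
}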
diff --git a/crates/virtio-bindings/src/virtio_gpu.rs b/crates/virtio-bindings/src/virtio_gpu.rs
index ba83c90..775c5a1 100644
--- a/crates/virtio-bindings/src/virtio_gpu.rs
+++ b/crates/virtio-bindings/src/virtio_gpu.rs
@@ -1,4 +1,4 @@
-/* automatically generated by rust-bindgen 0.63.0 */
+/* automatically generated by rust-bindgen 0.70.1 */
#[repr(C)]
#[derive(Default)]
@@ -42,6 +42,7 @@
pub const VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK: u32 = 255;
pub const VIRTIO_GPU_CAPSET_VIRGL: u32 = 1;
pub const VIRTIO_GPU_CAPSET_VIRGL2: u32 = 2;
+pub const VIRTIO_GPU_CAPSET_VENUS: u32 = 4;
pub const VIRTIO_GPU_EVENT_DISPLAY: u32 = 1;
pub const VIRTIO_GPU_BLOB_MEM_GUEST: u32 = 1;
pub const VIRTIO_GPU_BLOB_MEM_HOST3D: u32 = 2;
@@ -113,81 +114,23 @@
pub ring_idx: __u8,
pub padding: [__u8; 3usize],
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_ctrl_hdr() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_ctrl_hdr> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_ctrl_hdr>(),
- 24usize,
- concat!("Size of: ", stringify!(virtio_gpu_ctrl_hdr))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_ctrl_hdr>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_ctrl_hdr))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).type_) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctrl_hdr),
- "::",
- stringify!(type_)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctrl_hdr),
- "::",
- stringify!(flags)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).fence_id) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctrl_hdr),
- "::",
- stringify!(fence_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).ctx_id) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctrl_hdr),
- "::",
- stringify!(ctx_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).ring_idx) as usize - ptr as usize },
- 20usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctrl_hdr),
- "::",
- stringify!(ring_idx)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 21usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctrl_hdr),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_ctrl_hdr"][::std::mem::size_of::<virtio_gpu_ctrl_hdr>() - 24usize];
+ ["Alignment of virtio_gpu_ctrl_hdr"][::std::mem::align_of::<virtio_gpu_ctrl_hdr>() - 8usize];
+ ["Offset of field: virtio_gpu_ctrl_hdr::type_"]
+ [::std::mem::offset_of!(virtio_gpu_ctrl_hdr, type_) - 0usize];
+ ["Offset of field: virtio_gpu_ctrl_hdr::flags"]
+ [::std::mem::offset_of!(virtio_gpu_ctrl_hdr, flags) - 4usize];
+ ["Offset of field: virtio_gpu_ctrl_hdr::fence_id"]
+ [::std::mem::offset_of!(virtio_gpu_ctrl_hdr, fence_id) - 8usize];
+ ["Offset of field: virtio_gpu_ctrl_hdr::ctx_id"]
+ [::std::mem::offset_of!(virtio_gpu_ctrl_hdr, ctx_id) - 16usize];
+ ["Offset of field: virtio_gpu_ctrl_hdr::ring_idx"]
+ [::std::mem::offset_of!(virtio_gpu_ctrl_hdr, ring_idx) - 20usize];
+ ["Offset of field: virtio_gpu_ctrl_hdr::padding"]
+ [::std::mem::offset_of!(virtio_gpu_ctrl_hdr, padding) - 21usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_cursor_pos {
@@ -196,62 +139,20 @@
pub y: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_cursor_pos() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_cursor_pos> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_cursor_pos>(),
- 16usize,
- concat!("Size of: ", stringify!(virtio_gpu_cursor_pos))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_cursor_pos>(),
- 4usize,
- concat!("Alignment of ", stringify!(virtio_gpu_cursor_pos))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).scanout_id) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_cursor_pos),
- "::",
- stringify!(scanout_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).x) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_cursor_pos),
- "::",
- stringify!(x)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).y) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_cursor_pos),
- "::",
- stringify!(y)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_cursor_pos),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_cursor_pos"][::std::mem::size_of::<virtio_gpu_cursor_pos>() - 16usize];
+ ["Alignment of virtio_gpu_cursor_pos"]
+ [::std::mem::align_of::<virtio_gpu_cursor_pos>() - 4usize];
+ ["Offset of field: virtio_gpu_cursor_pos::scanout_id"]
+ [::std::mem::offset_of!(virtio_gpu_cursor_pos, scanout_id) - 0usize];
+ ["Offset of field: virtio_gpu_cursor_pos::x"]
+ [::std::mem::offset_of!(virtio_gpu_cursor_pos, x) - 4usize];
+ ["Offset of field: virtio_gpu_cursor_pos::y"]
+ [::std::mem::offset_of!(virtio_gpu_cursor_pos, y) - 8usize];
+ ["Offset of field: virtio_gpu_cursor_pos::padding"]
+ [::std::mem::offset_of!(virtio_gpu_cursor_pos, padding) - 12usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_update_cursor {
@@ -262,82 +163,25 @@
pub hot_y: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_update_cursor() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_update_cursor> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_update_cursor>(),
- 56usize,
- concat!("Size of: ", stringify!(virtio_gpu_update_cursor))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_update_cursor>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_update_cursor))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_update_cursor),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).pos) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_update_cursor),
- "::",
- stringify!(pos)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 40usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_update_cursor),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hot_x) as usize - ptr as usize },
- 44usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_update_cursor),
- "::",
- stringify!(hot_x)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hot_y) as usize - ptr as usize },
- 48usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_update_cursor),
- "::",
- stringify!(hot_y)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 52usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_update_cursor),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_update_cursor"]
+ [::std::mem::size_of::<virtio_gpu_update_cursor>() - 56usize];
+ ["Alignment of virtio_gpu_update_cursor"]
+ [::std::mem::align_of::<virtio_gpu_update_cursor>() - 8usize];
+ ["Offset of field: virtio_gpu_update_cursor::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_update_cursor, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_update_cursor::pos"]
+ [::std::mem::offset_of!(virtio_gpu_update_cursor, pos) - 24usize];
+ ["Offset of field: virtio_gpu_update_cursor::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_update_cursor, resource_id) - 40usize];
+ ["Offset of field: virtio_gpu_update_cursor::hot_x"]
+ [::std::mem::offset_of!(virtio_gpu_update_cursor, hot_x) - 44usize];
+ ["Offset of field: virtio_gpu_update_cursor::hot_y"]
+ [::std::mem::offset_of!(virtio_gpu_update_cursor, hot_y) - 48usize];
+ ["Offset of field: virtio_gpu_update_cursor::padding"]
+ [::std::mem::offset_of!(virtio_gpu_update_cursor, padding) - 52usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_rect {
@@ -346,61 +190,17 @@
pub width: __le32,
pub height: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_rect() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_rect> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_rect>(),
- 16usize,
- concat!("Size of: ", stringify!(virtio_gpu_rect))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_rect>(),
- 4usize,
- concat!("Alignment of ", stringify!(virtio_gpu_rect))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).x) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_rect),
- "::",
- stringify!(x)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).y) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_rect),
- "::",
- stringify!(y)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).width) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_rect),
- "::",
- stringify!(width)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).height) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_rect),
- "::",
- stringify!(height)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_rect"][::std::mem::size_of::<virtio_gpu_rect>() - 16usize];
+ ["Alignment of virtio_gpu_rect"][::std::mem::align_of::<virtio_gpu_rect>() - 4usize];
+ ["Offset of field: virtio_gpu_rect::x"][::std::mem::offset_of!(virtio_gpu_rect, x) - 0usize];
+ ["Offset of field: virtio_gpu_rect::y"][::std::mem::offset_of!(virtio_gpu_rect, y) - 4usize];
+ ["Offset of field: virtio_gpu_rect::width"]
+ [::std::mem::offset_of!(virtio_gpu_rect, width) - 8usize];
+ ["Offset of field: virtio_gpu_rect::height"]
+ [::std::mem::offset_of!(virtio_gpu_rect, height) - 12usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resource_unref {
@@ -408,52 +208,19 @@
pub resource_id: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resource_unref() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resource_unref> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resource_unref>(),
- 32usize,
- concat!("Size of: ", stringify!(virtio_gpu_resource_unref))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resource_unref>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resource_unref))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_unref),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_unref),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_unref),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resource_unref"]
+ [::std::mem::size_of::<virtio_gpu_resource_unref>() - 32usize];
+ ["Alignment of virtio_gpu_resource_unref"]
+ [::std::mem::align_of::<virtio_gpu_resource_unref>() - 8usize];
+ ["Offset of field: virtio_gpu_resource_unref::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resource_unref, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resource_unref::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_resource_unref, resource_id) - 24usize];
+ ["Offset of field: virtio_gpu_resource_unref::padding"]
+ [::std::mem::offset_of!(virtio_gpu_resource_unref, padding) - 28usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resource_create_2d {
@@ -463,72 +230,23 @@
pub width: __le32,
pub height: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resource_create_2d() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resource_create_2d> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resource_create_2d>(),
- 40usize,
- concat!("Size of: ", stringify!(virtio_gpu_resource_create_2d))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resource_create_2d>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resource_create_2d))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_2d),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_2d),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).format) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_2d),
- "::",
- stringify!(format)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).width) as usize - ptr as usize },
- 32usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_2d),
- "::",
- stringify!(width)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).height) as usize - ptr as usize },
- 36usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_2d),
- "::",
- stringify!(height)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resource_create_2d"]
+ [::std::mem::size_of::<virtio_gpu_resource_create_2d>() - 40usize];
+ ["Alignment of virtio_gpu_resource_create_2d"]
+ [::std::mem::align_of::<virtio_gpu_resource_create_2d>() - 8usize];
+ ["Offset of field: virtio_gpu_resource_create_2d::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_2d, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resource_create_2d::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_2d, resource_id) - 24usize];
+ ["Offset of field: virtio_gpu_resource_create_2d::format"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_2d, format) - 28usize];
+ ["Offset of field: virtio_gpu_resource_create_2d::width"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_2d, width) - 32usize];
+ ["Offset of field: virtio_gpu_resource_create_2d::height"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_2d, height) - 36usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_set_scanout {
@@ -537,62 +255,20 @@
pub scanout_id: __le32,
pub resource_id: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_set_scanout() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_set_scanout> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_set_scanout>(),
- 48usize,
- concat!("Size of: ", stringify!(virtio_gpu_set_scanout))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_set_scanout>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_set_scanout))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).r) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout),
- "::",
- stringify!(r)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).scanout_id) as usize - ptr as usize },
- 40usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout),
- "::",
- stringify!(scanout_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 44usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout),
- "::",
- stringify!(resource_id)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_set_scanout"][::std::mem::size_of::<virtio_gpu_set_scanout>() - 48usize];
+ ["Alignment of virtio_gpu_set_scanout"]
+ [::std::mem::align_of::<virtio_gpu_set_scanout>() - 8usize];
+ ["Offset of field: virtio_gpu_set_scanout::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_set_scanout::r"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout, r) - 24usize];
+ ["Offset of field: virtio_gpu_set_scanout::scanout_id"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout, scanout_id) - 40usize];
+ ["Offset of field: virtio_gpu_set_scanout::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout, resource_id) - 44usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resource_flush {
@@ -601,62 +277,21 @@
pub resource_id: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resource_flush() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resource_flush> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resource_flush>(),
- 48usize,
- concat!("Size of: ", stringify!(virtio_gpu_resource_flush))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resource_flush>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resource_flush))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_flush),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).r) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_flush),
- "::",
- stringify!(r)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 40usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_flush),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 44usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_flush),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resource_flush"]
+ [::std::mem::size_of::<virtio_gpu_resource_flush>() - 48usize];
+ ["Alignment of virtio_gpu_resource_flush"]
+ [::std::mem::align_of::<virtio_gpu_resource_flush>() - 8usize];
+ ["Offset of field: virtio_gpu_resource_flush::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resource_flush, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resource_flush::r"]
+ [::std::mem::offset_of!(virtio_gpu_resource_flush, r) - 24usize];
+ ["Offset of field: virtio_gpu_resource_flush::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_resource_flush, resource_id) - 40usize];
+ ["Offset of field: virtio_gpu_resource_flush::padding"]
+ [::std::mem::offset_of!(virtio_gpu_resource_flush, padding) - 44usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_transfer_to_host_2d {
@@ -666,72 +301,23 @@
pub resource_id: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_transfer_to_host_2d() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_transfer_to_host_2d> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_transfer_to_host_2d>(),
- 56usize,
- concat!("Size of: ", stringify!(virtio_gpu_transfer_to_host_2d))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_transfer_to_host_2d>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_transfer_to_host_2d))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_to_host_2d),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).r) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_to_host_2d),
- "::",
- stringify!(r)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).offset) as usize - ptr as usize },
- 40usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_to_host_2d),
- "::",
- stringify!(offset)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 48usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_to_host_2d),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 52usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_to_host_2d),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_transfer_to_host_2d"]
+ [::std::mem::size_of::<virtio_gpu_transfer_to_host_2d>() - 56usize];
+ ["Alignment of virtio_gpu_transfer_to_host_2d"]
+ [::std::mem::align_of::<virtio_gpu_transfer_to_host_2d>() - 8usize];
+ ["Offset of field: virtio_gpu_transfer_to_host_2d::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_to_host_2d, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_transfer_to_host_2d::r"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_to_host_2d, r) - 24usize];
+ ["Offset of field: virtio_gpu_transfer_to_host_2d::offset"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_to_host_2d, offset) - 40usize];
+ ["Offset of field: virtio_gpu_transfer_to_host_2d::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_to_host_2d, resource_id) - 48usize];
+ ["Offset of field: virtio_gpu_transfer_to_host_2d::padding"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_to_host_2d, padding) - 52usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_mem_entry {
@@ -739,51 +325,17 @@
pub length: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_mem_entry() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_mem_entry> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_mem_entry>(),
- 16usize,
- concat!("Size of: ", stringify!(virtio_gpu_mem_entry))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_mem_entry>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_mem_entry))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).addr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_mem_entry),
- "::",
- stringify!(addr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).length) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_mem_entry),
- "::",
- stringify!(length)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_mem_entry),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_mem_entry"][::std::mem::size_of::<virtio_gpu_mem_entry>() - 16usize];
+ ["Alignment of virtio_gpu_mem_entry"][::std::mem::align_of::<virtio_gpu_mem_entry>() - 8usize];
+ ["Offset of field: virtio_gpu_mem_entry::addr"]
+ [::std::mem::offset_of!(virtio_gpu_mem_entry, addr) - 0usize];
+ ["Offset of field: virtio_gpu_mem_entry::length"]
+ [::std::mem::offset_of!(virtio_gpu_mem_entry, length) - 8usize];
+ ["Offset of field: virtio_gpu_mem_entry::padding"]
+ [::std::mem::offset_of!(virtio_gpu_mem_entry, padding) - 12usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resource_attach_backing {
@@ -791,55 +343,19 @@
pub resource_id: __le32,
pub nr_entries: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resource_attach_backing() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resource_attach_backing> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resource_attach_backing>(),
- 32usize,
- concat!("Size of: ", stringify!(virtio_gpu_resource_attach_backing))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resource_attach_backing>(),
- 8usize,
- concat!(
- "Alignment of ",
- stringify!(virtio_gpu_resource_attach_backing)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_attach_backing),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_attach_backing),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).nr_entries) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_attach_backing),
- "::",
- stringify!(nr_entries)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resource_attach_backing"]
+ [::std::mem::size_of::<virtio_gpu_resource_attach_backing>() - 32usize];
+ ["Alignment of virtio_gpu_resource_attach_backing"]
+ [::std::mem::align_of::<virtio_gpu_resource_attach_backing>() - 8usize];
+ ["Offset of field: virtio_gpu_resource_attach_backing::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resource_attach_backing, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resource_attach_backing::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_resource_attach_backing, resource_id) - 24usize];
+ ["Offset of field: virtio_gpu_resource_attach_backing::nr_entries"]
+ [::std::mem::offset_of!(virtio_gpu_resource_attach_backing, nr_entries) - 28usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resource_detach_backing {
@@ -847,55 +363,19 @@
pub resource_id: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resource_detach_backing() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resource_detach_backing> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resource_detach_backing>(),
- 32usize,
- concat!("Size of: ", stringify!(virtio_gpu_resource_detach_backing))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resource_detach_backing>(),
- 8usize,
- concat!(
- "Alignment of ",
- stringify!(virtio_gpu_resource_detach_backing)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_detach_backing),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_detach_backing),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_detach_backing),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resource_detach_backing"]
+ [::std::mem::size_of::<virtio_gpu_resource_detach_backing>() - 32usize];
+ ["Alignment of virtio_gpu_resource_detach_backing"]
+ [::std::mem::align_of::<virtio_gpu_resource_detach_backing>() - 8usize];
+ ["Offset of field: virtio_gpu_resource_detach_backing::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resource_detach_backing, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resource_detach_backing::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_resource_detach_backing, resource_id) - 24usize];
+ ["Offset of field: virtio_gpu_resource_detach_backing::padding"]
+ [::std::mem::offset_of!(virtio_gpu_resource_detach_backing, padding) - 28usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resp_display_info {
@@ -909,94 +389,34 @@
pub enabled: __le32,
pub flags: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resp_display_info_virtio_gpu_display_one() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resp_display_info_virtio_gpu_display_one> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resp_display_info_virtio_gpu_display_one>(),
- 24usize,
- concat!(
- "Size of: ",
- stringify!(virtio_gpu_resp_display_info_virtio_gpu_display_one)
- )
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resp_display_info_virtio_gpu_display_one>(),
- 4usize,
- concat!(
- "Alignment of ",
- stringify!(virtio_gpu_resp_display_info_virtio_gpu_display_one)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).r) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_display_info_virtio_gpu_display_one),
- "::",
- stringify!(r)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).enabled) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_display_info_virtio_gpu_display_one),
- "::",
- stringify!(enabled)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
- 20usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_display_info_virtio_gpu_display_one),
- "::",
- stringify!(flags)
- )
- );
-}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resp_display_info() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resp_display_info> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resp_display_info>(),
- 408usize,
- concat!("Size of: ", stringify!(virtio_gpu_resp_display_info))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resp_display_info>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resp_display_info))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_display_info),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).pmodes) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_display_info),
- "::",
- stringify!(pmodes)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resp_display_info_virtio_gpu_display_one"]
+ [::std::mem::size_of::<virtio_gpu_resp_display_info_virtio_gpu_display_one>() - 24usize];
+ ["Alignment of virtio_gpu_resp_display_info_virtio_gpu_display_one"]
+ [::std::mem::align_of::<virtio_gpu_resp_display_info_virtio_gpu_display_one>() - 4usize];
+ ["Offset of field: virtio_gpu_resp_display_info_virtio_gpu_display_one::r"]
+ [::std::mem::offset_of!(virtio_gpu_resp_display_info_virtio_gpu_display_one, r) - 0usize];
+ ["Offset of field: virtio_gpu_resp_display_info_virtio_gpu_display_one::enabled"][::std::mem::offset_of!(
+ virtio_gpu_resp_display_info_virtio_gpu_display_one,
+ enabled
+ ) - 16usize];
+ ["Offset of field: virtio_gpu_resp_display_info_virtio_gpu_display_one::flags"][::std::mem::offset_of!(
+ virtio_gpu_resp_display_info_virtio_gpu_display_one,
+ flags
+ ) - 20usize];
+};
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resp_display_info"]
+ [::std::mem::size_of::<virtio_gpu_resp_display_info>() - 408usize];
+ ["Alignment of virtio_gpu_resp_display_info"]
+ [::std::mem::align_of::<virtio_gpu_resp_display_info>() - 8usize];
+ ["Offset of field: virtio_gpu_resp_display_info::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resp_display_info, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resp_display_info::pmodes"]
+ [::std::mem::offset_of!(virtio_gpu_resp_display_info, pmodes) - 24usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_box {
@@ -1007,81 +427,17 @@
pub h: __le32,
pub d: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_box() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_box> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_box>(),
- 24usize,
- concat!("Size of: ", stringify!(virtio_gpu_box))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_box>(),
- 4usize,
- concat!("Alignment of ", stringify!(virtio_gpu_box))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).x) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_box),
- "::",
- stringify!(x)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).y) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_box),
- "::",
- stringify!(y)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).z) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_box),
- "::",
- stringify!(z)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).w) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_box),
- "::",
- stringify!(w)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).h) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_box),
- "::",
- stringify!(h)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).d) as usize - ptr as usize },
- 20usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_box),
- "::",
- stringify!(d)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_box"][::std::mem::size_of::<virtio_gpu_box>() - 24usize];
+ ["Alignment of virtio_gpu_box"][::std::mem::align_of::<virtio_gpu_box>() - 4usize];
+ ["Offset of field: virtio_gpu_box::x"][::std::mem::offset_of!(virtio_gpu_box, x) - 0usize];
+ ["Offset of field: virtio_gpu_box::y"][::std::mem::offset_of!(virtio_gpu_box, y) - 4usize];
+ ["Offset of field: virtio_gpu_box::z"][::std::mem::offset_of!(virtio_gpu_box, z) - 8usize];
+ ["Offset of field: virtio_gpu_box::w"][::std::mem::offset_of!(virtio_gpu_box, w) - 12usize];
+ ["Offset of field: virtio_gpu_box::h"][::std::mem::offset_of!(virtio_gpu_box, h) - 16usize];
+ ["Offset of field: virtio_gpu_box::d"][::std::mem::offset_of!(virtio_gpu_box, d) - 20usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_transfer_host_3d {
@@ -1093,92 +449,27 @@
pub stride: __le32,
pub layer_stride: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_transfer_host_3d() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_transfer_host_3d> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_transfer_host_3d>(),
- 72usize,
- concat!("Size of: ", stringify!(virtio_gpu_transfer_host_3d))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_transfer_host_3d>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_transfer_host_3d))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_host_3d),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).box_) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_host_3d),
- "::",
- stringify!(box_)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).offset) as usize - ptr as usize },
- 48usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_host_3d),
- "::",
- stringify!(offset)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 56usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_host_3d),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).level) as usize - ptr as usize },
- 60usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_host_3d),
- "::",
- stringify!(level)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).stride) as usize - ptr as usize },
- 64usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_host_3d),
- "::",
- stringify!(stride)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).layer_stride) as usize - ptr as usize },
- 68usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_transfer_host_3d),
- "::",
- stringify!(layer_stride)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_transfer_host_3d"]
+ [::std::mem::size_of::<virtio_gpu_transfer_host_3d>() - 72usize];
+ ["Alignment of virtio_gpu_transfer_host_3d"]
+ [::std::mem::align_of::<virtio_gpu_transfer_host_3d>() - 8usize];
+ ["Offset of field: virtio_gpu_transfer_host_3d::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_host_3d, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_transfer_host_3d::box_"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_host_3d, box_) - 24usize];
+ ["Offset of field: virtio_gpu_transfer_host_3d::offset"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_host_3d, offset) - 48usize];
+ ["Offset of field: virtio_gpu_transfer_host_3d::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_host_3d, resource_id) - 56usize];
+ ["Offset of field: virtio_gpu_transfer_host_3d::level"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_host_3d, level) - 60usize];
+ ["Offset of field: virtio_gpu_transfer_host_3d::stride"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_host_3d, stride) - 64usize];
+ ["Offset of field: virtio_gpu_transfer_host_3d::layer_stride"]
+ [::std::mem::offset_of!(virtio_gpu_transfer_host_3d, layer_stride) - 68usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resource_create_3d {
@@ -1196,152 +487,39 @@
pub flags: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resource_create_3d() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resource_create_3d> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resource_create_3d>(),
- 72usize,
- concat!("Size of: ", stringify!(virtio_gpu_resource_create_3d))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resource_create_3d>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resource_create_3d))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).target) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(target)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).format) as usize - ptr as usize },
- 32usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(format)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).bind) as usize - ptr as usize },
- 36usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(bind)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).width) as usize - ptr as usize },
- 40usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(width)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).height) as usize - ptr as usize },
- 44usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(height)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).depth) as usize - ptr as usize },
- 48usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(depth)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).array_size) as usize - ptr as usize },
- 52usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(array_size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).last_level) as usize - ptr as usize },
- 56usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(last_level)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).nr_samples) as usize - ptr as usize },
- 60usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(nr_samples)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
- 64usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(flags)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 68usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_3d),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resource_create_3d"]
+ [::std::mem::size_of::<virtio_gpu_resource_create_3d>() - 72usize];
+ ["Alignment of virtio_gpu_resource_create_3d"]
+ [::std::mem::align_of::<virtio_gpu_resource_create_3d>() - 8usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, resource_id) - 24usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::target"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, target) - 28usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::format"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, format) - 32usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::bind"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, bind) - 36usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::width"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, width) - 40usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::height"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, height) - 44usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::depth"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, depth) - 48usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::array_size"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, array_size) - 52usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::last_level"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, last_level) - 56usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::nr_samples"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, nr_samples) - 60usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::flags"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, flags) - 64usize];
+ ["Offset of field: virtio_gpu_resource_create_3d::padding"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_3d, padding) - 68usize];
+};
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct virtio_gpu_ctx_create {
@@ -1350,62 +528,20 @@
pub context_init: __le32,
pub debug_name: [::std::os::raw::c_char; 64usize],
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_ctx_create() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_ctx_create> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_ctx_create>(),
- 96usize,
- concat!("Size of: ", stringify!(virtio_gpu_ctx_create))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_ctx_create>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_ctx_create))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctx_create),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).nlen) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctx_create),
- "::",
- stringify!(nlen)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).context_init) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctx_create),
- "::",
- stringify!(context_init)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).debug_name) as usize - ptr as usize },
- 32usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctx_create),
- "::",
- stringify!(debug_name)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_ctx_create"][::std::mem::size_of::<virtio_gpu_ctx_create>() - 96usize];
+ ["Alignment of virtio_gpu_ctx_create"]
+ [::std::mem::align_of::<virtio_gpu_ctx_create>() - 8usize];
+ ["Offset of field: virtio_gpu_ctx_create::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_ctx_create, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_ctx_create::nlen"]
+ [::std::mem::offset_of!(virtio_gpu_ctx_create, nlen) - 24usize];
+ ["Offset of field: virtio_gpu_ctx_create::context_init"]
+ [::std::mem::offset_of!(virtio_gpu_ctx_create, context_init) - 28usize];
+ ["Offset of field: virtio_gpu_ctx_create::debug_name"]
+ [::std::mem::offset_of!(virtio_gpu_ctx_create, debug_name) - 32usize];
+};
impl Default for virtio_gpu_ctx_create {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
@@ -1420,32 +556,14 @@
pub struct virtio_gpu_ctx_destroy {
pub hdr: virtio_gpu_ctrl_hdr,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_ctx_destroy() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_ctx_destroy> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_ctx_destroy>(),
- 24usize,
- concat!("Size of: ", stringify!(virtio_gpu_ctx_destroy))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_ctx_destroy>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_ctx_destroy))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctx_destroy),
- "::",
- stringify!(hdr)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_ctx_destroy"][::std::mem::size_of::<virtio_gpu_ctx_destroy>() - 24usize];
+ ["Alignment of virtio_gpu_ctx_destroy"]
+ [::std::mem::align_of::<virtio_gpu_ctx_destroy>() - 8usize];
+ ["Offset of field: virtio_gpu_ctx_destroy::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_ctx_destroy, hdr) - 0usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_ctx_resource {
@@ -1453,52 +571,18 @@
pub resource_id: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_ctx_resource() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_ctx_resource> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_ctx_resource>(),
- 32usize,
- concat!("Size of: ", stringify!(virtio_gpu_ctx_resource))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_ctx_resource>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_ctx_resource))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctx_resource),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctx_resource),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_ctx_resource),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_ctx_resource"][::std::mem::size_of::<virtio_gpu_ctx_resource>() - 32usize];
+ ["Alignment of virtio_gpu_ctx_resource"]
+ [::std::mem::align_of::<virtio_gpu_ctx_resource>() - 8usize];
+ ["Offset of field: virtio_gpu_ctx_resource::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_ctx_resource, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_ctx_resource::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_ctx_resource, resource_id) - 24usize];
+ ["Offset of field: virtio_gpu_ctx_resource::padding"]
+ [::std::mem::offset_of!(virtio_gpu_ctx_resource, padding) - 28usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_cmd_submit {
@@ -1506,52 +590,18 @@
pub size: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_cmd_submit() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_cmd_submit> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_cmd_submit>(),
- 32usize,
- concat!("Size of: ", stringify!(virtio_gpu_cmd_submit))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_cmd_submit>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_cmd_submit))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_cmd_submit),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).size) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_cmd_submit),
- "::",
- stringify!(size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_cmd_submit),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_cmd_submit"][::std::mem::size_of::<virtio_gpu_cmd_submit>() - 32usize];
+ ["Alignment of virtio_gpu_cmd_submit"]
+ [::std::mem::align_of::<virtio_gpu_cmd_submit>() - 8usize];
+ ["Offset of field: virtio_gpu_cmd_submit::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_cmd_submit, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_cmd_submit::size"]
+ [::std::mem::offset_of!(virtio_gpu_cmd_submit, size) - 24usize];
+ ["Offset of field: virtio_gpu_cmd_submit::padding"]
+ [::std::mem::offset_of!(virtio_gpu_cmd_submit, padding) - 28usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_get_capset_info {
@@ -1559,52 +609,19 @@
pub capset_index: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_get_capset_info() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_get_capset_info> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_get_capset_info>(),
- 32usize,
- concat!("Size of: ", stringify!(virtio_gpu_get_capset_info))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_get_capset_info>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_get_capset_info))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_get_capset_info),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).capset_index) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_get_capset_info),
- "::",
- stringify!(capset_index)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_get_capset_info),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_get_capset_info"]
+ [::std::mem::size_of::<virtio_gpu_get_capset_info>() - 32usize];
+ ["Alignment of virtio_gpu_get_capset_info"]
+ [::std::mem::align_of::<virtio_gpu_get_capset_info>() - 8usize];
+ ["Offset of field: virtio_gpu_get_capset_info::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_get_capset_info, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_get_capset_info::capset_index"]
+ [::std::mem::offset_of!(virtio_gpu_get_capset_info, capset_index) - 24usize];
+ ["Offset of field: virtio_gpu_get_capset_info::padding"]
+ [::std::mem::offset_of!(virtio_gpu_get_capset_info, padding) - 28usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resp_capset_info {
@@ -1614,72 +631,23 @@
pub capset_max_size: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resp_capset_info() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resp_capset_info> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resp_capset_info>(),
- 40usize,
- concat!("Size of: ", stringify!(virtio_gpu_resp_capset_info))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resp_capset_info>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resp_capset_info))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_capset_info),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).capset_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_capset_info),
- "::",
- stringify!(capset_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).capset_max_version) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_capset_info),
- "::",
- stringify!(capset_max_version)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).capset_max_size) as usize - ptr as usize },
- 32usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_capset_info),
- "::",
- stringify!(capset_max_size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 36usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_capset_info),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resp_capset_info"]
+ [::std::mem::size_of::<virtio_gpu_resp_capset_info>() - 40usize];
+ ["Alignment of virtio_gpu_resp_capset_info"]
+ [::std::mem::align_of::<virtio_gpu_resp_capset_info>() - 8usize];
+ ["Offset of field: virtio_gpu_resp_capset_info::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resp_capset_info, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resp_capset_info::capset_id"]
+ [::std::mem::offset_of!(virtio_gpu_resp_capset_info, capset_id) - 24usize];
+ ["Offset of field: virtio_gpu_resp_capset_info::capset_max_version"]
+ [::std::mem::offset_of!(virtio_gpu_resp_capset_info, capset_max_version) - 28usize];
+ ["Offset of field: virtio_gpu_resp_capset_info::capset_max_size"]
+ [::std::mem::offset_of!(virtio_gpu_resp_capset_info, capset_max_size) - 32usize];
+ ["Offset of field: virtio_gpu_resp_capset_info::padding"]
+ [::std::mem::offset_of!(virtio_gpu_resp_capset_info, padding) - 36usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_get_capset {
@@ -1687,94 +655,34 @@
pub capset_id: __le32,
pub capset_version: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_get_capset() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_get_capset> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_get_capset>(),
- 32usize,
- concat!("Size of: ", stringify!(virtio_gpu_get_capset))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_get_capset>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_get_capset))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_get_capset),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).capset_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_get_capset),
- "::",
- stringify!(capset_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).capset_version) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_get_capset),
- "::",
- stringify!(capset_version)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_get_capset"][::std::mem::size_of::<virtio_gpu_get_capset>() - 32usize];
+ ["Alignment of virtio_gpu_get_capset"]
+ [::std::mem::align_of::<virtio_gpu_get_capset>() - 8usize];
+ ["Offset of field: virtio_gpu_get_capset::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_get_capset, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_get_capset::capset_id"]
+ [::std::mem::offset_of!(virtio_gpu_get_capset, capset_id) - 24usize];
+ ["Offset of field: virtio_gpu_get_capset::capset_version"]
+ [::std::mem::offset_of!(virtio_gpu_get_capset, capset_version) - 28usize];
+};
#[repr(C)]
#[derive(Debug, Default)]
pub struct virtio_gpu_resp_capset {
pub hdr: virtio_gpu_ctrl_hdr,
pub capset_data: __IncompleteArrayField<__u8>,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resp_capset() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resp_capset> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resp_capset>(),
- 24usize,
- concat!("Size of: ", stringify!(virtio_gpu_resp_capset))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resp_capset>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resp_capset))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_capset),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).capset_data) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_capset),
- "::",
- stringify!(capset_data)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resp_capset"][::std::mem::size_of::<virtio_gpu_resp_capset>() - 24usize];
+ ["Alignment of virtio_gpu_resp_capset"]
+ [::std::mem::align_of::<virtio_gpu_resp_capset>() - 8usize];
+ ["Offset of field: virtio_gpu_resp_capset::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resp_capset, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resp_capset::capset_data"]
+ [::std::mem::offset_of!(virtio_gpu_resp_capset, capset_data) - 24usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_cmd_get_edid {
@@ -1782,52 +690,18 @@
pub scanout: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_cmd_get_edid() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_cmd_get_edid> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_cmd_get_edid>(),
- 32usize,
- concat!("Size of: ", stringify!(virtio_gpu_cmd_get_edid))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_cmd_get_edid>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_cmd_get_edid))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_cmd_get_edid),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).scanout) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_cmd_get_edid),
- "::",
- stringify!(scanout)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_cmd_get_edid),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_cmd_get_edid"][::std::mem::size_of::<virtio_gpu_cmd_get_edid>() - 32usize];
+ ["Alignment of virtio_gpu_cmd_get_edid"]
+ [::std::mem::align_of::<virtio_gpu_cmd_get_edid>() - 8usize];
+ ["Offset of field: virtio_gpu_cmd_get_edid::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_cmd_get_edid, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_cmd_get_edid::scanout"]
+ [::std::mem::offset_of!(virtio_gpu_cmd_get_edid, scanout) - 24usize];
+ ["Offset of field: virtio_gpu_cmd_get_edid::padding"]
+ [::std::mem::offset_of!(virtio_gpu_cmd_get_edid, padding) - 28usize];
+};
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resp_edid {
@@ -1836,61 +710,19 @@
pub padding: __le32,
pub edid: [__u8; 1024usize],
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resp_edid() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resp_edid> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resp_edid>(),
- 1056usize,
- concat!("Size of: ", stringify!(virtio_gpu_resp_edid))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resp_edid>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resp_edid))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_edid),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).size) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_edid),
- "::",
- stringify!(size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_edid),
- "::",
- stringify!(padding)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).edid) as usize - ptr as usize },
- 32usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_edid),
- "::",
- stringify!(edid)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resp_edid"][::std::mem::size_of::<virtio_gpu_resp_edid>() - 1056usize];
+ ["Alignment of virtio_gpu_resp_edid"][::std::mem::align_of::<virtio_gpu_resp_edid>() - 8usize];
+ ["Offset of field: virtio_gpu_resp_edid::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resp_edid, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resp_edid::size"]
+ [::std::mem::offset_of!(virtio_gpu_resp_edid, size) - 24usize];
+ ["Offset of field: virtio_gpu_resp_edid::padding"]
+ [::std::mem::offset_of!(virtio_gpu_resp_edid, padding) - 28usize];
+ ["Offset of field: virtio_gpu_resp_edid::edid"]
+ [::std::mem::offset_of!(virtio_gpu_resp_edid, edid) - 32usize];
+};
impl Default for virtio_gpu_resp_edid {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
@@ -1908,61 +740,19 @@
pub num_scanouts: __le32,
pub num_capsets: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_config() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_config> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_config>(),
- 16usize,
- concat!("Size of: ", stringify!(virtio_gpu_config))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_config>(),
- 4usize,
- concat!("Alignment of ", stringify!(virtio_gpu_config))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).events_read) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_config),
- "::",
- stringify!(events_read)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).events_clear) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_config),
- "::",
- stringify!(events_clear)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).num_scanouts) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_config),
- "::",
- stringify!(num_scanouts)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).num_capsets) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_config),
- "::",
- stringify!(num_capsets)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_config"][::std::mem::size_of::<virtio_gpu_config>() - 16usize];
+ ["Alignment of virtio_gpu_config"][::std::mem::align_of::<virtio_gpu_config>() - 4usize];
+ ["Offset of field: virtio_gpu_config::events_read"]
+ [::std::mem::offset_of!(virtio_gpu_config, events_read) - 0usize];
+ ["Offset of field: virtio_gpu_config::events_clear"]
+ [::std::mem::offset_of!(virtio_gpu_config, events_clear) - 4usize];
+ ["Offset of field: virtio_gpu_config::num_scanouts"]
+ [::std::mem::offset_of!(virtio_gpu_config, num_scanouts) - 8usize];
+ ["Offset of field: virtio_gpu_config::num_capsets"]
+ [::std::mem::offset_of!(virtio_gpu_config, num_capsets) - 12usize];
+};
pub const virtio_gpu_formats_VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: virtio_gpu_formats = 1;
pub const virtio_gpu_formats_VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: virtio_gpu_formats = 2;
pub const virtio_gpu_formats_VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: virtio_gpu_formats = 3;
@@ -1979,94 +769,36 @@
pub resource_id: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resource_assign_uuid() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resource_assign_uuid> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resource_assign_uuid>(),
- 32usize,
- concat!("Size of: ", stringify!(virtio_gpu_resource_assign_uuid))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resource_assign_uuid>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resource_assign_uuid))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_assign_uuid),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_assign_uuid),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_assign_uuid),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resource_assign_uuid"]
+ [::std::mem::size_of::<virtio_gpu_resource_assign_uuid>() - 32usize];
+ ["Alignment of virtio_gpu_resource_assign_uuid"]
+ [::std::mem::align_of::<virtio_gpu_resource_assign_uuid>() - 8usize];
+ ["Offset of field: virtio_gpu_resource_assign_uuid::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resource_assign_uuid, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resource_assign_uuid::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_resource_assign_uuid, resource_id) - 24usize];
+ ["Offset of field: virtio_gpu_resource_assign_uuid::padding"]
+ [::std::mem::offset_of!(virtio_gpu_resource_assign_uuid, padding) - 28usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resp_resource_uuid {
pub hdr: virtio_gpu_ctrl_hdr,
pub uuid: [__u8; 16usize],
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resp_resource_uuid() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resp_resource_uuid> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resp_resource_uuid>(),
- 40usize,
- concat!("Size of: ", stringify!(virtio_gpu_resp_resource_uuid))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resp_resource_uuid>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resp_resource_uuid))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_resource_uuid),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).uuid) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_resource_uuid),
- "::",
- stringify!(uuid)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resp_resource_uuid"]
+ [::std::mem::size_of::<virtio_gpu_resp_resource_uuid>() - 40usize];
+ ["Alignment of virtio_gpu_resp_resource_uuid"]
+ [::std::mem::align_of::<virtio_gpu_resp_resource_uuid>() - 8usize];
+ ["Offset of field: virtio_gpu_resp_resource_uuid::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resp_resource_uuid, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resp_resource_uuid::uuid"]
+ [::std::mem::offset_of!(virtio_gpu_resp_resource_uuid, uuid) - 24usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resource_create_blob {
@@ -2078,92 +810,27 @@
pub blob_id: __le64,
pub size: __le64,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resource_create_blob() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resource_create_blob> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resource_create_blob>(),
- 56usize,
- concat!("Size of: ", stringify!(virtio_gpu_resource_create_blob))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resource_create_blob>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resource_create_blob))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_blob),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_blob),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).blob_mem) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_blob),
- "::",
- stringify!(blob_mem)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).blob_flags) as usize - ptr as usize },
- 32usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_blob),
- "::",
- stringify!(blob_flags)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).nr_entries) as usize - ptr as usize },
- 36usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_blob),
- "::",
- stringify!(nr_entries)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).blob_id) as usize - ptr as usize },
- 40usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_blob),
- "::",
- stringify!(blob_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).size) as usize - ptr as usize },
- 48usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_create_blob),
- "::",
- stringify!(size)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resource_create_blob"]
+ [::std::mem::size_of::<virtio_gpu_resource_create_blob>() - 56usize];
+ ["Alignment of virtio_gpu_resource_create_blob"]
+ [::std::mem::align_of::<virtio_gpu_resource_create_blob>() - 8usize];
+ ["Offset of field: virtio_gpu_resource_create_blob::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_blob, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resource_create_blob::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_blob, resource_id) - 24usize];
+ ["Offset of field: virtio_gpu_resource_create_blob::blob_mem"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_blob, blob_mem) - 28usize];
+ ["Offset of field: virtio_gpu_resource_create_blob::blob_flags"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_blob, blob_flags) - 32usize];
+ ["Offset of field: virtio_gpu_resource_create_blob::nr_entries"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_blob, nr_entries) - 36usize];
+ ["Offset of field: virtio_gpu_resource_create_blob::blob_id"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_blob, blob_id) - 40usize];
+ ["Offset of field: virtio_gpu_resource_create_blob::size"]
+ [::std::mem::offset_of!(virtio_gpu_resource_create_blob, size) - 48usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_set_scanout_blob {
@@ -2178,122 +845,33 @@
pub strides: [__le32; 4usize],
pub offsets: [__le32; 4usize],
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_set_scanout_blob() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_set_scanout_blob> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_set_scanout_blob>(),
- 96usize,
- concat!("Size of: ", stringify!(virtio_gpu_set_scanout_blob))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_set_scanout_blob>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_set_scanout_blob))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout_blob),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).r) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout_blob),
- "::",
- stringify!(r)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).scanout_id) as usize - ptr as usize },
- 40usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout_blob),
- "::",
- stringify!(scanout_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 44usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout_blob),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).width) as usize - ptr as usize },
- 48usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout_blob),
- "::",
- stringify!(width)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).height) as usize - ptr as usize },
- 52usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout_blob),
- "::",
- stringify!(height)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).format) as usize - ptr as usize },
- 56usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout_blob),
- "::",
- stringify!(format)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 60usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout_blob),
- "::",
- stringify!(padding)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).strides) as usize - ptr as usize },
- 64usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout_blob),
- "::",
- stringify!(strides)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).offsets) as usize - ptr as usize },
- 80usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_set_scanout_blob),
- "::",
- stringify!(offsets)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_set_scanout_blob"]
+ [::std::mem::size_of::<virtio_gpu_set_scanout_blob>() - 96usize];
+ ["Alignment of virtio_gpu_set_scanout_blob"]
+ [::std::mem::align_of::<virtio_gpu_set_scanout_blob>() - 8usize];
+ ["Offset of field: virtio_gpu_set_scanout_blob::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout_blob, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_set_scanout_blob::r"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout_blob, r) - 24usize];
+ ["Offset of field: virtio_gpu_set_scanout_blob::scanout_id"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout_blob, scanout_id) - 40usize];
+ ["Offset of field: virtio_gpu_set_scanout_blob::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout_blob, resource_id) - 44usize];
+ ["Offset of field: virtio_gpu_set_scanout_blob::width"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout_blob, width) - 48usize];
+ ["Offset of field: virtio_gpu_set_scanout_blob::height"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout_blob, height) - 52usize];
+ ["Offset of field: virtio_gpu_set_scanout_blob::format"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout_blob, format) - 56usize];
+ ["Offset of field: virtio_gpu_set_scanout_blob::padding"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout_blob, padding) - 60usize];
+ ["Offset of field: virtio_gpu_set_scanout_blob::strides"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout_blob, strides) - 64usize];
+ ["Offset of field: virtio_gpu_set_scanout_blob::offsets"]
+ [::std::mem::offset_of!(virtio_gpu_set_scanout_blob, offsets) - 80usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resource_map_blob {
@@ -2302,62 +880,21 @@
pub padding: __le32,
pub offset: __le64,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resource_map_blob() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resource_map_blob> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resource_map_blob>(),
- 40usize,
- concat!("Size of: ", stringify!(virtio_gpu_resource_map_blob))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resource_map_blob>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resource_map_blob))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_map_blob),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_map_blob),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_map_blob),
- "::",
- stringify!(padding)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).offset) as usize - ptr as usize },
- 32usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_map_blob),
- "::",
- stringify!(offset)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resource_map_blob"]
+ [::std::mem::size_of::<virtio_gpu_resource_map_blob>() - 40usize];
+ ["Alignment of virtio_gpu_resource_map_blob"]
+ [::std::mem::align_of::<virtio_gpu_resource_map_blob>() - 8usize];
+ ["Offset of field: virtio_gpu_resource_map_blob::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resource_map_blob, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resource_map_blob::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_resource_map_blob, resource_id) - 24usize];
+ ["Offset of field: virtio_gpu_resource_map_blob::padding"]
+ [::std::mem::offset_of!(virtio_gpu_resource_map_blob, padding) - 28usize];
+ ["Offset of field: virtio_gpu_resource_map_blob::offset"]
+ [::std::mem::offset_of!(virtio_gpu_resource_map_blob, offset) - 32usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resp_map_info {
@@ -2365,52 +902,19 @@
pub map_info: __u32,
pub padding: __u32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resp_map_info() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resp_map_info> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resp_map_info>(),
- 32usize,
- concat!("Size of: ", stringify!(virtio_gpu_resp_map_info))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resp_map_info>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resp_map_info))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_map_info),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).map_info) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_map_info),
- "::",
- stringify!(map_info)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resp_map_info),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resp_map_info"]
+ [::std::mem::size_of::<virtio_gpu_resp_map_info>() - 32usize];
+ ["Alignment of virtio_gpu_resp_map_info"]
+ [::std::mem::align_of::<virtio_gpu_resp_map_info>() - 8usize];
+ ["Offset of field: virtio_gpu_resp_map_info::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resp_map_info, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resp_map_info::map_info"]
+ [::std::mem::offset_of!(virtio_gpu_resp_map_info, map_info) - 24usize];
+ ["Offset of field: virtio_gpu_resp_map_info::padding"]
+ [::std::mem::offset_of!(virtio_gpu_resp_map_info, padding) - 28usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_gpu_resource_unmap_blob {
@@ -2418,49 +922,16 @@
pub resource_id: __le32,
pub padding: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_gpu_resource_unmap_blob() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_gpu_resource_unmap_blob> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_gpu_resource_unmap_blob>(),
- 32usize,
- concat!("Size of: ", stringify!(virtio_gpu_resource_unmap_blob))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_gpu_resource_unmap_blob>(),
- 8usize,
- concat!("Alignment of ", stringify!(virtio_gpu_resource_unmap_blob))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_unmap_blob),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resource_id) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_unmap_blob),
- "::",
- stringify!(resource_id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_gpu_resource_unmap_blob),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_gpu_resource_unmap_blob"]
+ [::std::mem::size_of::<virtio_gpu_resource_unmap_blob>() - 32usize];
+ ["Alignment of virtio_gpu_resource_unmap_blob"]
+ [::std::mem::align_of::<virtio_gpu_resource_unmap_blob>() - 8usize];
+ ["Offset of field: virtio_gpu_resource_unmap_blob::hdr"]
+ [::std::mem::offset_of!(virtio_gpu_resource_unmap_blob, hdr) - 0usize];
+ ["Offset of field: virtio_gpu_resource_unmap_blob::resource_id"]
+ [::std::mem::offset_of!(virtio_gpu_resource_unmap_blob, resource_id) - 24usize];
+ ["Offset of field: virtio_gpu_resource_unmap_blob::padding"]
+ [::std::mem::offset_of!(virtio_gpu_resource_unmap_blob, padding) - 28usize];
+};
diff --git a/crates/virtio-bindings/src/virtio_ids.rs b/crates/virtio-bindings/src/virtio_ids.rs
index 71dab81..58472bc 100644
--- a/crates/virtio-bindings/src/virtio_ids.rs
+++ b/crates/virtio-bindings/src/virtio_ids.rs
@@ -1,4 +1,4 @@
-/* automatically generated by rust-bindgen 0.63.0 */
+/* automatically generated by rust-bindgen 0.70.1 */
pub const VIRTIO_ID_NET: u32 = 1;
pub const VIRTIO_ID_BLOCK: u32 = 2;
diff --git a/crates/virtio-bindings/src/virtio_input.rs b/crates/virtio-bindings/src/virtio_input.rs
new file mode 100644
index 0000000..6629769
--- /dev/null
+++ b/crates/virtio-bindings/src/virtio_input.rs
@@ -0,0 +1,143 @@
+/* automatically generated by rust-bindgen 0.70.1 */
+
+pub type __u8 = ::std::os::raw::c_uchar;
+pub type __u16 = ::std::os::raw::c_ushort;
+pub type __u32 = ::std::os::raw::c_uint;
+pub type __le16 = __u16;
+pub type __le32 = __u32;
+pub const virtio_input_config_select_VIRTIO_INPUT_CFG_UNSET: virtio_input_config_select = 0;
+pub const virtio_input_config_select_VIRTIO_INPUT_CFG_ID_NAME: virtio_input_config_select = 1;
+pub const virtio_input_config_select_VIRTIO_INPUT_CFG_ID_SERIAL: virtio_input_config_select = 2;
+pub const virtio_input_config_select_VIRTIO_INPUT_CFG_ID_DEVIDS: virtio_input_config_select = 3;
+pub const virtio_input_config_select_VIRTIO_INPUT_CFG_PROP_BITS: virtio_input_config_select = 16;
+pub const virtio_input_config_select_VIRTIO_INPUT_CFG_EV_BITS: virtio_input_config_select = 17;
+pub const virtio_input_config_select_VIRTIO_INPUT_CFG_ABS_INFO: virtio_input_config_select = 18;
+pub type virtio_input_config_select = ::std::os::raw::c_uint;
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_input_absinfo {
+ pub min: __le32,
+ pub max: __le32,
+ pub fuzz: __le32,
+ pub flat: __le32,
+ pub res: __le32,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_input_absinfo"][::std::mem::size_of::<virtio_input_absinfo>() - 20usize];
+ ["Alignment of virtio_input_absinfo"][::std::mem::align_of::<virtio_input_absinfo>() - 4usize];
+ ["Offset of field: virtio_input_absinfo::min"]
+ [::std::mem::offset_of!(virtio_input_absinfo, min) - 0usize];
+ ["Offset of field: virtio_input_absinfo::max"]
+ [::std::mem::offset_of!(virtio_input_absinfo, max) - 4usize];
+ ["Offset of field: virtio_input_absinfo::fuzz"]
+ [::std::mem::offset_of!(virtio_input_absinfo, fuzz) - 8usize];
+ ["Offset of field: virtio_input_absinfo::flat"]
+ [::std::mem::offset_of!(virtio_input_absinfo, flat) - 12usize];
+ ["Offset of field: virtio_input_absinfo::res"]
+ [::std::mem::offset_of!(virtio_input_absinfo, res) - 16usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_input_devids {
+ pub bustype: __le16,
+ pub vendor: __le16,
+ pub product: __le16,
+ pub version: __le16,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_input_devids"][::std::mem::size_of::<virtio_input_devids>() - 8usize];
+ ["Alignment of virtio_input_devids"][::std::mem::align_of::<virtio_input_devids>() - 2usize];
+ ["Offset of field: virtio_input_devids::bustype"]
+ [::std::mem::offset_of!(virtio_input_devids, bustype) - 0usize];
+ ["Offset of field: virtio_input_devids::vendor"]
+ [::std::mem::offset_of!(virtio_input_devids, vendor) - 2usize];
+ ["Offset of field: virtio_input_devids::product"]
+ [::std::mem::offset_of!(virtio_input_devids, product) - 4usize];
+ ["Offset of field: virtio_input_devids::version"]
+ [::std::mem::offset_of!(virtio_input_devids, version) - 6usize];
+};
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct virtio_input_config {
+ pub select: __u8,
+ pub subsel: __u8,
+ pub size: __u8,
+ pub reserved: [__u8; 5usize],
+ pub u: virtio_input_config__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union virtio_input_config__bindgen_ty_1 {
+ pub string: [::std::os::raw::c_char; 128usize],
+ pub bitmap: [__u8; 128usize],
+ pub abs: virtio_input_absinfo,
+ pub ids: virtio_input_devids,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_input_config__bindgen_ty_1"]
+ [::std::mem::size_of::<virtio_input_config__bindgen_ty_1>() - 128usize];
+ ["Alignment of virtio_input_config__bindgen_ty_1"]
+ [::std::mem::align_of::<virtio_input_config__bindgen_ty_1>() - 4usize];
+ ["Offset of field: virtio_input_config__bindgen_ty_1::string"]
+ [::std::mem::offset_of!(virtio_input_config__bindgen_ty_1, string) - 0usize];
+ ["Offset of field: virtio_input_config__bindgen_ty_1::bitmap"]
+ [::std::mem::offset_of!(virtio_input_config__bindgen_ty_1, bitmap) - 0usize];
+ ["Offset of field: virtio_input_config__bindgen_ty_1::abs"]
+ [::std::mem::offset_of!(virtio_input_config__bindgen_ty_1, abs) - 0usize];
+ ["Offset of field: virtio_input_config__bindgen_ty_1::ids"]
+ [::std::mem::offset_of!(virtio_input_config__bindgen_ty_1, ids) - 0usize];
+};
+impl Default for virtio_input_config__bindgen_ty_1 {
+ fn default() -> Self {
+ let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_input_config"][::std::mem::size_of::<virtio_input_config>() - 136usize];
+ ["Alignment of virtio_input_config"][::std::mem::align_of::<virtio_input_config>() - 4usize];
+ ["Offset of field: virtio_input_config::select"]
+ [::std::mem::offset_of!(virtio_input_config, select) - 0usize];
+ ["Offset of field: virtio_input_config::subsel"]
+ [::std::mem::offset_of!(virtio_input_config, subsel) - 1usize];
+ ["Offset of field: virtio_input_config::size"]
+ [::std::mem::offset_of!(virtio_input_config, size) - 2usize];
+ ["Offset of field: virtio_input_config::reserved"]
+ [::std::mem::offset_of!(virtio_input_config, reserved) - 3usize];
+ ["Offset of field: virtio_input_config::u"]
+ [::std::mem::offset_of!(virtio_input_config, u) - 8usize];
+};
+impl Default for virtio_input_config {
+ fn default() -> Self {
+ let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_input_event {
+ pub type_: __le16,
+ pub code: __le16,
+ pub value: __le32,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_input_event"][::std::mem::size_of::<virtio_input_event>() - 8usize];
+ ["Alignment of virtio_input_event"][::std::mem::align_of::<virtio_input_event>() - 4usize];
+ ["Offset of field: virtio_input_event::type_"]
+ [::std::mem::offset_of!(virtio_input_event, type_) - 0usize];
+ ["Offset of field: virtio_input_event::code"]
+ [::std::mem::offset_of!(virtio_input_event, code) - 2usize];
+ ["Offset of field: virtio_input_event::value"]
+ [::std::mem::offset_of!(virtio_input_event, value) - 4usize];
+};
diff --git a/crates/virtio-bindings/src/virtio_mmio.rs b/crates/virtio-bindings/src/virtio_mmio.rs
index 2f936cd..cc42ff0 100644
--- a/crates/virtio-bindings/src/virtio_mmio.rs
+++ b/crates/virtio-bindings/src/virtio_mmio.rs
@@ -1,4 +1,4 @@
-/* automatically generated by rust-bindgen 0.63.0 */
+/* automatically generated by rust-bindgen 0.70.1 */
pub const VIRTIO_MMIO_MAGIC_VALUE: u32 = 0;
pub const VIRTIO_MMIO_VERSION: u32 = 4;
diff --git a/crates/virtio-bindings/src/virtio_net/generated.rs b/crates/virtio-bindings/src/virtio_net/generated.rs
index c8f5b2b..250803a 100644
--- a/crates/virtio-bindings/src/virtio_net/generated.rs
+++ b/crates/virtio-bindings/src/virtio_net/generated.rs
@@ -1,4 +1,4 @@
-/* automatically generated by rust-bindgen 0.63.0 */
+/* automatically generated by rust-bindgen 0.70.1 */
#[repr(C)]
#[derive(Default)]
@@ -52,6 +52,8 @@
pub const VIRTIO_NET_F_GUEST_ANNOUNCE: u32 = 21;
pub const VIRTIO_NET_F_MQ: u32 = 22;
pub const VIRTIO_NET_F_CTRL_MAC_ADDR: u32 = 23;
+pub const VIRTIO_NET_F_DEVICE_STATS: u32 = 50;
+pub const VIRTIO_NET_F_VQ_NOTF_COAL: u32 = 52;
pub const VIRTIO_NET_F_NOTF_COAL: u32 = 53;
pub const VIRTIO_NET_F_GUEST_USO4: u32 = 54;
pub const VIRTIO_NET_F_GUEST_USO6: u32 = 55;
@@ -121,11 +123,36 @@
pub const VIRTIO_NET_CTRL_NOTF_COAL: u32 = 6;
pub const VIRTIO_NET_CTRL_NOTF_COAL_TX_SET: u32 = 0;
pub const VIRTIO_NET_CTRL_NOTF_COAL_RX_SET: u32 = 1;
+pub const VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET: u32 = 2;
+pub const VIRTIO_NET_CTRL_NOTF_COAL_VQ_GET: u32 = 3;
+pub const VIRTIO_NET_CTRL_STATS: u32 = 8;
+pub const VIRTIO_NET_CTRL_STATS_QUERY: u32 = 0;
+pub const VIRTIO_NET_CTRL_STATS_GET: u32 = 1;
+pub const VIRTIO_NET_STATS_TYPE_CVQ: u64 = 4294967296;
+pub const VIRTIO_NET_STATS_TYPE_RX_BASIC: u32 = 1;
+pub const VIRTIO_NET_STATS_TYPE_RX_CSUM: u32 = 2;
+pub const VIRTIO_NET_STATS_TYPE_RX_GSO: u32 = 4;
+pub const VIRTIO_NET_STATS_TYPE_RX_SPEED: u32 = 8;
+pub const VIRTIO_NET_STATS_TYPE_TX_BASIC: u32 = 65536;
+pub const VIRTIO_NET_STATS_TYPE_TX_CSUM: u32 = 131072;
+pub const VIRTIO_NET_STATS_TYPE_TX_GSO: u32 = 262144;
+pub const VIRTIO_NET_STATS_TYPE_TX_SPEED: u32 = 524288;
+pub const VIRTIO_NET_STATS_TYPE_REPLY_CVQ: u32 = 32;
+pub const VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC: u32 = 0;
+pub const VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM: u32 = 1;
+pub const VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO: u32 = 2;
+pub const VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED: u32 = 3;
+pub const VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC: u32 = 16;
+pub const VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM: u32 = 17;
+pub const VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO: u32 = 18;
+pub const VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED: u32 = 19;
pub type __u8 = ::std::os::raw::c_uchar;
pub type __u16 = ::std::os::raw::c_ushort;
pub type __u32 = ::std::os::raw::c_uint;
+pub type __u64 = ::std::os::raw::c_ulonglong;
pub type __le16 = __u16;
pub type __le32 = __u32;
+pub type __le64 = __u64;
pub type __virtio16 = __u16;
pub type __virtio32 = __u32;
#[repr(C, packed)]
@@ -141,113 +168,29 @@
pub rss_max_indirection_table_length: __le16,
pub supported_hash_types: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_net_config() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_config> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_config>(),
- 24usize,
- concat!("Size of: ", stringify!(virtio_net_config))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_config>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_net_config))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).mac) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_config),
- "::",
- stringify!(mac)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).status) as usize - ptr as usize },
- 6usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_config),
- "::",
- stringify!(status)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_virtqueue_pairs) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_config),
- "::",
- stringify!(max_virtqueue_pairs)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).mtu) as usize - ptr as usize },
- 10usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_config),
- "::",
- stringify!(mtu)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).speed) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_config),
- "::",
- stringify!(speed)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).duplex) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_config),
- "::",
- stringify!(duplex)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).rss_max_key_size) as usize - ptr as usize },
- 17usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_config),
- "::",
- stringify!(rss_max_key_size)
- )
- );
- assert_eq!(
- unsafe {
- ::std::ptr::addr_of!((*ptr).rss_max_indirection_table_length) as usize - ptr as usize
- },
- 18usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_config),
- "::",
- stringify!(rss_max_indirection_table_length)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).supported_hash_types) as usize - ptr as usize },
- 20usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_config),
- "::",
- stringify!(supported_hash_types)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_config"][::std::mem::size_of::<virtio_net_config>() - 24usize];
+ ["Alignment of virtio_net_config"][::std::mem::align_of::<virtio_net_config>() - 1usize];
+ ["Offset of field: virtio_net_config::mac"]
+ [::std::mem::offset_of!(virtio_net_config, mac) - 0usize];
+ ["Offset of field: virtio_net_config::status"]
+ [::std::mem::offset_of!(virtio_net_config, status) - 6usize];
+ ["Offset of field: virtio_net_config::max_virtqueue_pairs"]
+ [::std::mem::offset_of!(virtio_net_config, max_virtqueue_pairs) - 8usize];
+ ["Offset of field: virtio_net_config::mtu"]
+ [::std::mem::offset_of!(virtio_net_config, mtu) - 10usize];
+ ["Offset of field: virtio_net_config::speed"]
+ [::std::mem::offset_of!(virtio_net_config, speed) - 12usize];
+ ["Offset of field: virtio_net_config::duplex"]
+ [::std::mem::offset_of!(virtio_net_config, duplex) - 16usize];
+ ["Offset of field: virtio_net_config::rss_max_key_size"]
+ [::std::mem::offset_of!(virtio_net_config, rss_max_key_size) - 17usize];
+ ["Offset of field: virtio_net_config::rss_max_indirection_table_length"]
+ [::std::mem::offset_of!(virtio_net_config, rss_max_indirection_table_length) - 18usize];
+ ["Offset of field: virtio_net_config::supported_hash_types"]
+ [::std::mem::offset_of!(virtio_net_config, supported_hash_types) - 20usize];
+};
#[repr(C)]
#[derive(Copy, Clone)]
pub struct virtio_net_hdr_v1 {
@@ -271,180 +214,66 @@
pub csum_start: __virtio16,
pub csum_offset: __virtio16,
}
-#[test]
-fn bindgen_test_layout_virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1>(),
- 4usize,
- concat!(
- "Size of: ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1)
- )
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1>(),
- 2usize,
- concat!(
- "Alignment of ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).csum_start) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1),
- "::",
- stringify!(csum_start)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).csum_offset) as usize - ptr as usize },
- 2usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1),
- "::",
- stringify!(csum_offset)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1"]
+ [::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1>() - 4usize];
+ ["Alignment of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1"]
+ [::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1>() - 2usize];
+ ["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1::csum_start"][::std::mem::offset_of!(
+ virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1,
+ csum_start
+ ) - 0usize];
+ ["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1::csum_offset"][::std::mem::offset_of!(
+ virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_1,
+ csum_offset
+ ) - 2usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2 {
pub start: __virtio16,
pub offset: __virtio16,
}
-#[test]
-fn bindgen_test_layout_virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2>(),
- 4usize,
- concat!(
- "Size of: ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2)
- )
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2>(),
- 2usize,
- concat!(
- "Alignment of ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).start) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2),
- "::",
- stringify!(start)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).offset) as usize - ptr as usize },
- 2usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2),
- "::",
- stringify!(offset)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2"]
+ [::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2>() - 4usize];
+ ["Alignment of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2"]
+ [::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2>() - 2usize];
+ ["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2::start"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2, start) - 0usize];
+ ["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2::offset"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_2, offset) - 2usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3 {
pub segments: __le16,
pub dup_acks: __le16,
}
-#[test]
-fn bindgen_test_layout_virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3>(),
- 4usize,
- concat!(
- "Size of: ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3)
- )
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3>(),
- 2usize,
- concat!(
- "Alignment of ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).segments) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3),
- "::",
- stringify!(segments)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).dup_acks) as usize - ptr as usize },
- 2usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3),
- "::",
- stringify!(dup_acks)
- )
- );
-}
-#[test]
-fn bindgen_test_layout_virtio_net_hdr_v1__bindgen_ty_1() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_hdr_v1__bindgen_ty_1> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1>(),
- 4usize,
- concat!("Size of: ", stringify!(virtio_net_hdr_v1__bindgen_ty_1))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1>(),
- 2usize,
- concat!("Alignment of ", stringify!(virtio_net_hdr_v1__bindgen_ty_1))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).csum) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1),
- "::",
- stringify!(csum)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).rsc) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1__bindgen_ty_1),
- "::",
- stringify!(rsc)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3"]
+ [::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3>() - 4usize];
+ ["Alignment of virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3"]
+ [::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3>() - 2usize];
+ ["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3::segments"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3, segments) - 0usize];
+ ["Offset of field: virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3::dup_acks"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1__bindgen_ty_3, dup_acks) - 2usize];
+};
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_hdr_v1__bindgen_ty_1"]
+ [::std::mem::size_of::<virtio_net_hdr_v1__bindgen_ty_1>() - 4usize];
+ ["Alignment of virtio_net_hdr_v1__bindgen_ty_1"]
+ [::std::mem::align_of::<virtio_net_hdr_v1__bindgen_ty_1>() - 2usize];
+ ["Offset of field: virtio_net_hdr_v1__bindgen_ty_1::csum"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1, csum) - 0usize];
+ ["Offset of field: virtio_net_hdr_v1__bindgen_ty_1::rsc"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1__bindgen_ty_1, rsc) - 0usize];
+};
impl Default for virtio_net_hdr_v1__bindgen_ty_1 {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
@@ -454,71 +283,21 @@
}
}
}
-#[test]
-fn bindgen_test_layout_virtio_net_hdr_v1() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_hdr_v1> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_hdr_v1>(),
- 12usize,
- concat!("Size of: ", stringify!(virtio_net_hdr_v1))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_hdr_v1>(),
- 2usize,
- concat!("Alignment of ", stringify!(virtio_net_hdr_v1))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1),
- "::",
- stringify!(flags)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).gso_type) as usize - ptr as usize },
- 1usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1),
- "::",
- stringify!(gso_type)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr_len) as usize - ptr as usize },
- 2usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1),
- "::",
- stringify!(hdr_len)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).gso_size) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1),
- "::",
- stringify!(gso_size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).num_buffers) as usize - ptr as usize },
- 10usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1),
- "::",
- stringify!(num_buffers)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_hdr_v1"][::std::mem::size_of::<virtio_net_hdr_v1>() - 12usize];
+ ["Alignment of virtio_net_hdr_v1"][::std::mem::align_of::<virtio_net_hdr_v1>() - 2usize];
+ ["Offset of field: virtio_net_hdr_v1::flags"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1, flags) - 0usize];
+ ["Offset of field: virtio_net_hdr_v1::gso_type"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1, gso_type) - 1usize];
+ ["Offset of field: virtio_net_hdr_v1::hdr_len"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1, hdr_len) - 2usize];
+ ["Offset of field: virtio_net_hdr_v1::gso_size"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1, gso_size) - 4usize];
+ ["Offset of field: virtio_net_hdr_v1::num_buffers"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1, num_buffers) - 10usize];
+};
impl Default for virtio_net_hdr_v1 {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
@@ -536,62 +315,20 @@
pub hash_report: __le16,
pub padding: __le16,
}
-#[test]
-fn bindgen_test_layout_virtio_net_hdr_v1_hash() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_hdr_v1_hash> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_hdr_v1_hash>(),
- 20usize,
- concat!("Size of: ", stringify!(virtio_net_hdr_v1_hash))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_hdr_v1_hash>(),
- 4usize,
- concat!("Alignment of ", stringify!(virtio_net_hdr_v1_hash))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1_hash),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hash_value) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1_hash),
- "::",
- stringify!(hash_value)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hash_report) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1_hash),
- "::",
- stringify!(hash_report)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).padding) as usize - ptr as usize },
- 18usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_v1_hash),
- "::",
- stringify!(padding)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_hdr_v1_hash"][::std::mem::size_of::<virtio_net_hdr_v1_hash>() - 20usize];
+ ["Alignment of virtio_net_hdr_v1_hash"]
+ [::std::mem::align_of::<virtio_net_hdr_v1_hash>() - 4usize];
+ ["Offset of field: virtio_net_hdr_v1_hash::hdr"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1_hash, hdr) - 0usize];
+ ["Offset of field: virtio_net_hdr_v1_hash::hash_value"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1_hash, hash_value) - 12usize];
+ ["Offset of field: virtio_net_hdr_v1_hash::hash_report"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1_hash, hash_report) - 16usize];
+ ["Offset of field: virtio_net_hdr_v1_hash::padding"]
+ [::std::mem::offset_of!(virtio_net_hdr_v1_hash, padding) - 18usize];
+};
impl Default for virtio_net_hdr_v1_hash {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
@@ -611,205 +348,70 @@
pub csum_start: __virtio16,
pub csum_offset: __virtio16,
}
-#[test]
-fn bindgen_test_layout_virtio_net_hdr() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_hdr> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_hdr>(),
- 10usize,
- concat!("Size of: ", stringify!(virtio_net_hdr))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_hdr>(),
- 2usize,
- concat!("Alignment of ", stringify!(virtio_net_hdr))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr),
- "::",
- stringify!(flags)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).gso_type) as usize - ptr as usize },
- 1usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr),
- "::",
- stringify!(gso_type)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr_len) as usize - ptr as usize },
- 2usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr),
- "::",
- stringify!(hdr_len)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).gso_size) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr),
- "::",
- stringify!(gso_size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).csum_start) as usize - ptr as usize },
- 6usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr),
- "::",
- stringify!(csum_start)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).csum_offset) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr),
- "::",
- stringify!(csum_offset)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_hdr"][::std::mem::size_of::<virtio_net_hdr>() - 10usize];
+ ["Alignment of virtio_net_hdr"][::std::mem::align_of::<virtio_net_hdr>() - 2usize];
+ ["Offset of field: virtio_net_hdr::flags"]
+ [::std::mem::offset_of!(virtio_net_hdr, flags) - 0usize];
+ ["Offset of field: virtio_net_hdr::gso_type"]
+ [::std::mem::offset_of!(virtio_net_hdr, gso_type) - 1usize];
+ ["Offset of field: virtio_net_hdr::hdr_len"]
+ [::std::mem::offset_of!(virtio_net_hdr, hdr_len) - 2usize];
+ ["Offset of field: virtio_net_hdr::gso_size"]
+ [::std::mem::offset_of!(virtio_net_hdr, gso_size) - 4usize];
+ ["Offset of field: virtio_net_hdr::csum_start"]
+ [::std::mem::offset_of!(virtio_net_hdr, csum_start) - 6usize];
+ ["Offset of field: virtio_net_hdr::csum_offset"]
+ [::std::mem::offset_of!(virtio_net_hdr, csum_offset) - 8usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_net_hdr_mrg_rxbuf {
pub hdr: virtio_net_hdr,
pub num_buffers: __virtio16,
}
-#[test]
-fn bindgen_test_layout_virtio_net_hdr_mrg_rxbuf() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_hdr_mrg_rxbuf> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_hdr_mrg_rxbuf>(),
- 12usize,
- concat!("Size of: ", stringify!(virtio_net_hdr_mrg_rxbuf))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_hdr_mrg_rxbuf>(),
- 2usize,
- concat!("Alignment of ", stringify!(virtio_net_hdr_mrg_rxbuf))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hdr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_mrg_rxbuf),
- "::",
- stringify!(hdr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).num_buffers) as usize - ptr as usize },
- 10usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hdr_mrg_rxbuf),
- "::",
- stringify!(num_buffers)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_hdr_mrg_rxbuf"]
+ [::std::mem::size_of::<virtio_net_hdr_mrg_rxbuf>() - 12usize];
+ ["Alignment of virtio_net_hdr_mrg_rxbuf"]
+ [::std::mem::align_of::<virtio_net_hdr_mrg_rxbuf>() - 2usize];
+ ["Offset of field: virtio_net_hdr_mrg_rxbuf::hdr"]
+ [::std::mem::offset_of!(virtio_net_hdr_mrg_rxbuf, hdr) - 0usize];
+ ["Offset of field: virtio_net_hdr_mrg_rxbuf::num_buffers"]
+ [::std::mem::offset_of!(virtio_net_hdr_mrg_rxbuf, num_buffers) - 10usize];
+};
#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_net_ctrl_hdr {
pub class: __u8,
pub cmd: __u8,
}
-#[test]
-fn bindgen_test_layout_virtio_net_ctrl_hdr() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_ctrl_hdr> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_ctrl_hdr>(),
- 2usize,
- concat!("Size of: ", stringify!(virtio_net_ctrl_hdr))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_ctrl_hdr>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_net_ctrl_hdr))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).class) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_ctrl_hdr),
- "::",
- stringify!(class)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).cmd) as usize - ptr as usize },
- 1usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_ctrl_hdr),
- "::",
- stringify!(cmd)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_ctrl_hdr"][::std::mem::size_of::<virtio_net_ctrl_hdr>() - 2usize];
+ ["Alignment of virtio_net_ctrl_hdr"][::std::mem::align_of::<virtio_net_ctrl_hdr>() - 1usize];
+ ["Offset of field: virtio_net_ctrl_hdr::class"]
+ [::std::mem::offset_of!(virtio_net_ctrl_hdr, class) - 0usize];
+ ["Offset of field: virtio_net_ctrl_hdr::cmd"]
+ [::std::mem::offset_of!(virtio_net_ctrl_hdr, cmd) - 1usize];
+};
pub type virtio_net_ctrl_ack = __u8;
#[repr(C, packed)]
pub struct virtio_net_ctrl_mac {
pub entries: __virtio32,
pub macs: __IncompleteArrayField<[__u8; 6usize]>,
}
-#[test]
-fn bindgen_test_layout_virtio_net_ctrl_mac() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_ctrl_mac> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_ctrl_mac>(),
- 4usize,
- concat!("Size of: ", stringify!(virtio_net_ctrl_mac))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_ctrl_mac>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_net_ctrl_mac))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).entries) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_ctrl_mac),
- "::",
- stringify!(entries)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).macs) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_ctrl_mac),
- "::",
- stringify!(macs)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_ctrl_mac"][::std::mem::size_of::<virtio_net_ctrl_mac>() - 4usize];
+ ["Alignment of virtio_net_ctrl_mac"][::std::mem::align_of::<virtio_net_ctrl_mac>() - 1usize];
+ ["Offset of field: virtio_net_ctrl_mac::entries"]
+ [::std::mem::offset_of!(virtio_net_ctrl_mac, entries) - 0usize];
+ ["Offset of field: virtio_net_ctrl_mac::macs"]
+ [::std::mem::offset_of!(virtio_net_ctrl_mac, macs) - 4usize];
+};
impl Default for virtio_net_ctrl_mac {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
@@ -824,31 +426,13 @@
pub struct virtio_net_ctrl_mq {
pub virtqueue_pairs: __virtio16,
}
-#[test]
-fn bindgen_test_layout_virtio_net_ctrl_mq() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_ctrl_mq> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_ctrl_mq>(),
- 2usize,
- concat!("Size of: ", stringify!(virtio_net_ctrl_mq))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_ctrl_mq>(),
- 2usize,
- concat!("Alignment of ", stringify!(virtio_net_ctrl_mq))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).virtqueue_pairs) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_ctrl_mq),
- "::",
- stringify!(virtqueue_pairs)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_ctrl_mq"][::std::mem::size_of::<virtio_net_ctrl_mq>() - 2usize];
+ ["Alignment of virtio_net_ctrl_mq"][::std::mem::align_of::<virtio_net_ctrl_mq>() - 2usize];
+ ["Offset of field: virtio_net_ctrl_mq::virtqueue_pairs"]
+ [::std::mem::offset_of!(virtio_net_ctrl_mq, virtqueue_pairs) - 0usize];
+};
#[repr(C)]
#[derive(Debug, Default)]
pub struct virtio_net_rss_config {
@@ -860,92 +444,26 @@
pub hash_key_length: __u8,
pub hash_key_data: __IncompleteArrayField<__u8>,
}
-#[test]
-fn bindgen_test_layout_virtio_net_rss_config() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_rss_config> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_rss_config>(),
- 16usize,
- concat!("Size of: ", stringify!(virtio_net_rss_config))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_rss_config>(),
- 4usize,
- concat!("Alignment of ", stringify!(virtio_net_rss_config))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hash_types) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_rss_config),
- "::",
- stringify!(hash_types)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).indirection_table_mask) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_rss_config),
- "::",
- stringify!(indirection_table_mask)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).unclassified_queue) as usize - ptr as usize },
- 6usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_rss_config),
- "::",
- stringify!(unclassified_queue)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).indirection_table) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_rss_config),
- "::",
- stringify!(indirection_table)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_tx_vq) as usize - ptr as usize },
- 10usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_rss_config),
- "::",
- stringify!(max_tx_vq)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hash_key_length) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_rss_config),
- "::",
- stringify!(hash_key_length)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hash_key_data) as usize - ptr as usize },
- 13usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_rss_config),
- "::",
- stringify!(hash_key_data)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_rss_config"][::std::mem::size_of::<virtio_net_rss_config>() - 16usize];
+ ["Alignment of virtio_net_rss_config"]
+ [::std::mem::align_of::<virtio_net_rss_config>() - 4usize];
+ ["Offset of field: virtio_net_rss_config::hash_types"]
+ [::std::mem::offset_of!(virtio_net_rss_config, hash_types) - 0usize];
+ ["Offset of field: virtio_net_rss_config::indirection_table_mask"]
+ [::std::mem::offset_of!(virtio_net_rss_config, indirection_table_mask) - 4usize];
+ ["Offset of field: virtio_net_rss_config::unclassified_queue"]
+ [::std::mem::offset_of!(virtio_net_rss_config, unclassified_queue) - 6usize];
+ ["Offset of field: virtio_net_rss_config::indirection_table"]
+ [::std::mem::offset_of!(virtio_net_rss_config, indirection_table) - 8usize];
+ ["Offset of field: virtio_net_rss_config::max_tx_vq"]
+ [::std::mem::offset_of!(virtio_net_rss_config, max_tx_vq) - 10usize];
+ ["Offset of field: virtio_net_rss_config::hash_key_length"]
+ [::std::mem::offset_of!(virtio_net_rss_config, hash_key_length) - 12usize];
+ ["Offset of field: virtio_net_rss_config::hash_key_data"]
+ [::std::mem::offset_of!(virtio_net_rss_config, hash_key_data) - 13usize];
+};
#[repr(C)]
#[derive(Debug, Default)]
pub struct virtio_net_hash_config {
@@ -954,143 +472,381 @@
pub hash_key_length: __u8,
pub hash_key_data: __IncompleteArrayField<__u8>,
}
-#[test]
-fn bindgen_test_layout_virtio_net_hash_config() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_hash_config> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_hash_config>(),
- 16usize,
- concat!("Size of: ", stringify!(virtio_net_hash_config))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_hash_config>(),
- 4usize,
- concat!("Alignment of ", stringify!(virtio_net_hash_config))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hash_types) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hash_config),
- "::",
- stringify!(hash_types)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).reserved) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hash_config),
- "::",
- stringify!(reserved)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hash_key_length) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hash_config),
- "::",
- stringify!(hash_key_length)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).hash_key_data) as usize - ptr as usize },
- 13usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_hash_config),
- "::",
- stringify!(hash_key_data)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_hash_config"][::std::mem::size_of::<virtio_net_hash_config>() - 16usize];
+ ["Alignment of virtio_net_hash_config"]
+ [::std::mem::align_of::<virtio_net_hash_config>() - 4usize];
+ ["Offset of field: virtio_net_hash_config::hash_types"]
+ [::std::mem::offset_of!(virtio_net_hash_config, hash_types) - 0usize];
+ ["Offset of field: virtio_net_hash_config::reserved"]
+ [::std::mem::offset_of!(virtio_net_hash_config, reserved) - 4usize];
+ ["Offset of field: virtio_net_hash_config::hash_key_length"]
+ [::std::mem::offset_of!(virtio_net_hash_config, hash_key_length) - 12usize];
+ ["Offset of field: virtio_net_hash_config::hash_key_data"]
+ [::std::mem::offset_of!(virtio_net_hash_config, hash_key_data) - 13usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_net_ctrl_coal_tx {
pub tx_max_packets: __le32,
pub tx_usecs: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_net_ctrl_coal_tx() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_ctrl_coal_tx> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_ctrl_coal_tx>(),
- 8usize,
- concat!("Size of: ", stringify!(virtio_net_ctrl_coal_tx))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_ctrl_coal_tx>(),
- 4usize,
- concat!("Alignment of ", stringify!(virtio_net_ctrl_coal_tx))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).tx_max_packets) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_ctrl_coal_tx),
- "::",
- stringify!(tx_max_packets)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).tx_usecs) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_ctrl_coal_tx),
- "::",
- stringify!(tx_usecs)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_ctrl_coal_tx"][::std::mem::size_of::<virtio_net_ctrl_coal_tx>() - 8usize];
+ ["Alignment of virtio_net_ctrl_coal_tx"]
+ [::std::mem::align_of::<virtio_net_ctrl_coal_tx>() - 4usize];
+ ["Offset of field: virtio_net_ctrl_coal_tx::tx_max_packets"]
+ [::std::mem::offset_of!(virtio_net_ctrl_coal_tx, tx_max_packets) - 0usize];
+ ["Offset of field: virtio_net_ctrl_coal_tx::tx_usecs"]
+ [::std::mem::offset_of!(virtio_net_ctrl_coal_tx, tx_usecs) - 4usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_net_ctrl_coal_rx {
pub rx_max_packets: __le32,
pub rx_usecs: __le32,
}
-#[test]
-fn bindgen_test_layout_virtio_net_ctrl_coal_rx() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_net_ctrl_coal_rx> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_net_ctrl_coal_rx>(),
- 8usize,
- concat!("Size of: ", stringify!(virtio_net_ctrl_coal_rx))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_net_ctrl_coal_rx>(),
- 4usize,
- concat!("Alignment of ", stringify!(virtio_net_ctrl_coal_rx))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).rx_max_packets) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_ctrl_coal_rx),
- "::",
- stringify!(rx_max_packets)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).rx_usecs) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_net_ctrl_coal_rx),
- "::",
- stringify!(rx_usecs)
- )
- );
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_ctrl_coal_rx"][::std::mem::size_of::<virtio_net_ctrl_coal_rx>() - 8usize];
+ ["Alignment of virtio_net_ctrl_coal_rx"]
+ [::std::mem::align_of::<virtio_net_ctrl_coal_rx>() - 4usize];
+ ["Offset of field: virtio_net_ctrl_coal_rx::rx_max_packets"]
+ [::std::mem::offset_of!(virtio_net_ctrl_coal_rx, rx_max_packets) - 0usize];
+ ["Offset of field: virtio_net_ctrl_coal_rx::rx_usecs"]
+ [::std::mem::offset_of!(virtio_net_ctrl_coal_rx, rx_usecs) - 4usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_ctrl_coal {
+ pub max_packets: __le32,
+ pub max_usecs: __le32,
}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_ctrl_coal"][::std::mem::size_of::<virtio_net_ctrl_coal>() - 8usize];
+ ["Alignment of virtio_net_ctrl_coal"][::std::mem::align_of::<virtio_net_ctrl_coal>() - 4usize];
+ ["Offset of field: virtio_net_ctrl_coal::max_packets"]
+ [::std::mem::offset_of!(virtio_net_ctrl_coal, max_packets) - 0usize];
+ ["Offset of field: virtio_net_ctrl_coal::max_usecs"]
+ [::std::mem::offset_of!(virtio_net_ctrl_coal, max_usecs) - 4usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_ctrl_coal_vq {
+ pub vqn: __le16,
+ pub reserved: __le16,
+ pub coal: virtio_net_ctrl_coal,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_ctrl_coal_vq"][::std::mem::size_of::<virtio_net_ctrl_coal_vq>() - 12usize];
+ ["Alignment of virtio_net_ctrl_coal_vq"]
+ [::std::mem::align_of::<virtio_net_ctrl_coal_vq>() - 4usize];
+ ["Offset of field: virtio_net_ctrl_coal_vq::vqn"]
+ [::std::mem::offset_of!(virtio_net_ctrl_coal_vq, vqn) - 0usize];
+ ["Offset of field: virtio_net_ctrl_coal_vq::reserved"]
+ [::std::mem::offset_of!(virtio_net_ctrl_coal_vq, reserved) - 2usize];
+ ["Offset of field: virtio_net_ctrl_coal_vq::coal"]
+ [::std::mem::offset_of!(virtio_net_ctrl_coal_vq, coal) - 4usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_stats_capabilities {
+ pub supported_stats_types: [__le64; 1usize],
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_stats_capabilities"]
+ [::std::mem::size_of::<virtio_net_stats_capabilities>() - 8usize];
+ ["Alignment of virtio_net_stats_capabilities"]
+ [::std::mem::align_of::<virtio_net_stats_capabilities>() - 8usize];
+ ["Offset of field: virtio_net_stats_capabilities::supported_stats_types"]
+ [::std::mem::offset_of!(virtio_net_stats_capabilities, supported_stats_types) - 0usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_ctrl_queue_stats {
+ pub stats: [virtio_net_ctrl_queue_stats__bindgen_ty_1; 1usize],
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_ctrl_queue_stats__bindgen_ty_1 {
+ pub vq_index: __le16,
+ pub reserved: [__le16; 3usize],
+ pub types_bitmap: [__le64; 1usize],
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_ctrl_queue_stats__bindgen_ty_1"]
+ [::std::mem::size_of::<virtio_net_ctrl_queue_stats__bindgen_ty_1>() - 16usize];
+ ["Alignment of virtio_net_ctrl_queue_stats__bindgen_ty_1"]
+ [::std::mem::align_of::<virtio_net_ctrl_queue_stats__bindgen_ty_1>() - 8usize];
+ ["Offset of field: virtio_net_ctrl_queue_stats__bindgen_ty_1::vq_index"]
+ [::std::mem::offset_of!(virtio_net_ctrl_queue_stats__bindgen_ty_1, vq_index) - 0usize];
+ ["Offset of field: virtio_net_ctrl_queue_stats__bindgen_ty_1::reserved"]
+ [::std::mem::offset_of!(virtio_net_ctrl_queue_stats__bindgen_ty_1, reserved) - 2usize];
+ ["Offset of field: virtio_net_ctrl_queue_stats__bindgen_ty_1::types_bitmap"]
+ [::std::mem::offset_of!(virtio_net_ctrl_queue_stats__bindgen_ty_1, types_bitmap) - 8usize];
+};
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_ctrl_queue_stats"]
+ [::std::mem::size_of::<virtio_net_ctrl_queue_stats>() - 16usize];
+ ["Alignment of virtio_net_ctrl_queue_stats"]
+ [::std::mem::align_of::<virtio_net_ctrl_queue_stats>() - 8usize];
+ ["Offset of field: virtio_net_ctrl_queue_stats::stats"]
+ [::std::mem::offset_of!(virtio_net_ctrl_queue_stats, stats) - 0usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_stats_reply_hdr {
+ pub type_: __u8,
+ pub reserved: __u8,
+ pub vq_index: __le16,
+ pub reserved1: __le16,
+ pub size: __le16,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_stats_reply_hdr"]
+ [::std::mem::size_of::<virtio_net_stats_reply_hdr>() - 8usize];
+ ["Alignment of virtio_net_stats_reply_hdr"]
+ [::std::mem::align_of::<virtio_net_stats_reply_hdr>() - 2usize];
+ ["Offset of field: virtio_net_stats_reply_hdr::type_"]
+ [::std::mem::offset_of!(virtio_net_stats_reply_hdr, type_) - 0usize];
+ ["Offset of field: virtio_net_stats_reply_hdr::reserved"]
+ [::std::mem::offset_of!(virtio_net_stats_reply_hdr, reserved) - 1usize];
+ ["Offset of field: virtio_net_stats_reply_hdr::vq_index"]
+ [::std::mem::offset_of!(virtio_net_stats_reply_hdr, vq_index) - 2usize];
+ ["Offset of field: virtio_net_stats_reply_hdr::reserved1"]
+ [::std::mem::offset_of!(virtio_net_stats_reply_hdr, reserved1) - 4usize];
+ ["Offset of field: virtio_net_stats_reply_hdr::size"]
+ [::std::mem::offset_of!(virtio_net_stats_reply_hdr, size) - 6usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_stats_cvq {
+ pub hdr: virtio_net_stats_reply_hdr,
+ pub command_num: __le64,
+ pub ok_num: __le64,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_stats_cvq"][::std::mem::size_of::<virtio_net_stats_cvq>() - 24usize];
+ ["Alignment of virtio_net_stats_cvq"][::std::mem::align_of::<virtio_net_stats_cvq>() - 8usize];
+ ["Offset of field: virtio_net_stats_cvq::hdr"]
+ [::std::mem::offset_of!(virtio_net_stats_cvq, hdr) - 0usize];
+ ["Offset of field: virtio_net_stats_cvq::command_num"]
+ [::std::mem::offset_of!(virtio_net_stats_cvq, command_num) - 8usize];
+ ["Offset of field: virtio_net_stats_cvq::ok_num"]
+ [::std::mem::offset_of!(virtio_net_stats_cvq, ok_num) - 16usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_stats_rx_basic {
+ pub hdr: virtio_net_stats_reply_hdr,
+ pub rx_notifications: __le64,
+ pub rx_packets: __le64,
+ pub rx_bytes: __le64,
+ pub rx_interrupts: __le64,
+ pub rx_drops: __le64,
+ pub rx_drop_overruns: __le64,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_stats_rx_basic"]
+ [::std::mem::size_of::<virtio_net_stats_rx_basic>() - 56usize];
+ ["Alignment of virtio_net_stats_rx_basic"]
+ [::std::mem::align_of::<virtio_net_stats_rx_basic>() - 8usize];
+ ["Offset of field: virtio_net_stats_rx_basic::hdr"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_basic, hdr) - 0usize];
+ ["Offset of field: virtio_net_stats_rx_basic::rx_notifications"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_basic, rx_notifications) - 8usize];
+ ["Offset of field: virtio_net_stats_rx_basic::rx_packets"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_basic, rx_packets) - 16usize];
+ ["Offset of field: virtio_net_stats_rx_basic::rx_bytes"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_basic, rx_bytes) - 24usize];
+ ["Offset of field: virtio_net_stats_rx_basic::rx_interrupts"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_basic, rx_interrupts) - 32usize];
+ ["Offset of field: virtio_net_stats_rx_basic::rx_drops"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_basic, rx_drops) - 40usize];
+ ["Offset of field: virtio_net_stats_rx_basic::rx_drop_overruns"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_basic, rx_drop_overruns) - 48usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_stats_tx_basic {
+ pub hdr: virtio_net_stats_reply_hdr,
+ pub tx_notifications: __le64,
+ pub tx_packets: __le64,
+ pub tx_bytes: __le64,
+ pub tx_interrupts: __le64,
+ pub tx_drops: __le64,
+ pub tx_drop_malformed: __le64,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_stats_tx_basic"]
+ [::std::mem::size_of::<virtio_net_stats_tx_basic>() - 56usize];
+ ["Alignment of virtio_net_stats_tx_basic"]
+ [::std::mem::align_of::<virtio_net_stats_tx_basic>() - 8usize];
+ ["Offset of field: virtio_net_stats_tx_basic::hdr"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_basic, hdr) - 0usize];
+ ["Offset of field: virtio_net_stats_tx_basic::tx_notifications"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_basic, tx_notifications) - 8usize];
+ ["Offset of field: virtio_net_stats_tx_basic::tx_packets"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_basic, tx_packets) - 16usize];
+ ["Offset of field: virtio_net_stats_tx_basic::tx_bytes"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_basic, tx_bytes) - 24usize];
+ ["Offset of field: virtio_net_stats_tx_basic::tx_interrupts"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_basic, tx_interrupts) - 32usize];
+ ["Offset of field: virtio_net_stats_tx_basic::tx_drops"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_basic, tx_drops) - 40usize];
+ ["Offset of field: virtio_net_stats_tx_basic::tx_drop_malformed"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_basic, tx_drop_malformed) - 48usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_stats_rx_csum {
+ pub hdr: virtio_net_stats_reply_hdr,
+ pub rx_csum_valid: __le64,
+ pub rx_needs_csum: __le64,
+ pub rx_csum_none: __le64,
+ pub rx_csum_bad: __le64,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_stats_rx_csum"]
+ [::std::mem::size_of::<virtio_net_stats_rx_csum>() - 40usize];
+ ["Alignment of virtio_net_stats_rx_csum"]
+ [::std::mem::align_of::<virtio_net_stats_rx_csum>() - 8usize];
+ ["Offset of field: virtio_net_stats_rx_csum::hdr"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_csum, hdr) - 0usize];
+ ["Offset of field: virtio_net_stats_rx_csum::rx_csum_valid"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_csum, rx_csum_valid) - 8usize];
+ ["Offset of field: virtio_net_stats_rx_csum::rx_needs_csum"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_csum, rx_needs_csum) - 16usize];
+ ["Offset of field: virtio_net_stats_rx_csum::rx_csum_none"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_csum, rx_csum_none) - 24usize];
+ ["Offset of field: virtio_net_stats_rx_csum::rx_csum_bad"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_csum, rx_csum_bad) - 32usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_stats_tx_csum {
+ pub hdr: virtio_net_stats_reply_hdr,
+ pub tx_csum_none: __le64,
+ pub tx_needs_csum: __le64,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_stats_tx_csum"]
+ [::std::mem::size_of::<virtio_net_stats_tx_csum>() - 24usize];
+ ["Alignment of virtio_net_stats_tx_csum"]
+ [::std::mem::align_of::<virtio_net_stats_tx_csum>() - 8usize];
+ ["Offset of field: virtio_net_stats_tx_csum::hdr"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_csum, hdr) - 0usize];
+ ["Offset of field: virtio_net_stats_tx_csum::tx_csum_none"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_csum, tx_csum_none) - 8usize];
+ ["Offset of field: virtio_net_stats_tx_csum::tx_needs_csum"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_csum, tx_needs_csum) - 16usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_stats_rx_gso {
+ pub hdr: virtio_net_stats_reply_hdr,
+ pub rx_gso_packets: __le64,
+ pub rx_gso_bytes: __le64,
+ pub rx_gso_packets_coalesced: __le64,
+ pub rx_gso_bytes_coalesced: __le64,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_stats_rx_gso"][::std::mem::size_of::<virtio_net_stats_rx_gso>() - 40usize];
+ ["Alignment of virtio_net_stats_rx_gso"]
+ [::std::mem::align_of::<virtio_net_stats_rx_gso>() - 8usize];
+ ["Offset of field: virtio_net_stats_rx_gso::hdr"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_gso, hdr) - 0usize];
+ ["Offset of field: virtio_net_stats_rx_gso::rx_gso_packets"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_gso, rx_gso_packets) - 8usize];
+ ["Offset of field: virtio_net_stats_rx_gso::rx_gso_bytes"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_gso, rx_gso_bytes) - 16usize];
+ ["Offset of field: virtio_net_stats_rx_gso::rx_gso_packets_coalesced"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_gso, rx_gso_packets_coalesced) - 24usize];
+ ["Offset of field: virtio_net_stats_rx_gso::rx_gso_bytes_coalesced"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_gso, rx_gso_bytes_coalesced) - 32usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_stats_tx_gso {
+ pub hdr: virtio_net_stats_reply_hdr,
+ pub tx_gso_packets: __le64,
+ pub tx_gso_bytes: __le64,
+ pub tx_gso_segments: __le64,
+ pub tx_gso_segments_bytes: __le64,
+ pub tx_gso_packets_noseg: __le64,
+ pub tx_gso_bytes_noseg: __le64,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_stats_tx_gso"][::std::mem::size_of::<virtio_net_stats_tx_gso>() - 56usize];
+ ["Alignment of virtio_net_stats_tx_gso"]
+ [::std::mem::align_of::<virtio_net_stats_tx_gso>() - 8usize];
+ ["Offset of field: virtio_net_stats_tx_gso::hdr"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_gso, hdr) - 0usize];
+ ["Offset of field: virtio_net_stats_tx_gso::tx_gso_packets"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_gso, tx_gso_packets) - 8usize];
+ ["Offset of field: virtio_net_stats_tx_gso::tx_gso_bytes"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_gso, tx_gso_bytes) - 16usize];
+ ["Offset of field: virtio_net_stats_tx_gso::tx_gso_segments"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_gso, tx_gso_segments) - 24usize];
+ ["Offset of field: virtio_net_stats_tx_gso::tx_gso_segments_bytes"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_gso, tx_gso_segments_bytes) - 32usize];
+ ["Offset of field: virtio_net_stats_tx_gso::tx_gso_packets_noseg"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_gso, tx_gso_packets_noseg) - 40usize];
+ ["Offset of field: virtio_net_stats_tx_gso::tx_gso_bytes_noseg"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_gso, tx_gso_bytes_noseg) - 48usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_stats_rx_speed {
+ pub hdr: virtio_net_stats_reply_hdr,
+ pub rx_ratelimit_packets: __le64,
+ pub rx_ratelimit_bytes: __le64,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_stats_rx_speed"]
+ [::std::mem::size_of::<virtio_net_stats_rx_speed>() - 24usize];
+ ["Alignment of virtio_net_stats_rx_speed"]
+ [::std::mem::align_of::<virtio_net_stats_rx_speed>() - 8usize];
+ ["Offset of field: virtio_net_stats_rx_speed::hdr"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_speed, hdr) - 0usize];
+ ["Offset of field: virtio_net_stats_rx_speed::rx_ratelimit_packets"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_speed, rx_ratelimit_packets) - 8usize];
+ ["Offset of field: virtio_net_stats_rx_speed::rx_ratelimit_bytes"]
+ [::std::mem::offset_of!(virtio_net_stats_rx_speed, rx_ratelimit_bytes) - 16usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, PartialEq)]
+pub struct virtio_net_stats_tx_speed {
+ pub hdr: virtio_net_stats_reply_hdr,
+ pub tx_ratelimit_packets: __le64,
+ pub tx_ratelimit_bytes: __le64,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_net_stats_tx_speed"]
+ [::std::mem::size_of::<virtio_net_stats_tx_speed>() - 24usize];
+ ["Alignment of virtio_net_stats_tx_speed"]
+ [::std::mem::align_of::<virtio_net_stats_tx_speed>() - 8usize];
+ ["Offset of field: virtio_net_stats_tx_speed::hdr"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_speed, hdr) - 0usize];
+ ["Offset of field: virtio_net_stats_tx_speed::tx_ratelimit_packets"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_speed, tx_ratelimit_packets) - 8usize];
+ ["Offset of field: virtio_net_stats_tx_speed::tx_ratelimit_bytes"]
+ [::std::mem::offset_of!(virtio_net_stats_tx_speed, tx_ratelimit_bytes) - 16usize];
+};
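Note (illustrative sketch, not part of the patch): the const blocks above are the compile-time replacement for the removed #[test] layout functions. Each line indexes a one-element array with the difference between the actual and expected value; a matching layout yields index 0, while any mismatch fails const evaluation and the crate does not build. The sketch below applies the same pattern to a hypothetical struct named Example.

#[repr(C)]
struct Example {
    a: u16,
    b: u32,
}

#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    // Index 0 is in bounds, so these compile only when every value matches.
    // A larger actual value gives a non-zero index (out of bounds); a smaller
    // one underflows the subtraction. Either way, const evaluation fails.
    ["Size of Example"][::std::mem::size_of::<Example>() - 8usize];
    ["Alignment of Example"][::std::mem::align_of::<Example>() - 4usize];
    ["Offset of field: Example::a"][::std::mem::offset_of!(Example, a) - 0usize];
    ["Offset of field: Example::b"][::std::mem::offset_of!(Example, b) - 4usize];
};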
diff --git a/crates/virtio-bindings/src/virtio_ring.rs b/crates/virtio-bindings/src/virtio_ring.rs
index 9512aaa..68411c6 100644
--- a/crates/virtio-bindings/src/virtio_ring.rs
+++ b/crates/virtio-bindings/src/virtio_ring.rs
@@ -1,4 +1,4 @@
-/* automatically generated by rust-bindgen 0.63.0 */
+/* automatically generated by rust-bindgen 0.70.1 */
#[repr(C)]
#[derive(Default)]
@@ -64,61 +64,15 @@
pub flags: __virtio16,
pub next: __virtio16,
}
-#[test]
-fn bindgen_test_layout_vring_desc() {
- const UNINIT: ::std::mem::MaybeUninit<vring_desc> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<vring_desc>(),
- 16usize,
- concat!("Size of: ", stringify!(vring_desc))
- );
- assert_eq!(
- ::std::mem::align_of::<vring_desc>(),
- 8usize,
- concat!("Alignment of ", stringify!(vring_desc))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).addr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_desc),
- "::",
- stringify!(addr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).len) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_desc),
- "::",
- stringify!(len)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_desc),
- "::",
- stringify!(flags)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).next) as usize - ptr as usize },
- 14usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_desc),
- "::",
- stringify!(next)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of vring_desc"][::std::mem::size_of::<vring_desc>() - 16usize];
+ ["Alignment of vring_desc"][::std::mem::align_of::<vring_desc>() - 8usize];
+ ["Offset of field: vring_desc::addr"][::std::mem::offset_of!(vring_desc, addr) - 0usize];
+ ["Offset of field: vring_desc::len"][::std::mem::offset_of!(vring_desc, len) - 8usize];
+ ["Offset of field: vring_desc::flags"][::std::mem::offset_of!(vring_desc, flags) - 12usize];
+ ["Offset of field: vring_desc::next"][::std::mem::offset_of!(vring_desc, next) - 14usize];
+};
#[repr(C)]
#[derive(Debug, Default)]
pub struct vring_avail {
@@ -126,92 +80,28 @@
pub idx: __virtio16,
pub ring: __IncompleteArrayField<__virtio16>,
}
-#[test]
-fn bindgen_test_layout_vring_avail() {
- const UNINIT: ::std::mem::MaybeUninit<vring_avail> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<vring_avail>(),
- 4usize,
- concat!("Size of: ", stringify!(vring_avail))
- );
- assert_eq!(
- ::std::mem::align_of::<vring_avail>(),
- 2usize,
- concat!("Alignment of ", stringify!(vring_avail))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_avail),
- "::",
- stringify!(flags)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).idx) as usize - ptr as usize },
- 2usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_avail),
- "::",
- stringify!(idx)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).ring) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_avail),
- "::",
- stringify!(ring)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of vring_avail"][::std::mem::size_of::<vring_avail>() - 4usize];
+ ["Alignment of vring_avail"][::std::mem::align_of::<vring_avail>() - 2usize];
+ ["Offset of field: vring_avail::flags"][::std::mem::offset_of!(vring_avail, flags) - 0usize];
+ ["Offset of field: vring_avail::idx"][::std::mem::offset_of!(vring_avail, idx) - 2usize];
+ ["Offset of field: vring_avail::ring"][::std::mem::offset_of!(vring_avail, ring) - 4usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct vring_used_elem {
pub id: __virtio32,
pub len: __virtio32,
}
-#[test]
-fn bindgen_test_layout_vring_used_elem() {
- const UNINIT: ::std::mem::MaybeUninit<vring_used_elem> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<vring_used_elem>(),
- 8usize,
- concat!("Size of: ", stringify!(vring_used_elem))
- );
- assert_eq!(
- ::std::mem::align_of::<vring_used_elem>(),
- 4usize,
- concat!("Alignment of ", stringify!(vring_used_elem))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).id) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_used_elem),
- "::",
- stringify!(id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).len) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_used_elem),
- "::",
- stringify!(len)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of vring_used_elem"][::std::mem::size_of::<vring_used_elem>() - 8usize];
+ ["Alignment of vring_used_elem"][::std::mem::align_of::<vring_used_elem>() - 4usize];
+ ["Offset of field: vring_used_elem::id"][::std::mem::offset_of!(vring_used_elem, id) - 0usize];
+ ["Offset of field: vring_used_elem::len"]
+ [::std::mem::offset_of!(vring_used_elem, len) - 4usize];
+};
pub type vring_used_elem_t = vring_used_elem;
#[repr(C)]
#[derive(Debug, Default)]
@@ -220,51 +110,14 @@
pub idx: __virtio16,
pub ring: __IncompleteArrayField<vring_used_elem_t>,
}
-#[test]
-fn bindgen_test_layout_vring_used() {
- const UNINIT: ::std::mem::MaybeUninit<vring_used> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<vring_used>(),
- 4usize,
- concat!("Size of: ", stringify!(vring_used))
- );
- assert_eq!(
- ::std::mem::align_of::<vring_used>(),
- 4usize,
- concat!("Alignment of ", stringify!(vring_used))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_used),
- "::",
- stringify!(flags)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).idx) as usize - ptr as usize },
- 2usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_used),
- "::",
- stringify!(idx)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).ring) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_used),
- "::",
- stringify!(ring)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of vring_used"][::std::mem::size_of::<vring_used>() - 4usize];
+ ["Alignment of vring_used"][::std::mem::align_of::<vring_used>() - 4usize];
+ ["Offset of field: vring_used::flags"][::std::mem::offset_of!(vring_used, flags) - 0usize];
+ ["Offset of field: vring_used::idx"][::std::mem::offset_of!(vring_used, idx) - 2usize];
+ ["Offset of field: vring_used::ring"][::std::mem::offset_of!(vring_used, ring) - 4usize];
+};
#[doc = " struct vring_desc - Virtio ring descriptors,\n 16 bytes long. These can chain together via @next.\n\n @addr: buffer address (guest-physical)\n @len: buffer length\n @flags: descriptor flags\n @next: index of the next descriptor in the chain,\n if the VRING_DESC_F_NEXT flag is set. We chain unused\n descriptors via this, too."]
pub type vring_desc_t = vring_desc;
pub type vring_avail_t = vring_avail;
@@ -277,61 +130,15 @@
pub avail: *mut vring_avail_t,
pub used: *mut vring_used_t,
}
-#[test]
-fn bindgen_test_layout_vring() {
- const UNINIT: ::std::mem::MaybeUninit<vring> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<vring>(),
- 32usize,
- concat!("Size of: ", stringify!(vring))
- );
- assert_eq!(
- ::std::mem::align_of::<vring>(),
- 8usize,
- concat!("Alignment of ", stringify!(vring))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).num) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(vring),
- "::",
- stringify!(num)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).desc) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(vring),
- "::",
- stringify!(desc)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).avail) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(vring),
- "::",
- stringify!(avail)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).used) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(vring),
- "::",
- stringify!(used)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of vring"][::std::mem::size_of::<vring>() - 32usize];
+ ["Alignment of vring"][::std::mem::align_of::<vring>() - 8usize];
+ ["Offset of field: vring::num"][::std::mem::offset_of!(vring, num) - 0usize];
+ ["Offset of field: vring::desc"][::std::mem::offset_of!(vring, desc) - 8usize];
+ ["Offset of field: vring::avail"][::std::mem::offset_of!(vring, avail) - 16usize];
+ ["Offset of field: vring::used"][::std::mem::offset_of!(vring, used) - 24usize];
+};
impl Default for vring {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
@@ -347,42 +154,16 @@
pub off_wrap: __le16,
pub flags: __le16,
}
-#[test]
-fn bindgen_test_layout_vring_packed_desc_event() {
- const UNINIT: ::std::mem::MaybeUninit<vring_packed_desc_event> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<vring_packed_desc_event>(),
- 4usize,
- concat!("Size of: ", stringify!(vring_packed_desc_event))
- );
- assert_eq!(
- ::std::mem::align_of::<vring_packed_desc_event>(),
- 2usize,
- concat!("Alignment of ", stringify!(vring_packed_desc_event))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).off_wrap) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_packed_desc_event),
- "::",
- stringify!(off_wrap)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
- 2usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_packed_desc_event),
- "::",
- stringify!(flags)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of vring_packed_desc_event"][::std::mem::size_of::<vring_packed_desc_event>() - 4usize];
+ ["Alignment of vring_packed_desc_event"]
+ [::std::mem::align_of::<vring_packed_desc_event>() - 2usize];
+ ["Offset of field: vring_packed_desc_event::off_wrap"]
+ [::std::mem::offset_of!(vring_packed_desc_event, off_wrap) - 0usize];
+ ["Offset of field: vring_packed_desc_event::flags"]
+ [::std::mem::offset_of!(vring_packed_desc_event, flags) - 2usize];
+};
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct vring_packed_desc {
@@ -391,58 +172,16 @@
pub id: __le16,
pub flags: __le16,
}
-#[test]
-fn bindgen_test_layout_vring_packed_desc() {
- const UNINIT: ::std::mem::MaybeUninit<vring_packed_desc> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<vring_packed_desc>(),
- 16usize,
- concat!("Size of: ", stringify!(vring_packed_desc))
- );
- assert_eq!(
- ::std::mem::align_of::<vring_packed_desc>(),
- 8usize,
- concat!("Alignment of ", stringify!(vring_packed_desc))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).addr) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_packed_desc),
- "::",
- stringify!(addr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).len) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_packed_desc),
- "::",
- stringify!(len)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).id) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_packed_desc),
- "::",
- stringify!(id)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize },
- 14usize,
- concat!(
- "Offset of field: ",
- stringify!(vring_packed_desc),
- "::",
- stringify!(flags)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of vring_packed_desc"][::std::mem::size_of::<vring_packed_desc>() - 16usize];
+ ["Alignment of vring_packed_desc"][::std::mem::align_of::<vring_packed_desc>() - 8usize];
+ ["Offset of field: vring_packed_desc::addr"]
+ [::std::mem::offset_of!(vring_packed_desc, addr) - 0usize];
+ ["Offset of field: vring_packed_desc::len"]
+ [::std::mem::offset_of!(vring_packed_desc, len) - 8usize];
+ ["Offset of field: vring_packed_desc::id"]
+ [::std::mem::offset_of!(vring_packed_desc, id) - 12usize];
+ ["Offset of field: vring_packed_desc::flags"]
+ [::std::mem::offset_of!(vring_packed_desc, flags) - 14usize];
+};
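Note (illustrative sketch, not part of the patch): for structs that end in an __IncompleteArrayField, such as vring_avail and vring_used above, the asserted size covers only the fixed prefix, which is why the flexible member's offset equals the size (for vring_avail, both are 4). A hypothetical Header struct shows the same shape, with a zero-length array standing in for the incomplete field.

#[repr(C)]
struct Header {
    flags: u16,
    idx: u16,
    ring: [u16; 0], // stand-in for __IncompleteArrayField<u16>
}

#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
    // The trailing zero-sized array adds no bytes, so the struct's size
    // stops at the fixed header and the array begins right at that size.
    ["Size of Header"][::std::mem::size_of::<Header>() - 4usize];
    ["Offset of field: Header::ring"][::std::mem::offset_of!(Header, ring) - 4usize];
};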
diff --git a/crates/virtio-bindings/src/virtio_scsi.rs b/crates/virtio-bindings/src/virtio_scsi.rs
index caa48ca..d762acf 100644
--- a/crates/virtio-bindings/src/virtio_scsi.rs
+++ b/crates/virtio-bindings/src/virtio_scsi.rs
@@ -1,4 +1,4 @@
-/* automatically generated by rust-bindgen 0.63.0 */
+/* automatically generated by rust-bindgen 0.70.1 */
pub const VIRTIO_SCSI_CDB_DEFAULT_SIZE: u32 = 32;
pub const VIRTIO_SCSI_SENSE_DEFAULT_SIZE: u32 = 96;
@@ -61,81 +61,23 @@
pub crn: __u8,
pub cdb: [__u8; 32usize],
}
-#[test]
-fn bindgen_test_layout_virtio_scsi_cmd_req() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_scsi_cmd_req> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_scsi_cmd_req>(),
- 51usize,
- concat!("Size of: ", stringify!(virtio_scsi_cmd_req))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_scsi_cmd_req>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_scsi_cmd_req))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).lun) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req),
- "::",
- stringify!(lun)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).tag) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req),
- "::",
- stringify!(tag)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).task_attr) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req),
- "::",
- stringify!(task_attr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).prio) as usize - ptr as usize },
- 17usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req),
- "::",
- stringify!(prio)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).crn) as usize - ptr as usize },
- 18usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req),
- "::",
- stringify!(crn)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).cdb) as usize - ptr as usize },
- 19usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req),
- "::",
- stringify!(cdb)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_scsi_cmd_req"][::std::mem::size_of::<virtio_scsi_cmd_req>() - 51usize];
+ ["Alignment of virtio_scsi_cmd_req"][::std::mem::align_of::<virtio_scsi_cmd_req>() - 1usize];
+ ["Offset of field: virtio_scsi_cmd_req::lun"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req, lun) - 0usize];
+ ["Offset of field: virtio_scsi_cmd_req::tag"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req, tag) - 8usize];
+ ["Offset of field: virtio_scsi_cmd_req::task_attr"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req, task_attr) - 16usize];
+ ["Offset of field: virtio_scsi_cmd_req::prio"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req, prio) - 17usize];
+ ["Offset of field: virtio_scsi_cmd_req::crn"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req, crn) - 18usize];
+ ["Offset of field: virtio_scsi_cmd_req::cdb"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req, cdb) - 19usize];
+};
#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_scsi_cmd_req_pi {
@@ -148,102 +90,28 @@
pub pi_bytesin: __virtio32,
pub cdb: [__u8; 32usize],
}
-#[test]
-fn bindgen_test_layout_virtio_scsi_cmd_req_pi() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_scsi_cmd_req_pi> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_scsi_cmd_req_pi>(),
- 59usize,
- concat!("Size of: ", stringify!(virtio_scsi_cmd_req_pi))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_scsi_cmd_req_pi>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_scsi_cmd_req_pi))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).lun) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req_pi),
- "::",
- stringify!(lun)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).tag) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req_pi),
- "::",
- stringify!(tag)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).task_attr) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req_pi),
- "::",
- stringify!(task_attr)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).prio) as usize - ptr as usize },
- 17usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req_pi),
- "::",
- stringify!(prio)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).crn) as usize - ptr as usize },
- 18usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req_pi),
- "::",
- stringify!(crn)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).pi_bytesout) as usize - ptr as usize },
- 19usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req_pi),
- "::",
- stringify!(pi_bytesout)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).pi_bytesin) as usize - ptr as usize },
- 23usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req_pi),
- "::",
- stringify!(pi_bytesin)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).cdb) as usize - ptr as usize },
- 27usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_req_pi),
- "::",
- stringify!(cdb)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_scsi_cmd_req_pi"][::std::mem::size_of::<virtio_scsi_cmd_req_pi>() - 59usize];
+ ["Alignment of virtio_scsi_cmd_req_pi"]
+ [::std::mem::align_of::<virtio_scsi_cmd_req_pi>() - 1usize];
+ ["Offset of field: virtio_scsi_cmd_req_pi::lun"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req_pi, lun) - 0usize];
+ ["Offset of field: virtio_scsi_cmd_req_pi::tag"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req_pi, tag) - 8usize];
+ ["Offset of field: virtio_scsi_cmd_req_pi::task_attr"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req_pi, task_attr) - 16usize];
+ ["Offset of field: virtio_scsi_cmd_req_pi::prio"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req_pi, prio) - 17usize];
+ ["Offset of field: virtio_scsi_cmd_req_pi::crn"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req_pi, crn) - 18usize];
+ ["Offset of field: virtio_scsi_cmd_req_pi::pi_bytesout"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req_pi, pi_bytesout) - 19usize];
+ ["Offset of field: virtio_scsi_cmd_req_pi::pi_bytesin"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req_pi, pi_bytesin) - 23usize];
+ ["Offset of field: virtio_scsi_cmd_req_pi::cdb"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_req_pi, cdb) - 27usize];
+};
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct virtio_scsi_cmd_resp {
@@ -254,81 +122,23 @@
pub response: __u8,
pub sense: [__u8; 96usize],
}
-#[test]
-fn bindgen_test_layout_virtio_scsi_cmd_resp() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_scsi_cmd_resp> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_scsi_cmd_resp>(),
- 108usize,
- concat!("Size of: ", stringify!(virtio_scsi_cmd_resp))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_scsi_cmd_resp>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_scsi_cmd_resp))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).sense_len) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_resp),
- "::",
- stringify!(sense_len)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).resid) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_resp),
- "::",
- stringify!(resid)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).status_qualifier) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_resp),
- "::",
- stringify!(status_qualifier)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).status) as usize - ptr as usize },
- 10usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_resp),
- "::",
- stringify!(status)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).response) as usize - ptr as usize },
- 11usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_resp),
- "::",
- stringify!(response)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).sense) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_cmd_resp),
- "::",
- stringify!(sense)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_scsi_cmd_resp"][::std::mem::size_of::<virtio_scsi_cmd_resp>() - 108usize];
+ ["Alignment of virtio_scsi_cmd_resp"][::std::mem::align_of::<virtio_scsi_cmd_resp>() - 1usize];
+ ["Offset of field: virtio_scsi_cmd_resp::sense_len"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_resp, sense_len) - 0usize];
+ ["Offset of field: virtio_scsi_cmd_resp::resid"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_resp, resid) - 4usize];
+ ["Offset of field: virtio_scsi_cmd_resp::status_qualifier"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_resp, status_qualifier) - 8usize];
+ ["Offset of field: virtio_scsi_cmd_resp::status"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_resp, status) - 10usize];
+ ["Offset of field: virtio_scsi_cmd_resp::response"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_resp, response) - 11usize];
+ ["Offset of field: virtio_scsi_cmd_resp::sense"]
+ [::std::mem::offset_of!(virtio_scsi_cmd_resp, sense) - 12usize];
+};
impl Default for virtio_scsi_cmd_resp {
fn default() -> Self {
let mut s = ::std::mem::MaybeUninit::<Self>::uninit();
@@ -346,93 +156,35 @@
pub lun: [__u8; 8usize],
pub tag: __virtio64,
}
-#[test]
-fn bindgen_test_layout_virtio_scsi_ctrl_tmf_req() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_scsi_ctrl_tmf_req> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_scsi_ctrl_tmf_req>(),
- 24usize,
- concat!("Size of: ", stringify!(virtio_scsi_ctrl_tmf_req))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_scsi_ctrl_tmf_req>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_scsi_ctrl_tmf_req))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).type_) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_ctrl_tmf_req),
- "::",
- stringify!(type_)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).subtype) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_ctrl_tmf_req),
- "::",
- stringify!(subtype)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).lun) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_ctrl_tmf_req),
- "::",
- stringify!(lun)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).tag) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_ctrl_tmf_req),
- "::",
- stringify!(tag)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_scsi_ctrl_tmf_req"]
+ [::std::mem::size_of::<virtio_scsi_ctrl_tmf_req>() - 24usize];
+ ["Alignment of virtio_scsi_ctrl_tmf_req"]
+ [::std::mem::align_of::<virtio_scsi_ctrl_tmf_req>() - 1usize];
+ ["Offset of field: virtio_scsi_ctrl_tmf_req::type_"]
+ [::std::mem::offset_of!(virtio_scsi_ctrl_tmf_req, type_) - 0usize];
+ ["Offset of field: virtio_scsi_ctrl_tmf_req::subtype"]
+ [::std::mem::offset_of!(virtio_scsi_ctrl_tmf_req, subtype) - 4usize];
+ ["Offset of field: virtio_scsi_ctrl_tmf_req::lun"]
+ [::std::mem::offset_of!(virtio_scsi_ctrl_tmf_req, lun) - 8usize];
+ ["Offset of field: virtio_scsi_ctrl_tmf_req::tag"]
+ [::std::mem::offset_of!(virtio_scsi_ctrl_tmf_req, tag) - 16usize];
+};
#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_scsi_ctrl_tmf_resp {
pub response: __u8,
}
-#[test]
-fn bindgen_test_layout_virtio_scsi_ctrl_tmf_resp() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_scsi_ctrl_tmf_resp> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_scsi_ctrl_tmf_resp>(),
- 1usize,
- concat!("Size of: ", stringify!(virtio_scsi_ctrl_tmf_resp))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_scsi_ctrl_tmf_resp>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_scsi_ctrl_tmf_resp))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).response) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_ctrl_tmf_resp),
- "::",
- stringify!(response)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_scsi_ctrl_tmf_resp"]
+ [::std::mem::size_of::<virtio_scsi_ctrl_tmf_resp>() - 1usize];
+ ["Alignment of virtio_scsi_ctrl_tmf_resp"]
+ [::std::mem::align_of::<virtio_scsi_ctrl_tmf_resp>() - 1usize];
+ ["Offset of field: virtio_scsi_ctrl_tmf_resp::response"]
+ [::std::mem::offset_of!(virtio_scsi_ctrl_tmf_resp, response) - 0usize];
+};
#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_scsi_ctrl_an_req {
@@ -440,94 +192,35 @@
pub lun: [__u8; 8usize],
pub event_requested: __virtio32,
}
-#[test]
-fn bindgen_test_layout_virtio_scsi_ctrl_an_req() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_scsi_ctrl_an_req> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_scsi_ctrl_an_req>(),
- 16usize,
- concat!("Size of: ", stringify!(virtio_scsi_ctrl_an_req))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_scsi_ctrl_an_req>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_scsi_ctrl_an_req))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).type_) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_ctrl_an_req),
- "::",
- stringify!(type_)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).lun) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_ctrl_an_req),
- "::",
- stringify!(lun)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).event_requested) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_ctrl_an_req),
- "::",
- stringify!(event_requested)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_scsi_ctrl_an_req"][::std::mem::size_of::<virtio_scsi_ctrl_an_req>() - 16usize];
+ ["Alignment of virtio_scsi_ctrl_an_req"]
+ [::std::mem::align_of::<virtio_scsi_ctrl_an_req>() - 1usize];
+ ["Offset of field: virtio_scsi_ctrl_an_req::type_"]
+ [::std::mem::offset_of!(virtio_scsi_ctrl_an_req, type_) - 0usize];
+ ["Offset of field: virtio_scsi_ctrl_an_req::lun"]
+ [::std::mem::offset_of!(virtio_scsi_ctrl_an_req, lun) - 4usize];
+ ["Offset of field: virtio_scsi_ctrl_an_req::event_requested"]
+ [::std::mem::offset_of!(virtio_scsi_ctrl_an_req, event_requested) - 12usize];
+};
#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_scsi_ctrl_an_resp {
pub event_actual: __virtio32,
pub response: __u8,
}
-#[test]
-fn bindgen_test_layout_virtio_scsi_ctrl_an_resp() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_scsi_ctrl_an_resp> =
- ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_scsi_ctrl_an_resp>(),
- 5usize,
- concat!("Size of: ", stringify!(virtio_scsi_ctrl_an_resp))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_scsi_ctrl_an_resp>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_scsi_ctrl_an_resp))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).event_actual) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_ctrl_an_resp),
- "::",
- stringify!(event_actual)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).response) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_ctrl_an_resp),
- "::",
- stringify!(response)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_scsi_ctrl_an_resp"]
+ [::std::mem::size_of::<virtio_scsi_ctrl_an_resp>() - 5usize];
+ ["Alignment of virtio_scsi_ctrl_an_resp"]
+ [::std::mem::align_of::<virtio_scsi_ctrl_an_resp>() - 1usize];
+ ["Offset of field: virtio_scsi_ctrl_an_resp::event_actual"]
+ [::std::mem::offset_of!(virtio_scsi_ctrl_an_resp, event_actual) - 0usize];
+ ["Offset of field: virtio_scsi_ctrl_an_resp::response"]
+ [::std::mem::offset_of!(virtio_scsi_ctrl_an_resp, response) - 4usize];
+};
#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_scsi_event {
@@ -535,51 +228,17 @@
pub lun: [__u8; 8usize],
pub reason: __virtio32,
}
-#[test]
-fn bindgen_test_layout_virtio_scsi_event() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_scsi_event> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_scsi_event>(),
- 16usize,
- concat!("Size of: ", stringify!(virtio_scsi_event))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_scsi_event>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_scsi_event))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).event) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_event),
- "::",
- stringify!(event)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).lun) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_event),
- "::",
- stringify!(lun)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).reason) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_event),
- "::",
- stringify!(reason)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_scsi_event"][::std::mem::size_of::<virtio_scsi_event>() - 16usize];
+ ["Alignment of virtio_scsi_event"][::std::mem::align_of::<virtio_scsi_event>() - 1usize];
+ ["Offset of field: virtio_scsi_event::event"]
+ [::std::mem::offset_of!(virtio_scsi_event, event) - 0usize];
+ ["Offset of field: virtio_scsi_event::lun"]
+ [::std::mem::offset_of!(virtio_scsi_event, lun) - 4usize];
+ ["Offset of field: virtio_scsi_event::reason"]
+ [::std::mem::offset_of!(virtio_scsi_event, reason) - 12usize];
+};
#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct virtio_scsi_config {
@@ -594,118 +253,28 @@
pub max_target: __virtio16,
pub max_lun: __virtio32,
}
-#[test]
-fn bindgen_test_layout_virtio_scsi_config() {
- const UNINIT: ::std::mem::MaybeUninit<virtio_scsi_config> = ::std::mem::MaybeUninit::uninit();
- let ptr = UNINIT.as_ptr();
- assert_eq!(
- ::std::mem::size_of::<virtio_scsi_config>(),
- 36usize,
- concat!("Size of: ", stringify!(virtio_scsi_config))
- );
- assert_eq!(
- ::std::mem::align_of::<virtio_scsi_config>(),
- 1usize,
- concat!("Alignment of ", stringify!(virtio_scsi_config))
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).num_queues) as usize - ptr as usize },
- 0usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_config),
- "::",
- stringify!(num_queues)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).seg_max) as usize - ptr as usize },
- 4usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_config),
- "::",
- stringify!(seg_max)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_sectors) as usize - ptr as usize },
- 8usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_config),
- "::",
- stringify!(max_sectors)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).cmd_per_lun) as usize - ptr as usize },
- 12usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_config),
- "::",
- stringify!(cmd_per_lun)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).event_info_size) as usize - ptr as usize },
- 16usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_config),
- "::",
- stringify!(event_info_size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).sense_size) as usize - ptr as usize },
- 20usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_config),
- "::",
- stringify!(sense_size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).cdb_size) as usize - ptr as usize },
- 24usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_config),
- "::",
- stringify!(cdb_size)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_channel) as usize - ptr as usize },
- 28usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_config),
- "::",
- stringify!(max_channel)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_target) as usize - ptr as usize },
- 30usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_config),
- "::",
- stringify!(max_target)
- )
- );
- assert_eq!(
- unsafe { ::std::ptr::addr_of!((*ptr).max_lun) as usize - ptr as usize },
- 32usize,
- concat!(
- "Offset of field: ",
- stringify!(virtio_scsi_config),
- "::",
- stringify!(max_lun)
- )
- );
-}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+ ["Size of virtio_scsi_config"][::std::mem::size_of::<virtio_scsi_config>() - 36usize];
+ ["Alignment of virtio_scsi_config"][::std::mem::align_of::<virtio_scsi_config>() - 1usize];
+ ["Offset of field: virtio_scsi_config::num_queues"]
+ [::std::mem::offset_of!(virtio_scsi_config, num_queues) - 0usize];
+ ["Offset of field: virtio_scsi_config::seg_max"]
+ [::std::mem::offset_of!(virtio_scsi_config, seg_max) - 4usize];
+ ["Offset of field: virtio_scsi_config::max_sectors"]
+ [::std::mem::offset_of!(virtio_scsi_config, max_sectors) - 8usize];
+ ["Offset of field: virtio_scsi_config::cmd_per_lun"]
+ [::std::mem::offset_of!(virtio_scsi_config, cmd_per_lun) - 12usize];
+ ["Offset of field: virtio_scsi_config::event_info_size"]
+ [::std::mem::offset_of!(virtio_scsi_config, event_info_size) - 16usize];
+ ["Offset of field: virtio_scsi_config::sense_size"]
+ [::std::mem::offset_of!(virtio_scsi_config, sense_size) - 20usize];
+ ["Offset of field: virtio_scsi_config::cdb_size"]
+ [::std::mem::offset_of!(virtio_scsi_config, cdb_size) - 24usize];
+ ["Offset of field: virtio_scsi_config::max_channel"]
+ [::std::mem::offset_of!(virtio_scsi_config, max_channel) - 28usize];
+ ["Offset of field: virtio_scsi_config::max_target"]
+ [::std::mem::offset_of!(virtio_scsi_config, max_target) - 30usize];
+ ["Offset of field: virtio_scsi_config::max_lun"]
+ [::std::mem::offset_of!(virtio_scsi_config, max_lun) - 32usize];
+};
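One detail behind the numbers asserted above: the virtio_scsi request/response/config structs in this file are declared `#[repr(C, packed)]`, so there is no padding, the alignment is 1, and the expected sizes (51, 59, 108, ...) are simply the sums of the field sizes. An illustrative sketch on a hypothetical struct, not from this patch:

#[repr(C, packed)]
#[allow(dead_code)]
struct PackedExample {
    a: u8,
    b: u32, // would sit at offset 4 in a non-packed struct, but is at offset 1 here
}

const _: () = {
    ["Size of PackedExample"][::std::mem::size_of::<PackedExample>() - 5usize];
    ["Alignment of PackedExample"][::std::mem::align_of::<PackedExample>() - 1usize];
    ["Offset of field: PackedExample::b"][::std::mem::offset_of!(PackedExample, b) - 1usize];
};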
diff --git a/crates/virtio-queue/.cargo-checksum.json b/crates/virtio-queue/.cargo-checksum.json
index 5b15966..7298f6f 100644
--- a/crates/virtio-queue/.cargo-checksum.json
+++ b/crates/virtio-queue/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"1e77196a9050b180e070474e0dff9b4ae3663ca0f9879fe8e96d77f44d53d118","Cargo.toml":"ea64d1e622a8f852df8219926a59c7f43bb10f3c526f7b4ecd9e5f11717bf712","README.md":"33375d4000f394d2fb6f9fbccf0e0adf2dd09714cda44294c9cd154ab7237b06","benches/main.rs":"f0f719ca1b50e9d36f5b826063b321b4e87f0fc0401680b444dcb2a5f064931b","benches/queue/mod.rs":"ecb0aa66197958701d092d63ad7b0554c6be0df3eabf372760852cf0aab91f96","docs/TESTING.md":"6c30fc4fc331e735a7dc5a0d236e970eda5b3b312fd5b227ab44e09fb589cbdb","docs/images/descriptor.png":"a06ab6e9ed12d72068ade4df96c5c8cbb5887eb1cab89437cbbc7ac8c40e036d","docs/images/queue.png":"ecb0f59dbc021086dea450315ac0b67a0c4769cd2f4a115b6e9edcedcee93f35","src/chain.rs":"a76e7cd274f9e2127bc55db937f093985dd6722154736bafa5cc036c3cb59813","src/defs.rs":"b7f4d43b35b34b8a3d5765fc10e845bd01b652afb162fb4af6c8b9e668176d04","src/descriptor.rs":"85056e0bc56af14f745e6405d12e8dcbb68c0c5977c3b7a5cfe7058c47df064f","src/lib.rs":"2fcc55ec04a02a5783c1cf688eca125200254a4af9e28bce516a1e8df36dff28","src/mock.rs":"084b8a9cfea58bf9e4d4ac12e20230359a31a265eb5149ecf4acacb4c9a2f835","src/queue.rs":"2e3b96905666c0a14e69a416ca3e52630ac088369b0059004d1303a7104f24e3","src/queue_sync.rs":"703129fec19ae487d055d5694cc9e7714f066a3edb9c5a032e855e64049c7ae1","src/state.rs":"1a272134cc0dafbf64852ab3dc9eaaa7248b3f41628a90c03ae7b149d476597e"},"package":"e3f69a13d6610db9312acbb438b0390362af905d37634a2106be70c0f734986d"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"3d923fbfe0218e2ed6a29b1a3dfbc80e4f4d5dcd66ca96284fdadb028a010551","Cargo.toml":"909510359848bfd4ce75476e9c726c868451199e2240e4a2daa2924c259ac1b0","README.md":"33375d4000f394d2fb6f9fbccf0e0adf2dd09714cda44294c9cd154ab7237b06","benches/main.rs":"f0f719ca1b50e9d36f5b826063b321b4e87f0fc0401680b444dcb2a5f064931b","benches/queue/mod.rs":"ecb0aa66197958701d092d63ad7b0554c6be0df3eabf372760852cf0aab91f96","docs/TESTING.md":"6c30fc4fc331e735a7dc5a0d236e970eda5b3b312fd5b227ab44e09fb589cbdb","docs/images/descriptor.png":"a06ab6e9ed12d72068ade4df96c5c8cbb5887eb1cab89437cbbc7ac8c40e036d","docs/images/queue.png":"ecb0f59dbc021086dea450315ac0b67a0c4769cd2f4a115b6e9edcedcee93f35","src/chain.rs":"3acd0244e074e892163f506a8cac778712e67a9fdf54cccfa172b9f63676e0b9","src/defs.rs":"b7f4d43b35b34b8a3d5765fc10e845bd01b652afb162fb4af6c8b9e668176d04","src/descriptor.rs":"85056e0bc56af14f745e6405d12e8dcbb68c0c5977c3b7a5cfe7058c47df064f","src/descriptor_utils.rs":"af4c034b5883a768fb3da419d86bb65a88da3d774361f1bfa3b610a71bafd49c","src/lib.rs":"e3cf4dc2f47cecc19ab688c668743cdcd46a96634cfe91a7fb4172de93c106ba","src/mock.rs":"084b8a9cfea58bf9e4d4ac12e20230359a31a265eb5149ecf4acacb4c9a2f835","src/queue.rs":"ac1e5dc4655f555d017761cc958c865f42fa78b4c2510152c55fabf7921acb25","src/queue_sync.rs":"703129fec19ae487d055d5694cc9e7714f066a3edb9c5a032e855e64049c7ae1","src/state.rs":"1a272134cc0dafbf64852ab3dc9eaaa7248b3f41628a90c03ae7b149d476597e"},"package":"872e2f3fbd70a7e6f01689720cce3d5c2c5efe52b484dd07b674246ada0e9a8d"}
\ No newline at end of file
diff --git a/crates/virtio-queue/Android.bp b/crates/virtio-queue/Android.bp
index 6cf31f1..55dd12f 100644
--- a/crates/virtio-queue/Android.bp
+++ b/crates/virtio-queue/Android.bp
@@ -17,7 +17,7 @@
name: "libvirtio_queue",
crate_name: "virtio_queue",
cargo_env_compat: true,
- cargo_pkg_version: "0.11.0",
+ cargo_pkg_version: "0.14.0",
crate_root: "src/lib.rs",
edition: "2021",
rustlibs: [
@@ -26,4 +26,5 @@
"libvm_memory_android",
"libvmm_sys_util",
],
+ compile_multilib: "first",
}
diff --git a/crates/virtio-queue/CHANGELOG.md b/crates/virtio-queue/CHANGELOG.md
index 6815552..b1c498e 100644
--- a/crates/virtio-queue/CHANGELOG.md
+++ b/crates/virtio-queue/CHANGELOG.md
@@ -1,5 +1,24 @@
# Upcoming
+# v0.14.0
+
+## Changed
+
+- Updated vm-memory from 0.15.0 to 0.16.0.
+- Updated virtio-bindings from 0.2.3 to 0.2.4.
+
+# v0.13.0
+
+## Changed
+
+- Updated vm-memory from 0.14.0 to 0.15.0.
+- Updated virtio-bindings from 0.2.2 to 0.2.3.
+
+# v0.12.0
+
+## Added
+- `Reader`/`Writer` types to iterate over descriptors.
+
# v0.11.0
## Changed
diff --git a/crates/virtio-queue/Cargo.toml b/crates/virtio-queue/Cargo.toml
index 9b6bc53..014aa31 100644
--- a/crates/virtio-queue/Cargo.toml
+++ b/crates/virtio-queue/Cargo.toml
@@ -12,27 +12,36 @@
[package]
edition = "2021"
name = "virtio-queue"
-version = "0.11.0"
+version = "0.14.0"
authors = ["The Chromium OS Authors"]
+build = false
+autobins = false
+autoexamples = false
+autotests = false
+autobenches = false
description = "virtio queue implementation"
readme = "README.md"
keywords = ["virtio"]
-license = "Apache-2.0 OR BSD-3-Clause"
+license = "Apache-2.0 AND BSD-3-Clause"
repository = "https://github.com/rust-vmm/vm-virtio"
-resolver = "1"
+
+[lib]
+name = "virtio_queue"
+path = "src/lib.rs"
[[bench]]
name = "main"
+path = "benches/main.rs"
harness = false
[dependencies.log]
version = "0.4.17"
[dependencies.virtio-bindings]
-version = "0.2.2"
+version = "0.2.4"
[dependencies.vm-memory]
-version = "0.14.0"
+version = "0.16.0"
[dependencies.vmm-sys-util]
version = "0.12.1"
@@ -44,7 +53,7 @@
version = "0.9.0"
[dev-dependencies.vm-memory]
-version = "0.14.0"
+version = "0.16.0"
features = [
"backend-mmap",
"backend-atomic",
diff --git a/crates/virtio-queue/METADATA b/crates/virtio-queue/METADATA
index e7b2c6a..68f5541 100644
--- a/crates/virtio-queue/METADATA
+++ b/crates/virtio-queue/METADATA
@@ -1,17 +1,17 @@
name: "virtio-queue"
description: "virtio queue implementation"
third_party {
- version: "0.11.0"
+ version: "0.14.0"
license_type: NOTICE
last_upgrade_date {
year: 2024
- month: 2
- day: 6
+ month: 11
+ day: 21
}
homepage: "https://crates.io/crates/virtio-queue"
identifier {
type: "Archive"
- value: "https://static.crates.io/crates/virtio-queue/virtio-queue-0.11.0.crate"
- version: "0.11.0"
+ value: "https://static.crates.io/crates/virtio-queue/virtio-queue-0.14.0.crate"
+ version: "0.14.0"
}
}
diff --git a/crates/virtio-queue/cargo_embargo.json b/crates/virtio-queue/cargo_embargo.json
index 31753da..4ab15d8 100644
--- a/crates/virtio-queue/cargo_embargo.json
+++ b/crates/virtio-queue/cargo_embargo.json
@@ -4,7 +4,8 @@
},
"package": {
"virtio-queue": {
- "device_supported": false
+ "device_supported": false,
+ "compile_multilib": "first"
}
},
"run_cargo": false
diff --git a/crates/virtio-queue/src/chain.rs b/crates/virtio-queue/src/chain.rs
index 45e0f17..19598e1 100644
--- a/crates/virtio-queue/src/chain.rs
+++ b/crates/virtio-queue/src/chain.rs
@@ -14,9 +14,10 @@
use std::mem::size_of;
use std::ops::Deref;
-use vm_memory::{Address, Bytes, GuestAddress, GuestMemory};
+use vm_memory::bitmap::{BitmapSlice, WithBitmapSlice};
+use vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryRegion};
-use crate::{Descriptor, Error};
+use crate::{Descriptor, Error, Reader, Writer};
use virtio_bindings::bindings::virtio_ring::VRING_DESC_ALIGN_SIZE;
/// A virtio descriptor chain.
@@ -88,6 +89,24 @@
}
}
+ /// Return a new instance of Writer
+ pub fn writer<'a, B: BitmapSlice>(self, mem: &'a M::Target) -> Result<Writer<'a, B>, Error>
+ where
+ M::Target: Sized,
+ <<M::Target as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
+ {
+ Writer::new(mem, self).map_err(|_| Error::InvalidChain)
+ }
+
+ /// Return a new instance of Reader
+ pub fn reader<'a, B: BitmapSlice>(self, mem: &'a M::Target) -> Result<Reader<'a, B>, Error>
+ where
+ M::Target: Sized,
+ <<M::Target as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
+ {
+ Reader::new(mem, self).map_err(|_| Error::InvalidChain)
+ }
+
/// Return an iterator that only yields the writable descriptors in the chain.
pub fn writable(self) -> DescriptorChainRwIter<M> {
DescriptorChainRwIter {
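The new `reader()`/`writer()` helpers above make the usual request/response flow on a descriptor chain fairly compact. A hedged usage sketch, not part of this patch: it assumes `vm-memory` with the `backend-mmap` feature, the default `()` bitmap, and a hypothetical `VirtioRequestHeader` request type.

use std::io::Write;

use virtio_queue::{Error, Queue, QueueOwnedT, Reader, Writer};
use vm_memory::{ByteValued, GuestMemoryMmap};

// Hypothetical request header; any plain-old-data type implementing ByteValued works.
#[derive(Copy, Clone, Default)]
#[repr(C)]
struct VirtioRequestHeader {
    type_: u32,
    reserved: u32,
    sector: u64,
}

// SAFETY: plain old data with no padding and no invalid bit patterns.
unsafe impl ByteValued for VirtioRequestHeader {}

fn drain_queue(queue: &mut Queue, mem: &GuestMemoryMmap) -> Result<(), Error> {
    for chain in queue.iter(mem)? {
        // Readable descriptors come first: parse the request header.
        let mut reader: Reader<'_, ()> = chain.clone().reader(mem)?;
        let _header: VirtioRequestHeader =
            reader.read_obj().map_err(|_| Error::InvalidChain)?;

        // Writable descriptors follow: write back a single status byte.
        let mut writer: Writer<'_, ()> = chain.writer(mem)?;
        writer.write_all(&[0u8]).map_err(|_| Error::InvalidChain)?;
    }
    Ok(())
}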
diff --git a/crates/virtio-queue/src/descriptor_utils.rs b/crates/virtio-queue/src/descriptor_utils.rs
new file mode 100644
index 0000000..2e9bac1
--- /dev/null
+++ b/crates/virtio-queue/src/descriptor_utils.rs
@@ -0,0 +1,879 @@
+// Portions Copyright 2019 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE-BSD-3-Clause file.
+//
+// Copyright (C) 2024 Red Hat, Inc. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
+
+use std::collections::VecDeque;
+use std::io::{self, Read, Write};
+use std::mem::{size_of, MaybeUninit};
+use std::ops::Deref;
+use std::ptr::copy_nonoverlapping;
+use std::{cmp, result};
+
+use crate::{DescriptorChain, Error};
+use vm_memory::bitmap::{BitmapSlice, WithBitmapSlice};
+use vm_memory::{
+ Address, ByteValued, GuestMemory, GuestMemoryRegion, MemoryRegionAddress, VolatileSlice,
+};
+
+pub type Result<T> = result::Result<T, Error>;
+
+#[derive(Clone)]
+struct DescriptorChainConsumer<'a, B> {
+ buffers: VecDeque<VolatileSlice<'a, B>>,
+ bytes_consumed: usize,
+}
+
+impl<'a, B: BitmapSlice> DescriptorChainConsumer<'a, B> {
+ fn available_bytes(&self) -> usize {
+ // This is guaranteed not to overflow because the total length of the chain
+ // is checked during all creations of `DescriptorChainConsumer` (see
+ // `Reader::new()` and `Writer::new()`).
+ self.buffers
+ .iter()
+ .fold(0usize, |count, vs| count + vs.len())
+ }
+
+ fn bytes_consumed(&self) -> usize {
+ self.bytes_consumed
+ }
+
+ /// Consumes at most `count` bytes from the `DescriptorChain`. Callers must provide a function
+ /// that takes a `&[VolatileSlice]` and returns the total number of bytes consumed. This
+ /// function guarantees that the combined length of all the slices in the `&[VolatileSlice]` is
+ /// less than or equal to `count`.
+ ///
+ /// # Errors
+ ///
+ /// If the provided function returns any error then no bytes are consumed from the buffer and
+ /// the error is returned to the caller.
+ fn consume<F>(&mut self, count: usize, f: F) -> io::Result<usize>
+ where
+ F: FnOnce(&[&VolatileSlice<B>]) -> io::Result<usize>,
+ {
+ let mut buflen = 0;
+ let mut bufs = Vec::with_capacity(self.buffers.len());
+ for vs in &self.buffers {
+ if buflen >= count {
+ break;
+ }
+
+ bufs.push(vs);
+
+ let rem = count - buflen;
+ if rem < vs.len() {
+ buflen += rem;
+ } else {
+ buflen += vs.len();
+ }
+ }
+
+ if bufs.is_empty() {
+ return Ok(0);
+ }
+
+ let bytes_consumed = f(&bufs)?;
+
+ // This can happen if a driver tricks a device into reading/writing more data than
+ // fits in a `usize`.
+ let total_bytes_consumed =
+ self.bytes_consumed
+ .checked_add(bytes_consumed)
+ .ok_or_else(|| {
+ io::Error::new(io::ErrorKind::InvalidData, Error::DescriptorChainOverflow)
+ })?;
+
+ let mut rem = bytes_consumed;
+ while let Some(vs) = self.buffers.pop_front() {
+ if rem < vs.len() {
+ // Split the slice and push the remainder back into the buffer list. Safe because we
+ // know that `rem` is not out of bounds due to the check and we checked the bounds
+ // on `vs` when we added it to the buffer list.
+ self.buffers.push_front(vs.offset(rem).unwrap());
+ break;
+ }
+
+ // No need for checked math because we know that `vs.size() <= rem`.
+ rem -= vs.len();
+ }
+
+ self.bytes_consumed = total_bytes_consumed;
+
+ Ok(bytes_consumed)
+ }
+
+ fn split_at(&mut self, offset: usize) -> Result<DescriptorChainConsumer<'a, B>> {
+ let mut rem = offset;
+ let pos = self.buffers.iter().position(|vs| {
+ if rem < vs.len() {
+ true
+ } else {
+ rem -= vs.len();
+ false
+ }
+ });
+
+ if let Some(at) = pos {
+ let mut other = self.buffers.split_off(at);
+
+ if rem > 0 {
+ // There must be at least one element in `other` because we checked
+ // its `size` value in the call to `position` above.
+ let front = other.pop_front().expect("empty VecDeque after split");
+ self.buffers
+ .push_back(front.subslice(0, rem).map_err(Error::VolatileMemoryError)?);
+ other.push_front(front.offset(rem).map_err(Error::VolatileMemoryError)?);
+ }
+
+ Ok(DescriptorChainConsumer {
+ buffers: other,
+ bytes_consumed: 0,
+ })
+ } else if rem == 0 {
+ Ok(DescriptorChainConsumer {
+ buffers: VecDeque::new(),
+ bytes_consumed: 0,
+ })
+ } else {
+ Err(Error::SplitOutOfBounds(offset))
+ }
+ }
+}
+
+/// Provides a high-level interface over the sequence of memory regions
+/// defined by the readable descriptors in a descriptor chain.
+///
+/// Note that the virtio spec requires the driver to place any device-writable
+/// descriptors after any device-readable descriptors (2.6.4.2 in Virtio Spec v1.1).
+/// `Reader` stops iterating over the descriptor chain when the first writable
+/// descriptor is encountered.
+#[derive(Clone)]
+pub struct Reader<'a, B = ()> {
+ buffer: DescriptorChainConsumer<'a, B>,
+}
+
+impl<'a, B: BitmapSlice> Reader<'a, B> {
+ /// Construct a new Reader wrapper over `desc_chain`.
+ pub fn new<M, T>(mem: &'a M, desc_chain: DescriptorChain<T>) -> Result<Reader<'a, B>>
+ where
+ M: GuestMemory,
+ <<M as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
+ T: Deref,
+ T::Target: GuestMemory + Sized,
+ {
+ let mut total_len: usize = 0;
+ let buffers = desc_chain
+ .readable()
+ .map(|desc| {
+ // Verify that summing the descriptor sizes does not overflow.
+ // This can happen if a driver tricks a device into reading more data than
+ // fits in a `usize`.
+ total_len = total_len
+ .checked_add(desc.len() as usize)
+ .ok_or(Error::DescriptorChainOverflow)?;
+
+ let region = mem
+ .find_region(desc.addr())
+ .ok_or(Error::FindMemoryRegion)?;
+ let offset = desc
+ .addr()
+ .checked_sub(region.start_addr().raw_value())
+ .unwrap();
+ region
+ .get_slice(MemoryRegionAddress(offset.raw_value()), desc.len() as usize)
+ .map_err(Error::GuestMemoryError)
+ })
+ .collect::<Result<VecDeque<VolatileSlice<'a, B>>>>()?;
+ Ok(Reader {
+ buffer: DescriptorChainConsumer {
+ buffers,
+ bytes_consumed: 0,
+ },
+ })
+ }
+
+ /// Reads an object from the descriptor chain buffer.
+ pub fn read_obj<T: ByteValued>(&mut self) -> io::Result<T> {
+ let mut obj = MaybeUninit::<T>::uninit();
+
+ // SAFETY: `MaybeUninit` guarantees that the pointer is valid for
+ // `size_of::<T>()` bytes.
+ let buf = unsafe {
+ ::std::slice::from_raw_parts_mut(obj.as_mut_ptr() as *mut u8, size_of::<T>())
+ };
+
+ self.read_exact(buf)?;
+
+ // SAFETY: any type that implements `ByteValued` can be considered initialized
+ // even if it is filled with random data.
+ Ok(unsafe { obj.assume_init() })
+ }
+
+    /// Returns the number of bytes available for reading. The combined length of all
+    /// the buffers in the `DescriptorChain` was already checked for overflow when the
+    /// `Reader` was constructed, so this sum cannot overflow.
+ pub fn available_bytes(&self) -> usize {
+ self.buffer.available_bytes()
+ }
+
+ /// Returns number of bytes already read from the descriptor chain buffer.
+ pub fn bytes_read(&self) -> usize {
+ self.buffer.bytes_consumed()
+ }
+
+ /// Splits this `Reader` into two at the given offset in the `DescriptorChain` buffer.
+ /// After the split, `self` will be able to read up to `offset` bytes while the returned
+ /// `Reader` can read up to `available_bytes() - offset` bytes. Returns an error if
+ /// `offset > self.available_bytes()`.
+ pub fn split_at(&mut self, offset: usize) -> Result<Reader<'a, B>> {
+ self.buffer.split_at(offset).map(|buffer| Reader { buffer })
+ }
+}
+
+impl<'a, B: BitmapSlice> io::Read for Reader<'a, B> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.buffer.consume(buf.len(), |bufs| {
+ let mut rem = buf;
+ let mut total = 0;
+ for vs in bufs {
+ let copy_len = cmp::min(rem.len(), vs.len());
+
+ // SAFETY: Safe because we verify that we do not read outside
+ // of the slice's bound. The slice guard will only get dropped
+ // after the function returns. This will keep the pointer valid
+ // while reads are happening.
+ unsafe {
+ copy_nonoverlapping(vs.ptr_guard().as_ptr(), rem.as_mut_ptr(), copy_len);
+ }
+ rem = &mut rem[copy_len..];
+ total += copy_len;
+ }
+ Ok(total)
+ })
+ }
+}
+
+/// Provides a high-level interface over the sequence of memory regions
+/// defined by the writable descriptors in a descriptor chain.
+///
+/// Note that the virtio spec requires the driver to place any device-writable
+/// descriptors after any device-readable descriptors (2.6.4.2 in Virtio Spec v1.1).
+/// `Writer` starts iterating the descriptors from the first writable one and
+/// assumes that all following descriptors are writable.
+#[derive(Clone)]
+pub struct Writer<'a, B = ()> {
+ buffer: DescriptorChainConsumer<'a, B>,
+}
+
+impl<'a, B: BitmapSlice> Writer<'a, B> {
+ /// Construct a new Writer wrapper over `desc_chain`.
+ pub fn new<M, T>(mem: &'a M, desc_chain: DescriptorChain<T>) -> Result<Writer<'a, B>>
+ where
+ M: GuestMemory,
+ <<M as GuestMemory>::R as GuestMemoryRegion>::B: WithBitmapSlice<'a, S = B>,
+ T: Deref,
+ T::Target: GuestMemory + Sized,
+ {
+ let mut total_len: usize = 0;
+ let buffers = desc_chain
+ .writable()
+ .map(|desc| {
+ // Verify that summing the descriptor sizes does not overflow.
+ // This can happen if a driver tricks a device into writing more data than
+ // fits in a `usize`.
+ total_len = total_len
+ .checked_add(desc.len() as usize)
+ .ok_or(Error::DescriptorChainOverflow)?;
+
+ let region = mem
+ .find_region(desc.addr())
+ .ok_or(Error::FindMemoryRegion)?;
+ let offset = desc
+ .addr()
+ .checked_sub(region.start_addr().raw_value())
+ .unwrap();
+ region
+ .get_slice(MemoryRegionAddress(offset.raw_value()), desc.len() as usize)
+ .map_err(Error::GuestMemoryError)
+ })
+ .collect::<Result<VecDeque<VolatileSlice<'a, B>>>>()?;
+
+ Ok(Writer {
+ buffer: DescriptorChainConsumer {
+ buffers,
+ bytes_consumed: 0,
+ },
+ })
+ }
+
+ /// Writes an object to the descriptor chain buffer.
+ pub fn write_obj<T: ByteValued>(&mut self, val: T) -> io::Result<()> {
+ self.write_all(val.as_slice())
+ }
+
+    /// Returns the number of bytes available for writing. The combined length of all
+    /// the buffers in the `DescriptorChain` was already checked for overflow when the
+    /// `Writer` was constructed, so this sum cannot overflow.
+ pub fn available_bytes(&self) -> usize {
+ self.buffer.available_bytes()
+ }
+
+ /// Returns number of bytes already written to the descriptor chain buffer.
+ pub fn bytes_written(&self) -> usize {
+ self.buffer.bytes_consumed()
+ }
+
+ /// Splits this `Writer` into two at the given offset in the `DescriptorChain` buffer.
+ /// After the split, `self` will be able to write up to `offset` bytes while the returned
+ /// `Writer` can write up to `available_bytes() - offset` bytes. Returns an error if
+ /// `offset > self.available_bytes()`.
+ pub fn split_at(&mut self, offset: usize) -> Result<Writer<'a, B>> {
+ self.buffer.split_at(offset).map(|buffer| Writer { buffer })
+ }
+}
+
+impl<'a, B: BitmapSlice> io::Write for Writer<'a, B> {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.buffer.consume(buf.len(), |bufs| {
+ let mut rem = buf;
+ let mut total = 0;
+ for vs in bufs {
+ let copy_len = cmp::min(rem.len(), vs.len());
+
+ // SAFETY: Safe because we ensure that we do not write over the
+ // slice's bounds. The slice guard will only get dropped after
+ // the function returns. This will keep the pointer valid while
+ // writes are happening.
+ unsafe {
+ copy_nonoverlapping(rem.as_ptr(), vs.ptr_guard_mut().as_ptr(), copy_len);
+ }
+ vs.bitmap().mark_dirty(0, copy_len);
+ rem = &rem[copy_len..];
+ total += copy_len;
+ }
+ Ok(total)
+ })
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ // Nothing to flush since the writes go straight into the buffer.
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{Descriptor, Queue, QueueOwnedT, QueueT};
+ use vm_memory::{GuestAddress, GuestMemoryMmap, Le32};
+
+ use crate::mock::MockSplitQueue;
+ use virtio_bindings::bindings::virtio_ring::{VRING_DESC_F_NEXT, VRING_DESC_F_WRITE};
+
+ const MAX_QUEUE_SIZE: u16 = 16;
+
+ #[derive(Copy, Clone, PartialEq, Eq)]
+ pub enum DescriptorType {
+ Readable,
+ Writable,
+ }
+
+ /// Test utility function to create a descriptor chain in guest memory.
+ pub fn create_descriptor_chain(
+ memory: &GuestMemoryMmap,
+ descriptor_array_addr: GuestAddress,
+ descriptors: Vec<(DescriptorType, u32)>,
+ spaces_between_regions: u32,
+ ) -> Result<DescriptorChain<&GuestMemoryMmap>> {
+ let descriptors_len = descriptors.len();
+ let mut descs = vec![];
+
+ let queue = MockSplitQueue::create(memory, descriptor_array_addr, MAX_QUEUE_SIZE);
+
+ let mut buffers_start_addr = queue.end();
+
+ for (index, (type_, size)) in descriptors.into_iter().enumerate() {
+ let mut flags = 0;
+ if let DescriptorType::Writable = type_ {
+ flags |= VRING_DESC_F_WRITE;
+ }
+ if index + 1 < descriptors_len {
+ flags |= VRING_DESC_F_NEXT;
+ }
+
+ descs.push(Descriptor::new(
+ buffers_start_addr.raw_value(),
+ size,
+ flags as u16,
+ (index + 1) as u16,
+ ));
+
+ let offset = size + spaces_between_regions;
+ buffers_start_addr = buffers_start_addr
+ .checked_add(u64::from(offset))
+ .ok_or(Error::InvalidChain)?;
+ }
+
+ queue.build_desc_chain(&descs).unwrap();
+
+ let avail_ring = queue.avail_addr();
+
+ let mut queue: Queue = Queue::new(MAX_QUEUE_SIZE).unwrap();
+ queue
+ .try_set_desc_table_address(descriptor_array_addr)
+ .unwrap();
+ queue.try_set_avail_ring_address(avail_ring).unwrap();
+ queue.set_ready(true);
+
+ let chain = queue.iter(memory).unwrap().next().unwrap();
+
+ Ok(chain.clone())
+ }
+
+ #[test]
+ fn reader_test_inv_desc_addr() {
+ let memory: GuestMemoryMmap =
+ GuestMemoryMmap::from_ranges(&[(GuestAddress(0x0), 0x1000)]).unwrap();
+
+ let queue = MockSplitQueue::create(&memory, GuestAddress(0x0), MAX_QUEUE_SIZE);
+
+ // set addr out of memory
+ let descriptor = Descriptor::new(0x1001, 1, 0, 1_u16);
+ queue.build_desc_chain(&[descriptor]).unwrap();
+
+ let avail_ring = queue.avail_addr();
+
+ let mut queue: Queue = Queue::new(MAX_QUEUE_SIZE).unwrap();
+ queue.try_set_desc_table_address(GuestAddress(0x0)).unwrap();
+ queue.try_set_avail_ring_address(avail_ring).unwrap();
+ queue.set_ready(true);
+
+ let chain = queue.iter(&memory).unwrap().next().unwrap();
+
+ assert!(Reader::new(&memory, chain).is_err());
+ }
+
+ #[test]
+ fn reader_test_simple_chain() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![
+ (Readable, 8),
+ (Readable, 16),
+ (Readable, 18),
+ (Readable, 64),
+ ],
+ 0,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
+ assert_eq!(reader.available_bytes(), 106);
+ assert_eq!(reader.bytes_read(), 0);
+
+ let mut buffer = [0_u8; 64];
+ if let Err(e) = reader.read_exact(&mut buffer) {
+ panic!("read_exact should not fail here: {:?}", e);
+ }
+
+ assert_eq!(reader.available_bytes(), 42);
+ assert_eq!(reader.bytes_read(), 64);
+
+ match reader.read(&mut buffer) {
+ Err(e) => panic!("read should not fail here: {:?}", e),
+ Ok(length) => assert_eq!(length, 42),
+ }
+
+ assert_eq!(reader.available_bytes(), 0);
+ assert_eq!(reader.bytes_read(), 106);
+ }
+
+ #[test]
+ fn writer_test_simple_chain() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![
+ (Writable, 8),
+ (Writable, 16),
+ (Writable, 18),
+ (Writable, 64),
+ ],
+ 0,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut writer = Writer::new(&memory, chain).expect("failed to create Writer");
+ assert_eq!(writer.available_bytes(), 106);
+ assert_eq!(writer.bytes_written(), 0);
+
+ let buffer = [0_u8; 64];
+ if let Err(e) = writer.write_all(&buffer) {
+ panic!("write_all should not fail here: {:?}", e);
+ }
+
+ assert_eq!(writer.available_bytes(), 42);
+ assert_eq!(writer.bytes_written(), 64);
+
+ match writer.write(&buffer) {
+ Err(e) => panic!("write should not fail here {:?}", e),
+ Ok(length) => assert_eq!(length, 42),
+ }
+
+ assert_eq!(writer.available_bytes(), 0);
+ assert_eq!(writer.bytes_written(), 106);
+ }
+
+ #[test]
+ fn reader_test_incompatible_chain() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(&memory, GuestAddress(0x0), vec![(Writable, 8)], 0)
+ .expect("create_descriptor_chain failed");
+ let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
+ assert_eq!(reader.available_bytes(), 0);
+ assert_eq!(reader.bytes_read(), 0);
+
+ assert!(reader.read_obj::<u8>().is_err());
+
+ assert_eq!(reader.available_bytes(), 0);
+ assert_eq!(reader.bytes_read(), 0);
+ }
+
+ #[test]
+ fn writer_test_incompatible_chain() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(&memory, GuestAddress(0x0), vec![(Readable, 8)], 0)
+ .expect("create_descriptor_chain failed");
+ let mut writer = Writer::new(&memory, chain).expect("failed to create Writer");
+ assert_eq!(writer.available_bytes(), 0);
+ assert_eq!(writer.bytes_written(), 0);
+
+ assert!(writer.write_obj(0u8).is_err());
+
+ assert_eq!(writer.available_bytes(), 0);
+ assert_eq!(writer.bytes_written(), 0);
+ }
+
+ #[test]
+ fn reader_writer_shared_chain() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![
+ (Readable, 16),
+ (Readable, 16),
+ (Readable, 96),
+ (Writable, 64),
+ (Writable, 1),
+ (Writable, 3),
+ ],
+ 0,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut reader = Reader::new(&memory, chain.clone()).expect("failed to create Reader");
+ let mut writer = Writer::new(&memory, chain).expect("failed to create Writer");
+
+ assert_eq!(reader.bytes_read(), 0);
+ assert_eq!(writer.bytes_written(), 0);
+
+ let mut buffer = Vec::with_capacity(200);
+
+ assert_eq!(
+ reader
+ .read_to_end(&mut buffer)
+ .expect("read should not fail here"),
+ 128
+ );
+
+ // The writable descriptors are only 68 bytes long.
+ writer
+ .write_all(&buffer[..68])
+ .expect("write should not fail here");
+
+ assert_eq!(reader.available_bytes(), 0);
+ assert_eq!(reader.bytes_read(), 128);
+ assert_eq!(writer.available_bytes(), 0);
+ assert_eq!(writer.bytes_written(), 68);
+ }
+
+ #[test]
+ fn reader_writer_shattered_object() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let secret: Le32 = 0x1234_5678.into();
+
+ // Create a descriptor chain with memory regions that are properly separated.
+ let chain_writer = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![(Writable, 1), (Writable, 1), (Writable, 1), (Writable, 1)],
+ 123,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut writer = Writer::new(&memory, chain_writer).expect("failed to create Writer");
+ if let Err(e) = writer.write_obj(secret) {
+ panic!("write_obj should not fail here: {:?}", e);
+ }
+
+ // Now create new descriptor chain pointing to the same memory and try to read it.
+ let chain_reader = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![(Readable, 1), (Readable, 1), (Readable, 1), (Readable, 1)],
+ 123,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut reader = Reader::new(&memory, chain_reader).expect("failed to create Reader");
+ match reader.read_obj::<Le32>() {
+ Err(e) => panic!("read_obj should not fail here: {:?}", e),
+ Ok(read_secret) => assert_eq!(read_secret, secret),
+ }
+ }
+
+ #[test]
+ fn reader_unexpected_eof() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![(Readable, 256), (Readable, 256)],
+ 0,
+ )
+ .expect("create_descriptor_chain failed");
+
+ let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
+
+ let mut buf = vec![0; 1024];
+
+ assert_eq!(
+ reader
+ .read_exact(&mut buf[..])
+ .expect_err("read more bytes than available")
+ .kind(),
+ io::ErrorKind::UnexpectedEof
+ );
+ }
+
+ #[test]
+ fn split_border() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![
+ (Readable, 16),
+ (Readable, 16),
+ (Readable, 96),
+ (Writable, 64),
+ (Writable, 1),
+ (Writable, 3),
+ ],
+ 0,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut reader = Reader::new(&memory, chain.clone()).expect("failed to create Reader");
+
+ let other = reader.split_at(32).expect("failed to split Reader");
+ assert_eq!(reader.available_bytes(), 32);
+ assert_eq!(other.available_bytes(), 96);
+
+ let mut writer = Writer::new(&memory, chain.clone()).expect("failed to create Writer");
+ let other = writer.split_at(64).expect("failed to split Writer");
+ assert_eq!(writer.available_bytes(), 64);
+ assert_eq!(other.available_bytes(), 4);
+ }
+
+ #[test]
+ fn split_middle() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![
+ (Readable, 16),
+ (Readable, 16),
+ (Readable, 96),
+ (Writable, 64),
+ (Writable, 1),
+ (Writable, 3),
+ ],
+ 0,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
+
+ let other = reader.split_at(24).expect("failed to split Reader");
+ assert_eq!(reader.available_bytes(), 24);
+ assert_eq!(other.available_bytes(), 104);
+ }
+
+ #[test]
+ fn split_end() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![
+ (Readable, 16),
+ (Readable, 16),
+ (Readable, 96),
+ (Writable, 64),
+ (Writable, 1),
+ (Writable, 3),
+ ],
+ 0,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
+
+ let other = reader.split_at(128).expect("failed to split Reader");
+ assert_eq!(reader.available_bytes(), 128);
+ assert_eq!(other.available_bytes(), 0);
+ }
+
+ #[test]
+ fn split_beginning() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![
+ (Readable, 16),
+ (Readable, 16),
+ (Readable, 96),
+ (Writable, 64),
+ (Writable, 1),
+ (Writable, 3),
+ ],
+ 0,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
+
+ let other = reader.split_at(0).expect("failed to split Reader");
+ assert_eq!(reader.available_bytes(), 0);
+ assert_eq!(other.available_bytes(), 128);
+ }
+
+ #[test]
+ fn split_outofbounds() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![
+ (Readable, 16),
+ (Readable, 16),
+ (Readable, 96),
+ (Writable, 64),
+ (Writable, 1),
+ (Writable, 3),
+ ],
+ 0,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
+
+ if reader.split_at(256).is_ok() {
+ panic!("successfully split Reader with out of bounds offset");
+ }
+ }
+
+ #[test]
+ fn read_full() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![(Readable, 16), (Readable, 16), (Readable, 16)],
+ 0,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut reader = Reader::new(&memory, chain).expect("failed to create Reader");
+
+ let mut buf = [0u8; 64];
+ assert_eq!(
+ reader.read(&mut buf[..]).expect("failed to read to buffer"),
+ 48
+ );
+ }
+
+ #[test]
+ fn write_full() {
+ use DescriptorType::*;
+
+ let memory_start_addr = GuestAddress(0x0);
+ let memory = GuestMemoryMmap::from_ranges(&[(memory_start_addr, 0x10000)]).unwrap();
+
+ let chain = create_descriptor_chain(
+ &memory,
+ GuestAddress(0x0),
+ vec![(Writable, 16), (Writable, 16), (Writable, 16)],
+ 0,
+ )
+ .expect("create_descriptor_chain failed");
+ let mut writer = Writer::new(&memory, chain).expect("failed to create Writer");
+
+ let buf = [0xdeu8; 64];
+ assert_eq!(
+ writer.write(&buf[..]).expect("failed to write from buffer"),
+ 48
+ );
+
+ assert!(writer.flush().is_ok());
+ }
+}
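As a follow-up on `split_at` above, a hedged sketch of one way it can be used: carving the writable area into a payload region and a trailing status region that are filled independently. Names are illustrative, and the `Writer` is assumed to have been built as in the unit tests in this file.

use std::io::Write;

use virtio_queue::{Error, Writer};

const PAYLOAD_LEN: usize = 64;

// Write a payload into the first PAYLOAD_LEN writable bytes and a status byte
// into whatever writable space follows them.
fn write_payload_and_status(mut writer: Writer<'_, ()>, payload: &[u8]) -> Result<(), Error> {
    // After the split, `writer` covers the first PAYLOAD_LEN bytes and
    // `status_writer` covers the rest of the writable descriptors.
    let mut status_writer = writer.split_at(PAYLOAD_LEN)?;

    let n = PAYLOAD_LEN.min(payload.len());
    writer.write_all(&payload[..n]).map_err(|_| Error::InvalidChain)?;
    status_writer.write_all(&[0u8]).map_err(|_| Error::InvalidChain)?;
    Ok(())
}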
diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs
index 0e27935..df3d189 100644
--- a/crates/virtio-queue/src/lib.rs
+++ b/crates/virtio-queue/src/lib.rs
@@ -20,10 +20,11 @@
use std::sync::atomic::Ordering;
use log::error;
-use vm_memory::{GuestMemory, GuestMemoryError};
+use vm_memory::{GuestMemory, GuestMemoryError, VolatileMemoryError};
pub use self::chain::{DescriptorChain, DescriptorChainRwIter};
pub use self::descriptor::{Descriptor, VirtqUsedElem};
+pub use self::descriptor_utils::{Reader, Writer};
pub use self::queue::{AvailIter, Queue};
pub use self::queue_sync::QueueSync;
pub use self::state::QueueState;
@@ -34,6 +35,7 @@
mod chain;
mod descriptor;
+mod descriptor_utils;
mod queue;
mod queue_sync;
mod state;
@@ -67,6 +69,16 @@
InvalidAvailRingIndex,
/// The queue is not ready for operation.
QueueNotReady,
+ /// Volatile memory error.
+ VolatileMemoryError(VolatileMemoryError),
+ /// The combined length of all the buffers in a `DescriptorChain` would overflow.
+ DescriptorChainOverflow,
+ /// No memory region for this address range.
+ FindMemoryRegion,
+ /// Descriptor guest memory error.
+ GuestMemoryError(GuestMemoryError),
+ /// DescriptorChain split is out of bounds.
+ SplitOutOfBounds(usize),
}
impl Display for Error {
@@ -98,6 +110,14 @@
"invalid available ring index (more descriptors to process than queue size)"
),
QueueNotReady => write!(f, "trying to process requests on a queue that's not ready"),
+ VolatileMemoryError(e) => write!(f, "volatile memory error: {e}"),
+ DescriptorChainOverflow => write!(
+ f,
+ "the combined length of all the buffers in a `DescriptorChain` would overflow"
+ ),
+ FindMemoryRegion => write!(f, "no memory region for this address range"),
+ GuestMemoryError(e) => write!(f, "descriptor guest memory error: {e}"),
+ SplitOutOfBounds(off) => write!(f, "`DescriptorChain` split is out of bounds: {off}"),
}
}
}
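
The `Reader`/`Writer` pair exported above wraps a chain's device-readable and device-writable descriptors behind `std::io::Read` and `std::io::Write`. A minimal sketch of the intended flow, reusing the mock helpers from the unit tests earlier in this patch (`create_descriptor_chain`, `DescriptorType`) and assuming `DescriptorChain` clones cheaply so both halves can walk the same chain; `echo_request` is a placeholder name, and a real device would take its chains from the queue instead:

```rust
use std::io::{Read, Write};

fn echo_request(memory: &GuestMemoryMmap) {
    use DescriptorType::*;

    // One chain: readable descriptors carry the request, writable descriptors
    // receive the response.
    let chain = create_descriptor_chain(
        memory,
        GuestAddress(0x0),
        vec![(Readable, 64), (Writable, 64)],
        0,
    )
    .expect("create_descriptor_chain failed");

    // Reader only walks the readable descriptors, Writer only the writable ones.
    let mut reader = Reader::new(memory, chain.clone()).expect("failed to create Reader");
    let mut writer = Writer::new(memory, chain).expect("failed to create Writer");

    // Copy the request out of guest memory ...
    let mut request = [0u8; 64];
    reader.read_exact(&mut request).expect("short read");

    // ... and write the response back through the writable descriptors.
    writer.write_all(&request).expect("short write");
    writer.flush().expect("flush failed");
}
```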
diff --git a/crates/virtio-queue/src/queue.rs b/crates/virtio-queue/src/queue.rs
index 4a69b13..ca55090 100644
--- a/crates/virtio-queue/src/queue.rs
+++ b/crates/virtio-queue/src/queue.rs
@@ -484,26 +484,6 @@
.map_err(Error::GuestMemory)
}
- // TODO: Turn this into a doc comment/example.
- // With the current implementation, a common way of consuming entries from the available ring
- // while also leveraging notification suppression is to use a loop, for example:
- //
- // loop {
- // // We have to explicitly disable notifications if `VIRTIO_F_EVENT_IDX` has not been
- // // negotiated.
- // self.disable_notification()?;
- //
- // for chain in self.iter()? {
- // // Do something with each chain ...
- // // Let's assume we process all available chains here.
- // }
- //
- // // If `enable_notification` returns `true`, the driver has added more entries to the
- // // available ring.
- // if !self.enable_notification()? {
- // break;
- // }
- // }
fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
self.set_notification(mem, true)?;
// Ensures the following read is not reordered before any previous write operation.
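
The hunk above drops the TODO comment that sketched how a device drains the available ring while leveraging notification suppression. Reconstructed here as a compilable sketch under the assumption that the crate's `QueueT`/`QueueOwnedT` traits are in scope (consistent with the `enable_notification` signature shown above); the per-chain handling is left as a placeholder:

```rust
use virtio_queue::{Queue, QueueOwnedT, QueueT};
use vm_memory::GuestMemory;

fn process_queue<M: GuestMemory>(queue: &mut Queue, mem: &M) -> Result<(), virtio_queue::Error> {
    loop {
        // Notifications have to be disabled explicitly if VIRTIO_F_EVENT_IDX
        // has not been negotiated.
        queue.disable_notification(mem)?;

        for _chain in queue.iter(mem)? {
            // Process each descriptor chain; assume every available chain is
            // consumed here.
        }

        // If enable_notification() returns true, the driver added more entries
        // to the available ring in the meantime, so go around again.
        if !queue.enable_notification(mem)? {
            break;
        }
    }
    Ok(())
}
```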
diff --git a/crates/virtio-vsock/Android.bp b/crates/virtio-vsock/Android.bp
index a64df92..d1b08e3 100644
--- a/crates/virtio-vsock/Android.bp
+++ b/crates/virtio-vsock/Android.bp
@@ -25,4 +25,5 @@
"libvirtio_queue",
"libvm_memory_android",
],
+ compile_multilib: "first",
}
diff --git a/crates/virtio-vsock/cargo_embargo.json b/crates/virtio-vsock/cargo_embargo.json
index 7edff07..a03712e 100644
--- a/crates/virtio-vsock/cargo_embargo.json
+++ b/crates/virtio-vsock/cargo_embargo.json
@@ -4,7 +4,8 @@
},
"package": {
"virtio-vsock": {
- "device_supported": false
+ "device_supported": false,
+ "compile_multilib": "first"
}
},
"run_cargo": false
diff --git a/crates/vm-memory/.cargo-checksum.json b/crates/vm-memory/.cargo-checksum.json
index 9b35a58..ffdac0e 100644
--- a/crates/vm-memory/.cargo-checksum.json
+++ b/crates/vm-memory/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"68b774d7290cc0864d789ce9e1b8f85048a9a95beed248847172167a67cdb935","CODEOWNERS":"3969d4fa52acf29819098633aedfb2d46ccdb5b721357cecd7e7fdd884fa4b1b","Cargo.toml":"d3281fc40ad8c5892c1fcafd6b4aa9b59bde07fdc97874871999cf71b685e511","DESIGN.md":"392add56a05e8bf9ab7e527c52640054bd6a8cdb0780d00331d01617227d0b80","LICENSE-APACHE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","LICENSE-BSD-3-Clause":"a6d3ebd1c2f37d4fd83d0676621f695fc0cc2d8c6e646cdbb831b46e0650c208","README.md":"4db218bca015391e49025ada0362b9c4b296fb053df362a7533dc98da02439a0","TODO.md":"c844f03be6631843e90d13b3410df031b07ee16db4a3c7cbda7e89557e9be46b","benches/guest_memory.rs":"ea80023f341dac6ff5222f5e5c5f60b4ba0a56f149e9d8039250d088153f87d6","benches/main.rs":"1d29d29baf2f9dc2e00af68102eb868451acb6692b836c2f6ce92e58cff46a5f","benches/mmap/mod.rs":"d6eea32eee47b001060464de45348266c87430eeba60c77c41f62d4b68b54728","benches/volatile.rs":"cd3f8d8413498a570884b02587555efd94ed7caaac1a463e2f7013cbb11b9541","coverage_config_aarch64.json":"ca288852cecd24c77ab3a790f35eb621f3c7b9e9e18014f71776a37530ca0b8d","coverage_config_x86_64.json":"576a6fed2a3675181987087a2f12ca4727f58b50b25aa5bce373d2daf96c35a3","src/address.rs":"dee1e39710d17fdc30a7f562f2fe066a98d7edd88348911e65f6a6a98d1ccabe","src/atomic.rs":"592b73ed462aa2c9ad785c85a75b2b66bba44e88365a1006abab02063f79201d","src/atomic_integer.rs":"42f5262e2a1cef6683a0e62fdfb5f2f3873ef4284a6c0f8882dd34b0f076678d","src/bitmap/backend/atomic_bitmap.rs":"8c5846180c8e6fd209a7a60b79b628ddccf0af9dfb3da2106b59b16e782daff8","src/bitmap/backend/atomic_bitmap_arc.rs":"de4ff032f63f62b80e5de5657e435431ed8433c2f4f307ab040dc5b11884ee9e","src/bitmap/backend/mod.rs":"efdd3652c4050f52b3a9746520d67e23a57051dd57ed79118bc0e5cdbe1d3fb9","src/bitmap/backend/slice.rs":"4d20c7df4811e337216c720281181200516119d455c94132915c1e72aa7ea747","src/bitmap/mod.rs":"1926c28c659f3a2e2501f242e91d41a0e9a3c785fa989cc497eb431fbd4689fa","src/bytes.rs":"23e2f8d57ccdca8319beee0deb2cff0d43ed4fc171ffc074b0aec87f72271c40","src/endian.rs":"d954021a4640d8d2c35759466a910ac4d0e477d6cdda7c01d09f8c0fd1d51e8f","src/guest_memory.rs":"fd280efb0d0beaf7f93ca4ebd8463d129669f495e21f814674cd8a15445d4115","src/lib.rs":"18fa835266e378e87b9868e41702cbb9191c9f1db231f2d03f86e9fe9d1ada7d","src/mmap.rs":"a1bdb40c5b0bc3a68dba528ea237c17fbc1fd9e25bc623f7a7e978dfe9fb40d6","src/mmap_unix.rs":"7c07b17ff4f6892c295e395d262c1322def876958fa2636558040ebc2a6a2485","src/mmap_windows.rs":"df7dd2061903c294b21b404f0d6cb118fac36f1b64f59a53c0d002a129df86e4","src/mmap_xen.rs":"e8c4ee998dae5bcb1dcd442b35cf75817a0926d52870010b63b10176b804d22e","src/volatile_memory.rs":"f1c03e5e860004112287bacb5f295e6294ce684628a60b572b7ea055dbf0c3dd"},"package":"9dc276f0d00c17b9aeb584da0f1e1c673df0d183cc2539e3636ec8cbc5eae99b"}
\ No newline at end of file
+{"files":{"CHANGELOG.md":"16b2f893e68cb4bf42feb73adc34c85630cf2e6958f28f22d846ae60491710fb","CODEOWNERS":"3969d4fa52acf29819098633aedfb2d46ccdb5b721357cecd7e7fdd884fa4b1b","Cargo.toml":"3e3bf143dc31afd2f8723dfd5779030d827bba7bc34f97a9c5dae39621d0d403","DESIGN.md":"35e9671628e330fbd62a0f00b8726cb09c82bda4779834a1b762ed9036f9330f","LICENSE-APACHE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","LICENSE-BSD-3-Clause":"a6d3ebd1c2f37d4fd83d0676621f695fc0cc2d8c6e646cdbb831b46e0650c208","README.md":"f9fd1d519777a1a052aaa11e54a79c0dc1781379d2406f800c10ed04283a20cd","TODO.md":"275c82047e4a1a1f8db0787839645220f11ca376776cb979fdb0ecb0a33fe781","benches/guest_memory.rs":"ea80023f341dac6ff5222f5e5c5f60b4ba0a56f149e9d8039250d088153f87d6","benches/main.rs":"1d29d29baf2f9dc2e00af68102eb868451acb6692b836c2f6ce92e58cff46a5f","benches/mmap/mod.rs":"39a71e023286ee63a5553325ea2815cf3e1390daa363708b82baaecacb468173","benches/volatile.rs":"cd3f8d8413498a570884b02587555efd94ed7caaac1a463e2f7013cbb11b9541","coverage_config_aarch64.json":"ca288852cecd24c77ab3a790f35eb621f3c7b9e9e18014f71776a37530ca0b8d","coverage_config_x86_64.json":"f4e604310360cbd48fd22f7ca1c63d53595f7fed44f3c1ccf4d112f5b5d2e52d","src/address.rs":"e73a5650d3bbbaa70b2d3fc90ef2fc226108750bcb5f119b6ab7629bab635946","src/atomic.rs":"ab89e700b3c3df62c4c4222c54e7d402d4a4fd6741988202233fabc5c594ad40","src/atomic_integer.rs":"470c14fc9c9f8a2425d4d1c78c638a41066564288f9c9fae8f83056e03589b86","src/bitmap/backend/atomic_bitmap.rs":"5ce709ff8b37d8d52201d6c5cb67e5a5a28a71c1011225ced4ddf42f8f3f2684","src/bitmap/backend/atomic_bitmap_arc.rs":"62dc5f1c6038b2a3c93566d55a3eb8e08941c033d5e3942f677097ae4f2b375c","src/bitmap/backend/mod.rs":"46918db372ee74f6fa510cc689c8d394e1821d568d388500259a2a16417b7ddc","src/bitmap/backend/slice.rs":"96d9f08fc7b425dd25fe83c048f73c89afb56776b2922f835c8ec22f83bf1ff3","src/bitmap/mod.rs":"a16b8c92da33154279853aa023f4cfc16a1af8da16a75f02f99cb14e691944be","src/bytes.rs":"22e8c7eb1a70da4ff90b0cc4bab5c55e6239dfacc33df244c47e0cbc0b2114bb","src/endian.rs":"d954021a4640d8d2c35759466a910ac4d0e477d6cdda7c01d09f8c0fd1d51e8f","src/guest_memory.rs":"bfeeb5c0796c96243d18cfe2a762c3856807b6410f0c141c3938e29004891b3d","src/io.rs":"2a023c58c6e4febc5c009e4e74273f43273494893c44bb87c8d5e0ecf8214810","src/lib.rs":"e43516bb2e1b1c4c70d45c325952c6c636bb3ea3175154d923908b97cfc1be48","src/mmap.rs":"29db8a95bcc95d1042fe1b63ac60d564855aadaacdc99d661f3ef521374be104","src/mmap_unix.rs":"f6aa6869492c476936124f926afa6a0d3a30c747e7f14343a69f99c8cee952e3","src/mmap_windows.rs":"df7dd2061903c294b21b404f0d6cb118fac36f1b64f59a53c0d002a129df86e4","src/mmap_xen.rs":"a2ea095dd282e66ef04fce226cea8d0803f690a252ef4db19b2851d2d1ab593d","src/volatile_memory.rs":"13d74fecf1e8c0f8c0009117c51bce6d6ebe9ec1bcc1f930ee6863e17b969d2e"},"package":"f1720e7240cdc739f935456eb77f370d7e9b2a3909204da1e2b47bef1137a013"}
\ No newline at end of file
diff --git a/crates/vm-memory/Android.bp b/crates/vm-memory/Android.bp
index 030e264..3f81445 100644
--- a/crates/vm-memory/Android.bp
+++ b/crates/vm-memory/Android.bp
@@ -17,7 +17,7 @@
name: "libvm_memory_android",
crate_name: "vm_memory",
cargo_env_compat: true,
- cargo_pkg_version: "0.12.2",
+ cargo_pkg_version: "0.16.1",
crate_root: "src/lib.rs",
edition: "2021",
features: [
@@ -32,4 +32,13 @@
"liblibc",
"libthiserror",
],
+ compile_multilib: "first",
+ arch: {
+ arm64: {
+ enabled: true,
+ },
+ x86_64: {
+ enabled: true,
+ },
+ },
}
diff --git a/crates/vm-memory/CHANGELOG.md b/crates/vm-memory/CHANGELOG.md
index a70cb03..2ef77e8 100644
--- a/crates/vm-memory/CHANGELOG.md
+++ b/crates/vm-memory/CHANGELOG.md
@@ -1,153 +1,241 @@
# Changelog
-## [v0.12.2]
+## Upcoming version
+
+## \[v0.16.1\]
+
+### Added
+
+- \[[#304](https://github.com/rust-vmm/vm-memory/pull/304)\] Implement ReadVolatile and WriteVolatile for TcpStream
+
+## \[v0.16.0\]
+
+### Added
+
+- \[[#287](https://github.com/rust-vmm/vm-memory/pull/287)\] Support for RISC-V 64-bit platform.
+- \[[#299](https://github.com/rust-vmm/vm-memory/pull/299)\] atomic_bitmap: support enlarging the bitmap.
+
+### Changed
+
+- \[[#278](https://github.com/rust-vmm/vm-memory/pull/278)\] Remove `GuestMemoryIterator` trait,
+ and instead have GuestMemory::iter() return `impl Iterator`.
+
+## \[v0.15.0\]
+
+### Added
+
+- \[[#270](https://github.com/rust-vmm/vm-memory/pull/270)\] atomic_bitmap: add capability to reset bits range
+- \[[#285](https://github.com/rust-vmm/vm-memory/pull/285)\] Annotated modules in lib.rs to indicate their feature
+ dependencies such that it is reflected in the docs, enhancing documentation clarity for users.
+
+### Changed
+
+- \[[#275](https://github.com/rust-vmm/vm-memory/pull/275)\] Fail builds on non 64-bit platforms.
### Fixed
-- [[#251]](https://github.com/rust-vmm/vm-memory/pull/251): Inserted checks
+
+- \[[#279](https://github.com/rust-vmm/vm-memory/pull/279)\] Remove restriction from `read_volatile_from` and `write_volatile_into`
+ that made them copy data in chunks of 4096.
+
+### Removed
+
+### Deprecated
+
+## \[v0.14.0\]
+
+### Added
+
+- \[[#266](https://github.com/rust-vmm/vm-memory/pull/266)\] Derive `Debug` for several
+ types that were missing it.
+
+### Changed
+
+- \[[#274](https://github.com/rust-vmm/vm-memory/pull/274)\] Drop `Default` as requirement for `ByteValued`.
+
+## \[v0.13.1\]
+
+### Added
+
+- \[[#256](https://github.com/rust-vmm/vm-memory/pull/256)\] Implement `WriteVolatile`
+ for `std::io::Stdout`.
+- \[[#256](https://github.com/rust-vmm/vm-memory/pull/256)\] Implement `WriteVolatile`
+ for `std::vec::Vec`.
+- \[[#256](https://github.com/rust-vmm/vm-memory/pull/256)\] Implement `WriteVolatile`
+ for `Cursor<&mut [u8]>`.
+- \[[#256](https://github.com/rust-vmm/vm-memory/pull/256)\] Implement `ReadVolatile`
+ for `Cursor<T: AsRef[u8]>`.
+
+## \[v0.13.0\]
+
+### Added
+
+- [\[#247\]](https://github.com/rust-vmm/vm-memory/pull/247) Add `ReadVolatile` and
+ `WriteVolatile` traits which are equivalents of `Read`/`Write` with volatile
+ access semantics.
+
+### Changed
+
+- [\[#247\]](https://github.com/rust-vmm/vm-memory/pull/247) Deprecate
+ `Bytes::{read_from, read_exact_from, write_to, write_all_to}`. Instead use
+ `ReadVolatile`/`WriteVolatile`, which do not incur the performance penalty
+ of copying to hypervisor memory due to `Read`/`Write` being incompatible
+ with volatile semantics (see also #217).
+
+## \[v0.12.2\]
+
+### Fixed
+
+- [\[#251\]](https://github.com/rust-vmm/vm-memory/pull/251): Inserted checks
that verify that the value returned by `VolatileMemory::get_slice` is of
the correct length.
### Deprecated
-- [[#244]](https://github.com/rust-vmm/vm-memory/pull/241) Deprecate volatile
+
+- [\[#244\]](https://github.com/rust-vmm/vm-memory/pull/241) Deprecate volatile
memory's `as_ptr()` interfaces. The new interfaces to be used instead are:
`ptr_guard()` and `ptr_guard_mut()`.
-## [v0.12.1]
+## \[v0.12.1\]
### Fixed
-- [[#241]](https://github.com/rust-vmm/vm-memory/pull/245) mmap_xen: Don't drop
+
+- [\[#241\]](https://github.com/rust-vmm/vm-memory/pull/245) mmap_xen: Don't drop
the FileOffset while in use #245
-## [v0.12.0]
+## \[v0.12.0\]
### Added
-- [[#241]](https://github.com/rust-vmm/vm-memory/pull/241) Add Xen memory
+
+- [\[#241\]](https://github.com/rust-vmm/vm-memory/pull/241) Add Xen memory
mapping support: Foreign and Grant. Add new API for accessing pointers to
volatile slices, as `as_ptr()` can't be used with Xen's Grant mapping.
-- [[#237]](https://github.com/rust-vmm/vm-memory/pull/237) Implement `ByteValued` for `i/u128`.
+- [\[#237\]](https://github.com/rust-vmm/vm-memory/pull/237) Implement `ByteValued` for `i/u128`.
-## [v0.11.0]
+## \[v0.11.0\]
### Added
-- [[#216]](https://github.com/rust-vmm/vm-memory/pull/216) Add `GuestRegionMmap::from_region`.
+
+- [\[#216\]](https://github.com/rust-vmm/vm-memory/pull/216) Add `GuestRegionMmap::from_region`.
### Fixed
-- [[#217]](https://github.com/rust-vmm/vm-memory/pull/217) Fix vm-memory internally
- taking rust-style slices to guest memory in ways that could potentially cause
+
+- [\[#217\]](https://github.com/rust-vmm/vm-memory/pull/217) Fix vm-memory internally
+ taking rust-style slices to guest memory in ways that could potentially cause
undefined behavior. Removes/deprecates various `as_slice`/`as_slice_mut` methods
- whose usage violated rust's aliasing rules, as well as an unsound
+ whose usage violated rust's aliasing rules, as well as an unsound
`impl<'a> VolatileMemory for &'a mut [u8]`.
-## [v0.10.0]
+## \[v0.10.0\]
### Changed
-- [[#208]](https://github.com/rust-vmm/vm-memory/issues/208) Updated
+
+- [\[#208\]](https://github.com/rust-vmm/vm-memory/issues/208) Updated
vmm-sys-util dependency to v0.11.0
-- [[#203]](https://github.com/rust-vmm/vm-memory/pull/203) Switched to Rust
+- [\[#203\]](https://github.com/rust-vmm/vm-memory/pull/203) Switched to Rust
edition 2021.
-## [v0.9.0]
+## \[v0.9.0\]
### Fixed
-- [[#195]](https://github.com/rust-vmm/vm-memory/issues/195):
+- [\[#195\]](https://github.com/rust-vmm/vm-memory/issues/195):
`mmap::check_file_offset` is doing the correct size validation for block and
char devices as well.
### Changed
-- [[#198]](https://github.com/rust-vmm/vm-memory/pull/198): atomic: enable 64
+- [\[#198\]](https://github.com/rust-vmm/vm-memory/pull/198): atomic: enable 64
bit atomics on ppc64le and s390x.
-- [[#200]](https://github.com/rust-vmm/vm-memory/pull/200): docs: enable all
+- [\[#200\]](https://github.com/rust-vmm/vm-memory/pull/200): docs: enable all
features in `docs.rs`.
-- [[#199]](https://github.com/rust-vmm/vm-memory/issues/199): Update the way
+- [\[#199\]](https://github.com/rust-vmm/vm-memory/issues/199): Update the way
the dependencies are pulled such that we don't end up with incompatible
versions.
-## [v0.8.0]
+## \[v0.8.0\]
### Fixed
-- [[#190]](https://github.com/rust-vmm/vm-memory/pull/190):
+- [\[#190\]](https://github.com/rust-vmm/vm-memory/pull/190):
`VolatileSlice::read/write` when input slice is empty.
-## [v0.7.0]
+## \[v0.7.0\]
### Changed
-- [[#176]](https://github.com/rust-vmm/vm-memory/pull/176): Relax the trait
+- [\[#176\]](https://github.com/rust-vmm/vm-memory/pull/176): Relax the trait
bounds of `Bytes` auto impl for `T: GuestMemory`
-- [[#178]](https://github.com/rust-vmm/vm-memory/pull/178):
+- [\[#178\]](https://github.com/rust-vmm/vm-memory/pull/178):
`MmapRegion::build_raw` no longer requires that the length of the region is a
multiple of the page size.
-## [v0.6.0]
+## \[v0.6.0\]
### Added
- - [[#160]](https://github.com/rust-vmm/vm-memory/pull/160): Add `ArcRef` and `AtomicBitmapArc` bitmap
- backend implementations.
- - [[#149]](https://github.com/rust-vmm/vm-memory/issues/149): Implement builder for MmapRegion.
- - [[#140]](https://github.com/rust-vmm/vm-memory/issues/140): Add dirty bitmap tracking abstractions.
+- [\[#160\]](https://github.com/rust-vmm/vm-memory/pull/160): Add `ArcRef` and `AtomicBitmapArc` bitmap
+ backend implementations.
+- [\[#149\]](https://github.com/rust-vmm/vm-memory/issues/149): Implement builder for MmapRegion.
+- [\[#140\]](https://github.com/rust-vmm/vm-memory/issues/140): Add dirty bitmap tracking abstractions.
-### Deprecated
+### Deprecated
- - [[#133]](https://github.com/rust-vmm/vm-memory/issues/8): Deprecate `GuestMemory::with_regions()`,
- `GuestMemory::with_regions_mut()`, `GuestMemory::map_and_fold()`.
+- [\[#133\]](https://github.com/rust-vmm/vm-memory/issues/8): Deprecate `GuestMemory::with_regions()`,
+ `GuestMemory::with_regions_mut()`, `GuestMemory::map_and_fold()`.
-## [v0.5.0]
+## \[v0.5.0\]
### Added
-- [[#8]](https://github.com/rust-vmm/vm-memory/issues/8): Add GuestMemory method to return an Iterator
-- [[#120]](https://github.com/rust-vmm/vm-memory/pull/120): Add is_hugetlbfs() to GuestMemoryRegion
-- [[#126]](https://github.com/rust-vmm/vm-memory/pull/126): Add VolatileSlice::split_at()
-- [[#128]](https://github.com/rust-vmm/vm-memory/pull/128): Add VolatileSlice::subslice()
+- [\[#8\]](https://github.com/rust-vmm/vm-memory/issues/8): Add GuestMemory method to return an Iterator
+- [\[#120\]](https://github.com/rust-vmm/vm-memory/pull/120): Add is_hugetlbfs() to GuestMemoryRegion
+- [\[#126\]](https://github.com/rust-vmm/vm-memory/pull/126): Add VolatileSlice::split_at()
+- [\[#128\]](https://github.com/rust-vmm/vm-memory/pull/128): Add VolatileSlice::subslice()
-## [v0.4.0]
+## \[v0.4.0\]
### Fixed
-- [[#100]](https://github.com/rust-vmm/vm-memory/issues/100): Performance
+- [\[#100\]](https://github.com/rust-vmm/vm-memory/issues/100): Performance
degradation after fixing [#95](https://github.com/rust-vmm/vm-memory/pull/95).
-- [[#122]](https://github.com/rust-vmm/vm-memory/pull/122): atomic,
+- [\[#122\]](https://github.com/rust-vmm/vm-memory/pull/122): atomic,
Cargo.toml: Update for arc-swap 1.0.0.
-## [v0.3.0]
+## \[v0.3.0\]
### Added
-- [[#109]](https://github.com/rust-vmm/vm-memory/pull/109): Added `build_raw` to
+- [\[#109\]](https://github.com/rust-vmm/vm-memory/pull/109): Added `build_raw` to
`MmapRegion` which can be used to operate on externally created mappings.
-- [[#101]](https://github.com/rust-vmm/vm-memory/pull/101): Added `check_range` for
+- [\[#101\]](https://github.com/rust-vmm/vm-memory/pull/101): Added `check_range` for
GuestMemory which could be used to validate a range of guest memory.
-- [[#115]](https://github.com/rust-vmm/vm-memory/pull/115): Add methods for atomic
+- [\[#115\]](https://github.com/rust-vmm/vm-memory/pull/115): Add methods for atomic
access to `Bytes`.
### Fixed
-- [[#93]](https://github.com/rust-vmm/vm-memory/issues/93): DoS issue when using
+- [\[#93\]](https://github.com/rust-vmm/vm-memory/issues/93): DoS issue when using
virtio with rust-vmm/vm-memory.
-- [[#106]](https://github.com/rust-vmm/vm-memory/issues/106): Asserts trigger
- on zero-length access.
+- [\[#106\]](https://github.com/rust-vmm/vm-memory/issues/106): Asserts trigger
+ on zero-length access.
### Removed
- `integer-atomics` is no longer a distinct feature of the crate.
-## [v0.2.0]
+## \[v0.2.0\]
### Added
-- [[#76]](https://github.com/rust-vmm/vm-memory/issues/76): Added `get_slice` and
+- [\[#76\]](https://github.com/rust-vmm/vm-memory/issues/76): Added `get_slice` and
`as_volatile_slice` to `GuestMemoryRegion`.
-- [[#82]](https://github.com/rust-vmm/vm-memory/issues/82): Added `Clone` bound
+- [\[#82\]](https://github.com/rust-vmm/vm-memory/issues/82): Added `Clone` bound
for `GuestAddressSpace::T`, the return value of `GuestAddressSpace::memory()`.
-- [[#88]](https://github.com/rust-vmm/vm-memory/issues/88): Added `as_bytes` for
+- [\[#88\]](https://github.com/rust-vmm/vm-memory/issues/88): Added `as_bytes` for
`ByteValued` which can be used for reading into POD structures from
raw bytes.
-## [v0.1.0]
+## \[v0.1.0\]
### Added
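
The v0.13.0 entry above drives most of the source changes in this patch: `Bytes::{read_from, read_exact_from, write_to, write_all_to}` are deprecated in favour of the `*_volatile_*` variants, which take `ReadVolatile`/`WriteVolatile` sources and sinks. A small sketch of the migration (the function name and the 0x1000 guest address are illustrative; the calls mirror the benchmark changes later in this patch):

```rust
use vm_memory::{Bytes, GuestAddress, GuestMemoryError, GuestMemoryMmap};

fn load_image(mem: &GuestMemoryMmap, image: &[u8]) -> Result<(), GuestMemoryError> {
    let addr = GuestAddress(0x1000);

    // Before (deprecated since 0.13.0):
    //   mem.read_exact_from(addr, &mut std::io::Cursor::new(image), image.len())?;

    // After: byte slices implement `ReadVolatile`, so the data is copied into
    // guest memory directly, with volatile access semantics.
    mem.read_exact_volatile_from(addr, &mut &image[..], image.len())?;

    // The write direction mirrors it: `write_all_to` becomes
    // `write_all_volatile_to`, and mutable byte slices implement `WriteVolatile`.
    let mut copy = vec![0u8; image.len()];
    mem.write_all_volatile_to(addr, &mut copy.as_mut_slice(), image.len())?;
    Ok(())
}
```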
diff --git a/crates/vm-memory/Cargo.lock b/crates/vm-memory/Cargo.lock
new file mode 100644
index 0000000..d424dcc
--- /dev/null
+++ b/crates/vm-memory/Cargo.lock
@@ -0,0 +1,688 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "anes"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
+
+[[package]]
+name = "anstyle"
+version = "1.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
+
+[[package]]
+name = "arc-swap"
+version = "1.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
+
+[[package]]
+name = "autocfg"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bitflags"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+
+[[package]]
+name = "bumpalo"
+version = "3.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
+
+[[package]]
+name = "cast"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "ciborium"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
+dependencies = [
+ "ciborium-io",
+ "ciborium-ll",
+ "serde",
+]
+
+[[package]]
+name = "ciborium-io"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
+
+[[package]]
+name = "ciborium-ll"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
+dependencies = [
+ "ciborium-io",
+ "half",
+]
+
+[[package]]
+name = "clap"
+version = "4.5.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f"
+dependencies = [
+ "clap_builder",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.5.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec"
+dependencies = [
+ "anstyle",
+ "clap_lex",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7"
+
+[[package]]
+name = "criterion"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
+dependencies = [
+ "anes",
+ "cast",
+ "ciborium",
+ "clap",
+ "criterion-plot",
+ "is-terminal",
+ "itertools",
+ "num-traits",
+ "once_cell",
+ "oorandom",
+ "plotters",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "tinytemplate",
+ "walkdir",
+]
+
+[[package]]
+name = "criterion-plot"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
+dependencies = [
+ "cast",
+ "itertools",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
+dependencies = [
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
+
+[[package]]
+name = "crunchy"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
+
+[[package]]
+name = "either"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
+
+[[package]]
+name = "half"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888"
+dependencies = [
+ "cfg-if",
+ "crunchy",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc"
+
+[[package]]
+name = "is-terminal"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2"
+
+[[package]]
+name = "js-sys"
+version = "0.3.72"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.164"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f"
+
+[[package]]
+name = "log"
+version = "0.4.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+
+[[package]]
+name = "matches"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
+
+[[package]]
+name = "memchr"
+version = "2.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
+
+[[package]]
+name = "num-traits"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.20.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
+
+[[package]]
+name = "oorandom"
+version = "11.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
+
+[[package]]
+name = "plotters"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
+dependencies = [
+ "num-traits",
+ "plotters-backend",
+ "plotters-svg",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "plotters-backend"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
+
+[[package]]
+name = "plotters-svg"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
+dependencies = [
+ "plotters-backend",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.92"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rayon"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
+dependencies = [
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
+dependencies = [
+ "crossbeam-deque",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "regex"
+version = "1.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
+
+[[package]]
+name = "ryu"
+version = "1.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.215"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.215"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.133"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377"
+dependencies = [
+ "itoa",
+ "memchr",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.89"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "thiserror"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "tinytemplate"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
+
+[[package]]
+name = "vm-memory"
+version = "0.16.1"
+dependencies = [
+ "arc-swap",
+ "bitflags 2.6.0",
+ "criterion",
+ "libc",
+ "matches",
+ "thiserror",
+ "vmm-sys-util",
+ "winapi",
+]
+
+[[package]]
+name = "vmm-sys-util"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d1435039746e20da4f8d507a72ee1b916f7b4b05af7a91c093d2c6561934ede"
+dependencies = [
+ "bitflags 1.3.2",
+ "libc",
+]
+
+[[package]]
+name = "walkdir"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
+dependencies = [
+ "same-file",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.95"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d"
+
+[[package]]
+name = "web-sys"
+version = "0.3.72"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.59.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_gnullvm",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
+
+[[package]]
+name = "windows_i686_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
diff --git a/crates/vm-memory/Cargo.toml b/crates/vm-memory/Cargo.toml
index 5081971..56c753e 100644
--- a/crates/vm-memory/Cargo.toml
+++ b/crates/vm-memory/Cargo.toml
@@ -12,8 +12,12 @@
[package]
edition = "2021"
name = "vm-memory"
-version = "0.12.2"
+version = "0.16.1"
authors = ["Liu Jiang <gerry@linux.alibaba.com>"]
+build = false
+autobins = false
+autoexamples = false
+autotests = false
autobenches = false
description = "Safe abstractions for accessing the VM physical memory"
readme = "README.md"
@@ -24,13 +28,22 @@
[package.metadata.docs.rs]
all-features = true
+rustdoc-args = [
+ "--cfg",
+ "docsrs",
+]
[profile.bench]
lto = true
codegen-units = 1
+[lib]
+name = "vm_memory"
+path = "src/lib.rs"
+
[[bench]]
name = "main"
+path = "benches/main.rs"
harness = false
[dependencies.arc-swap]
@@ -38,7 +51,7 @@
optional = true
[dependencies.bitflags]
-version = "1.0"
+version = "2.4.0"
optional = true
[dependencies.libc]
@@ -48,17 +61,17 @@
version = "1.0.40"
[dependencies.vmm-sys-util]
-version = "0.11.0"
+version = "0.12.1"
optional = true
[dev-dependencies.criterion]
-version = "0.3.0"
+version = "0.5.0"
[dev-dependencies.matches]
version = "0.1.0"
[dev-dependencies.vmm-sys-util]
-version = "0.11.0"
+version = "0.12.1"
[features]
backend-atomic = ["arc-swap"]
diff --git a/crates/vm-memory/DESIGN.md b/crates/vm-memory/DESIGN.md
index 1e420e8..5915f50 100644
--- a/crates/vm-memory/DESIGN.md
+++ b/crates/vm-memory/DESIGN.md
@@ -39,7 +39,7 @@
- [Abstraction of Address Space](#abstraction-of-address-space)
- [Specialization for Virtual Machine Physical Address Space](#specialization-for-virtual-machine-physical-address-space)
-- [Backend Implementation Based on `mmap`](#backend-implementation-based-on-`mmap`)
+- [Backend Implementation Based on `mmap`](#backend-implementation-based-on-mmap)
- [Utilities and helpers](#utilities-and-helpers)
### Address Space Abstraction
@@ -48,9 +48,9 @@
with addresses as follows:
- `AddressValue`: stores the raw value of an address. Typically `u32`, `u64` or
- `usize` are used to store the raw value. Pointers such as `*u8`, can not be
- used as an implementation of `AddressValue` because the `Add` and `Sub`
- traits are not implemented for that type.
+ `usize` are used to store the raw value. Pointers such as `*u8`, can not be
+ used as an implementation of `AddressValue` because the `Add` and `Sub`
+ traits are not implemented for that type.
- `Address`: implementation of `AddressValue`.
- `Bytes`: trait for volatile access to memory. The `Bytes` trait can be
parameterized with types that represent addresses, in order to enforce that
diff --git a/crates/vm-memory/METADATA b/crates/vm-memory/METADATA
index d76b1ba..9a7dd5e 100644
--- a/crates/vm-memory/METADATA
+++ b/crates/vm-memory/METADATA
@@ -1,17 +1,17 @@
name: "vm-memory"
description: "Safe abstractions for accessing the VM physical memory"
third_party {
- version: "0.12.2"
+ version: "0.16.1"
license_type: NOTICE
last_upgrade_date {
- year: 2023
- month: 9
- day: 6
+ year: 2024
+ month: 11
+ day: 21
}
homepage: "https://crates.io/crates/vm-memory"
identifier {
type: "Archive"
- value: "https://static.crates.io/crates/vm-memory/vm-memory-0.12.2.crate"
- version: "0.12.2"
+ value: "https://static.crates.io/crates/vm-memory/vm-memory-0.16.1.crate"
+ version: "0.16.1"
}
}
diff --git a/crates/vm-memory/README.md b/crates/vm-memory/README.md
index 07e55ee..b390caf 100644
--- a/crates/vm-memory/README.md
+++ b/crates/vm-memory/README.md
@@ -18,7 +18,7 @@
### Platform Support
-- Arch: x86, AMD64, ARM64
+- Arch: x86_64, ARM64, RISCV64
- OS: Linux/Unix/Windows
### Xen support
diff --git a/crates/vm-memory/TODO.md b/crates/vm-memory/TODO.md
index e52bb07..3552f7e 100644
--- a/crates/vm-memory/TODO.md
+++ b/crates/vm-memory/TODO.md
@@ -1,3 +1,4 @@
### TODO List
+
- Abstraction layer to separate VM memory management from VM memory accessor.
- Help needed to refine documentation and usage examples.
diff --git a/crates/vm-memory/benches/mmap/mod.rs b/crates/vm-memory/benches/mmap/mod.rs
index ed15e18..bbf3ab3 100644
--- a/crates/vm-memory/benches/mmap/mod.rs
+++ b/crates/vm-memory/benches/mmap/mod.rs
@@ -8,7 +8,6 @@
extern crate vm_memory;
use std::fs::{File, OpenOptions};
-use std::io::Cursor;
use std::mem::size_of;
use std::path::Path;
@@ -105,7 +104,7 @@
c.bench_function(format!("read_from_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .read_from(address, &mut Cursor::new(&image), ACCESS_SIZE)
+ .read_volatile_from(address, &mut image.as_slice(), ACCESS_SIZE)
.unwrap()
})
});
@@ -113,7 +112,7 @@
c.bench_function(format!("read_from_file_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .read_from(address, &mut file, ACCESS_SIZE)
+ .read_volatile_from(address, &mut file, ACCESS_SIZE)
.unwrap()
})
});
@@ -121,7 +120,7 @@
c.bench_function(format!("read_exact_from_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .read_exact_from(address, &mut Cursor::new(&mut image), ACCESS_SIZE)
+ .read_exact_volatile_from(address, &mut image.as_slice(), ACCESS_SIZE)
.unwrap()
})
});
@@ -154,7 +153,7 @@
c.bench_function(format!("write_to_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .write_to(address, &mut Cursor::new(&mut image), ACCESS_SIZE)
+ .write_volatile_to(address, &mut image.as_mut_slice(), ACCESS_SIZE)
.unwrap()
})
});
@@ -162,7 +161,7 @@
c.bench_function(format!("write_to_file_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .write_to(address, &mut file_to_write, ACCESS_SIZE)
+ .write_volatile_to(address, &mut file_to_write, ACCESS_SIZE)
.unwrap()
})
});
@@ -170,7 +169,7 @@
c.bench_function(format!("write_exact_to_{:#0X}", offset).as_str(), |b| {
b.iter(|| {
black_box(&memory)
- .write_all_to(address, &mut Cursor::new(&mut image), ACCESS_SIZE)
+ .write_all_volatile_to(address, &mut image.as_mut_slice(), ACCESS_SIZE)
.unwrap()
})
});
diff --git a/crates/vm-memory/cargo_embargo.json b/crates/vm-memory/cargo_embargo.json
index 087fd61..fa13900 100644
--- a/crates/vm-memory/cargo_embargo.json
+++ b/crates/vm-memory/cargo_embargo.json
@@ -11,7 +11,16 @@
},
"package": {
"vm-memory": {
- "device_supported": false
+ "device_supported": false,
+ "compile_multilib": "first",
+ "arch": {
+ "arm64": {
+ "enabled": true
+ },
+ "x86_64": {
+ "enabled": true
+ }
+ }
}
},
"run_cargo": false
diff --git a/crates/vm-memory/coverage_config_x86_64.json b/crates/vm-memory/coverage_config_x86_64.json
index a6a09c4..003b2d7 100644
--- a/crates/vm-memory/coverage_config_x86_64.json
+++ b/crates/vm-memory/coverage_config_x86_64.json
@@ -1,5 +1,5 @@
{
- "coverage_score": 92.2,
+ "coverage_score": 89.16,
"exclude_path": "mmap_windows.rs",
"crate_features": "backend-mmap,backend-atomic,backend-bitmap"
}
diff --git a/crates/vm-memory/src/address.rs b/crates/vm-memory/src/address.rs
index 350a186..639e226 100644
--- a/crates/vm-memory/src/address.rs
+++ b/crates/vm-memory/src/address.rs
@@ -12,11 +12,11 @@
//!
//! Two traits are defined to represent an address within an address space:
//! - [`AddressValue`](trait.AddressValue.html): stores the raw value of an address. Typically
-//! `u32`,`u64` or `usize` is used to store the raw value. But pointers, such as `*u8`, can't be used
-//! because they don't implement the [`Add`](https://doc.rust-lang.org/std/ops/trait.Add.html) and
-//! [`Sub`](https://doc.rust-lang.org/std/ops/trait.Sub.html) traits.
+//! `u32`,`u64` or `usize` is used to store the raw value. But pointers, such as `*u8`, can't be used
+//! because they don't implement the [`Add`](https://doc.rust-lang.org/std/ops/trait.Add.html) and
+//! [`Sub`](https://doc.rust-lang.org/std/ops/trait.Sub.html) traits.
//! - [Address](trait.Address.html): encapsulates an [`AddressValue`](trait.AddressValue.html)
-//! object and defines methods to access and manipulate it.
+//! object and defines methods to access and manipulate it.
use std::cmp::{Eq, Ord, PartialEq, PartialOrd};
use std::fmt::Debug;
@@ -243,7 +243,7 @@
#[test]
fn test_new() {
assert_eq!(MockAddress::new(0), MockAddress(0));
- assert_eq!(MockAddress::new(std::u64::MAX), MockAddress(std::u64::MAX));
+ assert_eq!(MockAddress::new(u64::MAX), MockAddress(u64::MAX));
}
#[test]
@@ -285,7 +285,7 @@
Some(MockAddress(0x130))
);
assert_eq!(
- MockAddress::new(std::u64::MAX - 0x3fff).checked_align_up(0x10000),
+ MockAddress::new(u64::MAX - 0x3fff).checked_align_up(0x10000),
None
);
}
@@ -343,10 +343,10 @@
// normal case
check_add(10, 10, false, 20);
// edge case
- check_add(std::u64::MAX - 1, 1, false, std::u64::MAX);
+ check_add(u64::MAX - 1, 1, false, u64::MAX);
// with overflow
- check_add(std::u64::MAX, 1, true, 0);
+ check_add(u64::MAX, 1, true, 0);
}
fn check_sub(a: u64, b: u64, expected_overflow: bool, expected_result: u64) {
@@ -384,7 +384,7 @@
check_sub(1, 1, false, 0);
// with underflow
- check_sub(0, 1, true, std::u64::MAX);
+ check_sub(0, 1, true, u64::MAX);
}
#[test]
diff --git a/crates/vm-memory/src/atomic.rs b/crates/vm-memory/src/atomic.rs
index ae10224..4b20b2c 100644
--- a/crates/vm-memory/src/atomic.rs
+++ b/crates/vm-memory/src/atomic.rs
@@ -124,6 +124,7 @@
/// this structure is dropped (falls out of scope) the lock will be unlocked,
/// possibly after updating the memory map represented by the
/// `GuestMemoryAtomic` that created the guard.
+#[derive(Debug)]
pub struct GuestMemoryExclusiveGuard<'a, M: GuestMemory> {
parent: &'a GuestMemoryAtomic<M>,
_guard: MutexGuard<'a, ()>,
diff --git a/crates/vm-memory/src/atomic_integer.rs b/crates/vm-memory/src/atomic_integer.rs
index 1b55c81..72ebc48 100644
--- a/crates/vm-memory/src/atomic_integer.rs
+++ b/crates/vm-memory/src/atomic_integer.rs
@@ -60,7 +60,8 @@
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "powerpc64",
- target_arch = "s390x"
+ target_arch = "s390x",
+ target_arch = "riscv64"
))]
impl_atomic_integer_ops!(std::sync::atomic::AtomicI64, i64);
@@ -71,7 +72,8 @@
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "powerpc64",
- target_arch = "s390x"
+ target_arch = "s390x",
+ target_arch = "riscv64"
))]
impl_atomic_integer_ops!(std::sync::atomic::AtomicU64, u64);
diff --git a/crates/vm-memory/src/bitmap/backend/atomic_bitmap.rs b/crates/vm-memory/src/bitmap/backend/atomic_bitmap.rs
index b3340c3..b163043 100644
--- a/crates/vm-memory/src/bitmap/backend/atomic_bitmap.rs
+++ b/crates/vm-memory/src/bitmap/backend/atomic_bitmap.rs
@@ -3,6 +3,7 @@
//! Bitmap backend implementation based on atomic integers.
+use std::num::NonZeroUsize;
use std::sync::atomic::{AtomicU64, Ordering};
use crate::bitmap::{Bitmap, RefSlice, WithBitmapSlice};
@@ -17,30 +18,36 @@
pub struct AtomicBitmap {
map: Vec<AtomicU64>,
size: usize,
- page_size: usize,
+ byte_size: usize,
+ page_size: NonZeroUsize,
}
#[allow(clippy::len_without_is_empty)]
impl AtomicBitmap {
/// Create a new bitmap of `byte_size`, with one bit per page. This is effectively
/// rounded up, and we get a new vector of the next multiple of 64 bigger than `bit_size`.
- pub fn new(byte_size: usize, page_size: usize) -> Self {
- let mut num_pages = byte_size / page_size;
- if byte_size % page_size > 0 {
- num_pages += 1;
- }
-
- // Adding one entry element more just in case `num_pages` is not a multiple of `64`.
- let map_size = num_pages / 64 + 1;
+ pub fn new(byte_size: usize, page_size: NonZeroUsize) -> Self {
+ let num_pages = byte_size.div_ceil(page_size.get());
+ let map_size = num_pages.div_ceil(u64::BITS as usize);
let map: Vec<AtomicU64> = (0..map_size).map(|_| AtomicU64::new(0)).collect();
AtomicBitmap {
map,
size: num_pages,
+ byte_size,
page_size,
}
}
+ /// Enlarge this bitmap with enough bits to track `additional_size` additional bytes at page granularity.
+ /// New bits are initialized to zero.
+ pub fn enlarge(&mut self, additional_size: usize) {
+ self.byte_size += additional_size;
+ self.size = self.byte_size.div_ceil(self.page_size.get());
+ let map_size = self.size.div_ceil(u64::BITS as usize);
+ self.map.resize_with(map_size, Default::default);
+ }
+
/// Is bit `n` set? Bits outside the range of the bitmap are always unset.
pub fn is_bit_set(&self, index: usize) -> bool {
if index < self.size {
@@ -60,6 +67,14 @@
/// is for the page corresponding to `start_addr`, and the last bit that we set corresponds
/// to address `start_addr + len - 1`.
pub fn set_addr_range(&self, start_addr: usize, len: usize) {
+ self.set_reset_addr_range(start_addr, len, true);
+ }
+
+ // Set or reset a range of `len` bytes starting at `start_addr`.
+ // The `set` parameter selects the operation: when `set` is true the
+ // bits covering the range are set to one, otherwise they are cleared
+ // to zero.
+ fn set_reset_addr_range(&self, start_addr: usize, len: usize, set: bool) {
// Return early in the unlikely event that `len == 0` so the `len - 1` computation
// below does not underflow.
if len == 0 {
@@ -75,15 +90,49 @@
// Attempts to set bits beyond the end of the bitmap are simply ignored.
break;
}
- self.map[n >> 6].fetch_or(1 << (n & 63), Ordering::SeqCst);
+ if set {
+ self.map[n >> 6].fetch_or(1 << (n & 63), Ordering::SeqCst);
+ } else {
+ self.map[n >> 6].fetch_and(!(1 << (n & 63)), Ordering::SeqCst);
+ }
}
}
+ /// Reset a range of `len` bytes starting at `start_addr`. The first bit reset in the bitmap
+ /// is for the page corresponding to `start_addr`, and the last bit that we reset corresponds
+ /// to address `start_addr + len - 1`.
+ pub fn reset_addr_range(&self, start_addr: usize, len: usize) {
+ self.set_reset_addr_range(start_addr, len, false);
+ }
+
+ /// Set the bit at the given index.
+ pub fn set_bit(&self, index: usize) {
+ if index >= self.size {
+ // Attempts to set bits beyond the end of the bitmap are simply ignored.
+ return;
+ }
+ self.map[index >> 6].fetch_or(1 << (index & 63), Ordering::SeqCst);
+ }
+
+ /// Reset the bit at the given index.
+ pub fn reset_bit(&self, index: usize) {
+ if index >= self.size {
+ // Attempts to reset bits beyond the end of the bitmap are simply ignored.
+ return;
+ }
+ self.map[index >> 6].fetch_and(!(1 << (index & 63)), Ordering::SeqCst);
+ }
+
/// Get the length of the bitmap in bits (i.e. in how many pages it can represent).
pub fn len(&self) -> usize {
self.size
}
+ /// Get the size in bytes, i.e. how many bytes the bitmap can represent, one bit per page.
+ pub fn byte_size(&self) -> usize {
+ self.byte_size
+ }
+
/// Atomically get and reset the dirty page bitmap.
pub fn get_and_reset(&self) -> Vec<u64> {
self.map
@@ -111,6 +160,7 @@
AtomicBitmap {
map,
size: self.size,
+ byte_size: self.byte_size,
page_size: self.page_size,
}
}
@@ -136,38 +186,35 @@
impl Default for AtomicBitmap {
fn default() -> Self {
- AtomicBitmap::new(0, 0x1000)
+ // SAFETY: Safe as `0x1000` is non-zero.
+ AtomicBitmap::new(0, unsafe { NonZeroUsize::new_unchecked(0x1000) })
}
}
#[cfg(feature = "backend-mmap")]
impl NewBitmap for AtomicBitmap {
fn with_len(len: usize) -> Self {
- let page_size;
-
#[cfg(unix)]
- {
- // SAFETY: There's no unsafe potential in calling this function.
- page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) };
- }
+ // SAFETY: There's no unsafe potential in calling this function.
+ let page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) };
#[cfg(windows)]
- {
+ let page_size = {
use winapi::um::sysinfoapi::{GetSystemInfo, LPSYSTEM_INFO, SYSTEM_INFO};
-
- // It's safe to initialize this object from a zeroed memory region.
- let mut sysinfo: SYSTEM_INFO = unsafe { std::mem::zeroed() };
-
- // It's safe to call this method as the pointer is based on the address
- // of the previously initialized `sysinfo` object.
- unsafe { GetSystemInfo(&mut sysinfo as LPSYSTEM_INFO) };
-
- page_size = sysinfo.dwPageSize;
- }
+ let mut sysinfo = MaybeUninit::zeroed();
+ // SAFETY: It's safe to call `GetSystemInfo` as `sysinfo` is rightly sized
+ // allocated memory.
+ unsafe { GetSystemInfo(sysinfo.as_mut_ptr()) };
+ // SAFETY: It's safe to call `assume_init` as `GetSystemInfo` initializes `sysinfo`.
+ unsafe { sysinfo.assume_init().dwPageSize }
+ };
// The `unwrap` is safe to use because the above call should always succeed on the
// supported platforms, and the size of a page will always fit within a `usize`.
- AtomicBitmap::new(len, usize::try_from(page_size).unwrap())
+ AtomicBitmap::new(
+ len,
+ NonZeroUsize::try_from(usize::try_from(page_size).unwrap()).unwrap(),
+ )
}
}
@@ -177,13 +224,16 @@
use crate::bitmap::tests::test_bitmap;
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ const DEFAULT_PAGE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(128) };
+
#[test]
fn test_bitmap_basic() {
// Test that bitmap size is properly rounded up.
- let a = AtomicBitmap::new(1025, 128);
+ let a = AtomicBitmap::new(1025, DEFAULT_PAGE_SIZE);
assert_eq!(a.len(), 9);
- let b = AtomicBitmap::new(1024, 128);
+ let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE);
assert_eq!(b.len(), 8);
b.set_addr_range(128, 129);
assert!(!b.is_addr_set(0));
@@ -213,8 +263,25 @@
}
#[test]
+ fn test_bitmap_reset() {
+ let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE);
+ assert_eq!(b.len(), 8);
+ b.set_addr_range(128, 129);
+ assert!(!b.is_addr_set(0));
+ assert!(b.is_addr_set(128));
+ assert!(b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+
+ b.reset_addr_range(128, 129);
+ assert!(!b.is_addr_set(0));
+ assert!(!b.is_addr_set(128));
+ assert!(!b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+ }
+
+ #[test]
fn test_bitmap_out_of_range() {
- let b = AtomicBitmap::new(1024, 1);
+ let b = AtomicBitmap::new(1024, NonZeroUsize::MIN);
// Set a partial range that goes beyond the end of the bitmap
b.set_addr_range(768, 512);
assert!(b.is_addr_set(768));
@@ -225,7 +292,47 @@
#[test]
fn test_bitmap_impl() {
- let b = AtomicBitmap::new(0x2000, 128);
+ let b = AtomicBitmap::new(0x800, DEFAULT_PAGE_SIZE);
test_bitmap(&b);
}
+
+ #[test]
+ fn test_bitmap_enlarge() {
+ let mut b = AtomicBitmap::new(8 * 1024, DEFAULT_PAGE_SIZE);
+ assert_eq!(b.len(), 64);
+ b.set_addr_range(128, 129);
+ assert!(!b.is_addr_set(0));
+ assert!(b.is_addr_set(128));
+ assert!(b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+
+ b.reset_addr_range(128, 129);
+ assert!(!b.is_addr_set(0));
+ assert!(!b.is_addr_set(128));
+ assert!(!b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+ b.set_addr_range(128, 129);
+ b.enlarge(8 * 1024);
+ for i in 65..128 {
+ assert!(!b.is_bit_set(i));
+ }
+ assert_eq!(b.len(), 128);
+ assert!(!b.is_addr_set(0));
+ assert!(b.is_addr_set(128));
+ assert!(b.is_addr_set(256));
+ assert!(!b.is_addr_set(384));
+
+ b.set_bit(55);
+ assert!(b.is_bit_set(55));
+ for i in 65..128 {
+ b.set_bit(i);
+ }
+ for i in 65..128 {
+ assert!(b.is_bit_set(i));
+ }
+ b.reset_addr_range(0, 16 * 1024);
+ for i in 0..128 {
+ assert!(!b.is_bit_set(i));
+ }
+ }
}
diff --git a/crates/vm-memory/src/bitmap/backend/atomic_bitmap_arc.rs b/crates/vm-memory/src/bitmap/backend/atomic_bitmap_arc.rs
index 3545623..7d52050 100644
--- a/crates/vm-memory/src/bitmap/backend/atomic_bitmap_arc.rs
+++ b/crates/vm-memory/src/bitmap/backend/atomic_bitmap_arc.rs
@@ -77,10 +77,14 @@
use super::*;
use crate::bitmap::tests::test_bitmap;
+ use std::num::NonZeroUsize;
#[test]
fn test_bitmap_impl() {
- let b = AtomicBitmapArc::new(AtomicBitmap::new(0x2000, 128));
+ // SAFETY: `128` is non-zero.
+ let b = AtomicBitmapArc::new(AtomicBitmap::new(0x800, unsafe {
+ NonZeroUsize::new_unchecked(128)
+ }));
test_bitmap(&b);
}
}
diff --git a/crates/vm-memory/src/bitmap/backend/mod.rs b/crates/vm-memory/src/bitmap/backend/mod.rs
index 256585e..8d2d866 100644
--- a/crates/vm-memory/src/bitmap/backend/mod.rs
+++ b/crates/vm-memory/src/bitmap/backend/mod.rs
@@ -6,5 +6,4 @@
mod slice;
pub use atomic_bitmap::AtomicBitmap;
-pub use atomic_bitmap_arc::AtomicBitmapArc;
pub use slice::{ArcSlice, RefSlice};
diff --git a/crates/vm-memory/src/bitmap/backend/slice.rs b/crates/vm-memory/src/bitmap/backend/slice.rs
index 913a2f5..383ce69 100644
--- a/crates/vm-memory/src/bitmap/backend/slice.rs
+++ b/crates/vm-memory/src/bitmap/backend/slice.rs
@@ -99,15 +99,16 @@
use crate::bitmap::tests::{range_is_clean, range_is_dirty, test_bitmap};
use crate::bitmap::AtomicBitmap;
+ use std::num::NonZeroUsize;
#[test]
fn test_slice() {
- let bitmap_size = 0x1_0000;
- let dirty_offset = 0x1000;
+ let bitmap_size = 0x800;
+ let dirty_offset = 0x400;
let dirty_len = 0x100;
{
- let bitmap = AtomicBitmap::new(bitmap_size, 1);
+ let bitmap = AtomicBitmap::new(bitmap_size, NonZeroUsize::MIN);
let slice1 = bitmap.slice_at(0);
let slice2 = bitmap.slice_at(dirty_offset);
@@ -121,7 +122,7 @@
}
{
- let bitmap = AtomicBitmap::new(bitmap_size, 1);
+ let bitmap = AtomicBitmap::new(bitmap_size, NonZeroUsize::MIN);
let slice = bitmap.slice_at(0);
test_bitmap(&slice);
}
diff --git a/crates/vm-memory/src/bitmap/mod.rs b/crates/vm-memory/src/bitmap/mod.rs
index e8c0987..51318ed 100644
--- a/crates/vm-memory/src/bitmap/mod.rs
+++ b/crates/vm-memory/src/bitmap/mod.rs
@@ -140,10 +140,10 @@
}
// Helper method that tests a generic `B: Bitmap` implementation. It assumes `b` covers
- // an area of length at least 0x2000.
+ // an area of length at least 0x800.
pub fn test_bitmap<B: Bitmap>(b: &B) {
- let len = 0x2000;
- let dirty_offset = 0x1000;
+ let len = 0x800;
+ let dirty_offset = 0x400;
let dirty_len = 0x100;
// Some basic checks.
@@ -267,6 +267,7 @@
dirty_offset += step;
// Test `read_from`.
+ #[allow(deprecated)] // test of deprecated functions
h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
assert_eq!(
m.read_from(addr, &mut Cursor::new(&buf), BUF_SIZE).unwrap(),
@@ -277,6 +278,7 @@
dirty_offset += step;
// Test `read_exact_from`.
+ #[allow(deprecated)] // test of deprecated functions
h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
m.read_exact_from(addr, &mut Cursor::new(&buf), BUF_SIZE)
.unwrap()
diff --git a/crates/vm-memory/src/bytes.rs b/crates/vm-memory/src/bytes.rs
index 2430708..6274c3a 100644
--- a/crates/vm-memory/src/bytes.rs
+++ b/crates/vm-memory/src/bytes.rs
@@ -12,7 +12,7 @@
//! data.
use std::io::{Read, Write};
-use std::mem::size_of;
+use std::mem::{size_of, MaybeUninit};
use std::result::Result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::sync::atomic::Ordering;
@@ -31,7 +31,7 @@
/// cause undefined behavior.
///
/// Implementing this trait guarantees that it is safe to instantiate the struct with random data.
-pub unsafe trait ByteValued: Copy + Default + Send + Sync {
+pub unsafe trait ByteValued: Copy + Send + Sync {
/// Converts a slice of raw data into a reference of `Self`.
///
/// The value of `data` is not copied. Instead a reference is made from the given slice. The
@@ -191,7 +191,8 @@
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "powerpc64",
- target_arch = "s390x"
+ target_arch = "s390x",
+ target_arch = "riscv64"
))]
impl_atomic_access!(i64, std::sync::atomic::AtomicI64);
@@ -202,7 +203,8 @@
target_arch = "x86_64",
target_arch = "aarch64",
target_arch = "powerpc64",
- target_arch = "s390x"
+ target_arch = "s390x",
+ target_arch = "riscv64"
))]
impl_atomic_access!(u64, std::sync::atomic::AtomicU64);
@@ -268,7 +270,10 @@
///
/// Returns an error if there's not enough data inside the container.
fn read_obj<T: ByteValued>(&self, addr: A) -> Result<T, Self::E> {
- let mut result: T = Default::default();
+ // SAFETY: ByteValued objects must be assignable from an arbitrary byte
+ // sequence and are mandated to be packed.
+ // Hence, zeroed memory is a fine initialization.
+ let mut result: T = unsafe { MaybeUninit::<T>::zeroed().assume_init() };
self.read_slice(result.as_mut_slice(), addr).map(|_| result)
}
@@ -280,6 +285,9 @@
/// * `addr` - Begin writing at this address.
/// * `src` - Copy from `src` into the container.
/// * `count` - Copy `count` bytes from `src` into the container.
+ #[deprecated(
+ note = "Use `.read_volatile_from` or the functions of the `ReadVolatile` trait instead"
+ )]
fn read_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<usize, Self::E>
where
F: Read;
@@ -295,6 +303,9 @@
/// * `addr` - Begin writing at this address.
/// * `src` - Copy from `src` into the container.
/// * `count` - Copy exactly `count` bytes from `src` into the container.
+ #[deprecated(
+ note = "Use `.read_exact_volatile_from` or the functions of the `ReadVolatile` trait instead"
+ )]
fn read_exact_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<(), Self::E>
where
F: Read;
@@ -307,6 +318,9 @@
/// * `addr` - Begin reading from this address.
/// * `dst` - Copy from the container to `dst`.
/// * `count` - Copy `count` bytes from the container to `dst`.
+ #[deprecated(
+ note = "Use `.write_volatile_to` or the functions of the `WriteVolatile` trait instead"
+ )]
fn write_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<usize, Self::E>
where
F: Write;
@@ -322,6 +336,9 @@
/// * `addr` - Begin reading from this address.
/// * `dst` - Copy from the container to `dst`.
/// * `count` - Copy exactly `count` bytes from the container to `dst`.
+ #[deprecated(
+ note = "Use `.write_all_volatile_to` or the functions of the `WriteVolatile` trait instead"
+ )]
fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
where
F: Write;
@@ -510,11 +527,11 @@
fn test_bytes() {
let bytes = MockBytesContainer::new();
- assert!(bytes.write_obj(std::u64::MAX, 0).is_ok());
- assert_eq!(bytes.read_obj::<u64>(0).unwrap(), std::u64::MAX);
+ assert!(bytes.write_obj(u64::MAX, 0).is_ok());
+ assert_eq!(bytes.read_obj::<u64>(0).unwrap(), u64::MAX);
assert!(bytes
- .write_obj(std::u64::MAX, MOCK_BYTES_CONTAINER_SIZE)
+ .write_obj(u64::MAX, MOCK_BYTES_CONTAINER_SIZE)
.is_err());
assert!(bytes.read_obj::<u64>(MOCK_BYTES_CONTAINER_SIZE).is_err());
}
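Note: with the `Default` bound dropped from `ByteValued` (and `read_obj` zero-initializing through `MaybeUninit`), plain POD types no longer have to derive `Default`. A hedged sketch; the header struct below is hypothetical and not part of this change:

```rust
use vm_memory::ByteValued;

// A plain-old-data descriptor header; every bit pattern is a valid value.
#[derive(Clone, Copy)]
#[repr(C)]
struct ExampleHeader {
    flags: u16,
    next: u16,
    len: u32,
}

// SAFETY: `ExampleHeader` is `#[repr(C)]`, consists solely of integer fields
// with no padding (2 + 2 + 4 bytes), and is valid for any byte sequence.
unsafe impl ByteValued for ExampleHeader {}

fn main() {
    let header = ExampleHeader { flags: 1, next: 0, len: 4096 };
    // `as_slice` is a provided method of `ByteValued`.
    assert_eq!(header.as_slice().len(), std::mem::size_of::<ExampleHeader>());
}
```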
diff --git a/crates/vm-memory/src/guest_memory.rs b/crates/vm-memory/src/guest_memory.rs
index ba615ef..98c68b7 100644
--- a/crates/vm-memory/src/guest_memory.rs
+++ b/crates/vm-memory/src/guest_memory.rs
@@ -19,12 +19,12 @@
//! Traits and Structs
//! - [`GuestAddress`](struct.GuestAddress.html): represents a guest physical address (GPA).
//! - [`MemoryRegionAddress`](struct.MemoryRegionAddress.html): represents an offset inside a
-//! region.
+//! region.
//! - [`GuestMemoryRegion`](trait.GuestMemoryRegion.html): represent a continuous region of guest's
-//! physical memory.
+//! physical memory.
//! - [`GuestMemory`](trait.GuestMemory.html): represent a collection of `GuestMemoryRegion`
-//! objects.
-//! The main responsibilities of the `GuestMemory` trait are:
+//! objects.
+//! The main responsibilities of the `GuestMemory` trait are:
//! - hide the detail of accessing guest's physical address.
//! - map a request address to a `GuestMemoryRegion` object and relay the request to it.
//! - handle cases where an access request spanning two or more `GuestMemoryRegion` objects.
@@ -52,7 +52,9 @@
use crate::address::{Address, AddressValue};
use crate::bitmap::{Bitmap, BS, MS};
use crate::bytes::{AtomicAccess, Bytes};
+use crate::io::{ReadVolatile, WriteVolatile};
use crate::volatile_memory::{self, VolatileSlice};
+use crate::GuestMemoryError;
static MAX_ACCESS_CHUNK: usize = 4096;
@@ -75,6 +77,14 @@
/// Host virtual address not available.
#[error("Guest memory error: host virtual address not available")]
HostAddressNotAvailable,
+ /// The length returned by the callback passed to `try_access` is outside the address range.
+ #[error(
+ "The length returned by the callback passed to `try_access` is outside the address range."
+ )]
+ CallbackOutOfRange,
+ /// The address to be read by `try_access` is outside the address range.
+ #[error("The address to be read by `try_access` is outside the address range")]
+ GuestAddressOverflow,
}
impl From<volatile_memory::Error> for Error {
@@ -412,42 +422,6 @@
}
}
-/// Lifetime generic associated iterators. The actual iterator type is defined through associated
-/// item `Iter`, for example:
-///
-/// ```
-/// # use std::marker::PhantomData;
-/// # use vm_memory::guest_memory::GuestMemoryIterator;
-/// #
-/// // Declare the relevant Region and Memory types
-/// struct MyGuestRegion {/* fields omitted */}
-/// struct MyGuestMemory {/* fields omitted */}
-///
-/// // Make an Iterator type to iterate over the Regions
-/// # /*
-/// struct MyGuestMemoryIter<'a> {/* fields omitted */}
-/// # */
-/// # struct MyGuestMemoryIter<'a> {
-/// # _marker: PhantomData<&'a MyGuestRegion>,
-/// # }
-/// impl<'a> Iterator for MyGuestMemoryIter<'a> {
-/// type Item = &'a MyGuestRegion;
-/// fn next(&mut self) -> Option<&'a MyGuestRegion> {
-/// // ...
-/// # None
-/// }
-/// }
-///
-/// // Associate the Iter type with the Memory type
-/// impl<'a> GuestMemoryIterator<'a, MyGuestRegion> for MyGuestMemory {
-/// type Iter = MyGuestMemoryIter<'a>;
-/// }
-/// ```
-pub trait GuestMemoryIterator<'a, R: 'a> {
- /// Type of the `iter` method's return value.
- type Iter: Iterator<Item = &'a R>;
-}
-
/// `GuestMemory` represents a container for an *immutable* collection of
/// `GuestMemoryRegion` objects. `GuestMemory` provides the `Bytes<GuestAddress>`
/// trait to hide the details of accessing guest memory by physical address.
@@ -461,9 +435,6 @@
/// Type of objects hosted by the address space.
type R: GuestMemoryRegion;
- /// Lifetime generic associated iterators. Usually this is just `Self`.
- type I: for<'a> GuestMemoryIterator<'a, Self::R>;
-
/// Returns the number of regions in the collection.
fn num_regions(&self) -> usize;
@@ -504,7 +475,7 @@
///
/// * Compute the total size of all memory mappings in KB by iterating over the memory regions
/// and dividing their sizes to 1024, then summing up the values in an accumulator. (uses the
- /// `backend-mmap` feature)
+ /// `backend-mmap` feature)
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
@@ -523,7 +494,7 @@
/// assert_eq!(3, total_size)
/// # }
/// ```
- fn iter(&self) -> <Self::I as GuestMemoryIterator<Self::R>>::Iter;
+ fn iter(&self) -> impl Iterator<Item = &Self::R>;
/// Applies two functions, specified as callbacks, on the inner memory regions.
///
@@ -540,7 +511,7 @@
///
/// * Compute the total size of all memory mappings in KB by iterating over the memory regions
/// and dividing their sizes to 1024, then summing up the values in an accumulator. (uses the
- /// `backend-mmap` feature)
+ /// `backend-mmap` feature)
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
@@ -644,15 +615,15 @@
Ok(0) => return Ok(total),
// made some progress
Ok(len) => {
- total += len;
- if total == count {
- break;
- }
+ total = match total.checked_add(len) {
+ Some(x) if x < count => x,
+ Some(x) if x == count => return Ok(x),
+ _ => return Err(Error::CallbackOutOfRange),
+ };
cur = match cur.overflowing_add(len as GuestUsize) {
- (GuestAddress(0), _) => GuestAddress(0),
- (result, false) => result,
- (_, true) => panic!("guest address overflow"),
- }
+ (x @ GuestAddress(0), _) | (x, false) => x,
+ (_, true) => return Err(Error::GuestAddressOverflow),
+ };
}
// error happened
e => return e,
@@ -665,6 +636,143 @@
}
}
+ /// Reads up to `count` bytes from an object and writes them into guest memory at `addr`.
+ ///
+ /// Returns the number of bytes written into guest memory.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin writing at this address.
+ /// * `src` - Copy from `src` into the container.
+ /// * `count` - Copy `count` bytes from `src` into the container.
+ ///
+ /// # Examples
+ ///
+ /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature)
+ ///
+ /// ```
+ /// # #[cfg(feature = "backend-mmap")]
+ /// # {
+ /// # use vm_memory::{Address, GuestMemory, Bytes, GuestAddress, GuestMemoryMmap};
+ /// # use std::fs::File;
+ /// # use std::path::Path;
+ /// #
+ /// # let start_addr = GuestAddress(0x1000);
+ /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
+ /// # .expect("Could not create guest memory");
+ /// # let addr = GuestAddress(0x1010);
+ /// # let mut file = if cfg!(unix) {
+ /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
+ /// # file
+ /// # } else {
+ /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
+ /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
+ /// # };
+ ///
+ /// gm.read_volatile_from(addr, &mut file, 128)
+ /// .expect("Could not read from /dev/urandom into guest memory");
+ ///
+ /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
+ /// let rand_val: u32 = gm
+ /// .read_obj(read_addr)
+ /// .expect("Could not read u32 val from /dev/urandom");
+ /// # }
+ /// ```
+ fn read_volatile_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
+ where
+ F: ReadVolatile,
+ {
+ self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
+ // Check if something bad happened before doing unsafe things.
+ assert!(offset <= count);
+
+ let mut vslice = region.get_slice(caddr, len)?;
+
+ src.read_volatile(&mut vslice)
+ .map_err(GuestMemoryError::from)
+ })
+ }
+
+ /// Reads up to `count` bytes from guest memory at `addr` and writes them into an object.
+ ///
+ /// Returns the number of bytes copied from guest memory.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin reading from this address.
+ /// * `dst` - Copy from guest memory to `dst`.
+ /// * `count` - Copy `count` bytes from guest memory to `dst`.
+ fn write_volatile_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
+ where
+ F: WriteVolatile,
+ {
+ self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
+ // Check if something bad happened before doing unsafe things.
+ assert!(offset <= count);
+
+ let vslice = region.get_slice(caddr, len)?;
+
+ // For a non-RAM region, reading could have side effects, so we
+ // must use write_all().
+ dst.write_all_volatile(&vslice)?;
+
+ Ok(len)
+ })
+ }
+
+ /// Reads exactly `count` bytes from an object and writes them into guest memory at `addr`.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if `count` bytes couldn't have been copied from `src` to guest memory.
+ /// Part of the data may have been copied nevertheless.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin writing at this address.
+ /// * `src` - Copy from `src` into guest memory.
+ /// * `count` - Copy exactly `count` bytes from `src` into guest memory.
+ fn read_exact_volatile_from<F>(
+ &self,
+ addr: GuestAddress,
+ src: &mut F,
+ count: usize,
+ ) -> Result<()>
+ where
+ F: ReadVolatile,
+ {
+ let res = self.read_volatile_from(addr, src, count)?;
+ if res != count {
+ return Err(Error::PartialBuffer {
+ expected: count,
+ completed: res,
+ });
+ }
+ Ok(())
+ }
+
+ /// Reads exactly `count` bytes from guest memory at `addr` and writes them into an object.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if `count` bytes couldn't have been copied from guest memory to `dst`.
+ /// Part of the data may have been copied nevertheless.
+ ///
+ /// # Arguments
+ /// * `addr` - Begin reading from this address.
+ /// * `dst` - Copy from guest memory to `dst`.
+ /// * `count` - Copy exactly `count` bytes from guest memory to `dst`.
+ fn write_all_volatile_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
+ where
+ F: WriteVolatile,
+ {
+ let res = self.write_volatile_to(addr, dst, count)?;
+ if res != count {
+ return Err(Error::PartialBuffer {
+ expected: count,
+ completed: res,
+ });
+ }
+ Ok(())
+ }
+
/// Get the host virtual address corresponding to the guest address.
///
/// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
@@ -856,6 +964,7 @@
where
F: Read,
{
+ #[allow(deprecated)] // this function itself is deprecated
let res = self.read_from(addr, src, count)?;
if res != count {
return Err(Error::PartialBuffer {
@@ -949,6 +1058,7 @@
where
F: Write,
{
+ #[allow(deprecated)] // this function itself is deprecated
let res = self.write_to(addr, dst, count)?;
if res != count {
return Err(Error::PartialBuffer {
@@ -983,8 +1093,6 @@
#[cfg(feature = "backend-mmap")]
use crate::GuestAddress;
#[cfg(feature = "backend-mmap")]
- use std::io::Cursor;
- #[cfg(feature = "backend-mmap")]
use std::time::{Duration, Instant};
use vmm_sys_util::tempfile::TempFile;
@@ -1024,7 +1132,7 @@
let count: usize = 0x20;
assert_eq!(
0x20_usize,
- mem.read_from(offset, &mut Cursor::new(&image), count)
+ mem.read_volatile_from(offset, &mut image.as_slice(), count)
.unwrap()
);
}
@@ -1179,19 +1287,24 @@
assert!(mem.write_obj(obj, addr).is_ok());
assert!(mem.read_obj::<ZeroSizedStruct>(addr).is_ok());
- assert_eq!(mem.read_from(addr, &mut Cursor::new(&image), 0).unwrap(), 0);
-
- assert!(mem
- .read_exact_from(addr, &mut Cursor::new(&image), 0)
- .is_ok());
-
assert_eq!(
- mem.write_to(addr, &mut Cursor::new(&mut image), 0).unwrap(),
+ mem.read_volatile_from(addr, &mut image.as_slice(), 0)
+ .unwrap(),
0
);
assert!(mem
- .write_all_to(addr, &mut Cursor::new(&mut image), 0)
+ .read_exact_volatile_from(addr, &mut image.as_slice(), 0)
+ .is_ok());
+
+ assert_eq!(
+ mem.write_volatile_to(addr, &mut image.as_mut_slice(), 0)
+ .unwrap(),
+ 0
+ );
+
+ assert!(mem
+ .write_all_volatile_to(addr, &mut image.as_mut_slice(), 0)
.is_ok());
}
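Note: callers migrating off the deprecated `read_from`/`write_to` family would switch to the new volatile variants roughly as follows (a sketch assuming the `backend-mmap` feature; addresses and buffer sizes are illustrative):

```rust
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};

fn main() {
    let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
        .expect("failed to create guest memory");
    let addr = GuestAddress(0x1010);

    // Old style (now deprecated): mem.read_exact_from(addr, &mut Cursor::new(&data), len).
    // New style: `&[u8]` implements `ReadVolatile`, so the slice is passed directly.
    let data = [0xaau8; 16];
    mem.read_exact_volatile_from(addr, &mut data.as_slice(), data.len())
        .expect("copy into guest memory failed");

    // And back out: `&mut [u8]` implements `WriteVolatile`.
    let mut out = [0u8; 16];
    mem.write_all_volatile_to(addr, &mut out.as_mut_slice(), out.len())
        .expect("copy out of guest memory failed");
    assert_eq!(out, data);
}
```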
diff --git a/crates/vm-memory/src/io.rs b/crates/vm-memory/src/io.rs
new file mode 100644
index 0000000..9905d71
--- /dev/null
+++ b/crates/vm-memory/src/io.rs
@@ -0,0 +1,611 @@
+// Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+//! Module containing versions of the standard library's [`Read`](std::io::Read) and
+//! [`Write`](std::io::Write) traits compatible with volatile memory accesses.
+
+use crate::bitmap::BitmapSlice;
+use crate::volatile_memory::copy_slice_impl::{copy_from_volatile_slice, copy_to_volatile_slice};
+use crate::{VolatileMemoryError, VolatileSlice};
+use std::io::{Cursor, ErrorKind, Stdout};
+use std::os::fd::AsRawFd;
+
+/// A version of the standard library's [`Read`](std::io::Read) trait that operates on volatile
+/// memory instead of slices
+///
+/// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on
+/// guest memory [1].
+///
+/// [1]: https://github.com/rust-vmm/vm-memory/pull/217
+pub trait ReadVolatile {
+ /// Tries to read some bytes into the given [`VolatileSlice`] buffer, returning how many bytes
+ /// were read.
+ ///
+ /// The behavior of implementations should be identical to [`Read::read`](std::io::Read::read)
+ fn read_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError>;
+
+ /// Tries to fill the given [`VolatileSlice`] buffer by reading from `self` returning an error
+ /// if insufficient bytes could be read.
+ ///
+ /// The default implementation is identical to that of [`Read::read_exact`](std::io::Read::read_exact)
+ fn read_exact_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<(), VolatileMemoryError> {
+ // Implementation based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L465
+
+ let mut partial_buf = buf.offset(0)?;
+
+ while !partial_buf.is_empty() {
+ match self.read_volatile(&mut partial_buf) {
+ Err(VolatileMemoryError::IOError(err)) if err.kind() == ErrorKind::Interrupted => {
+ continue
+ }
+ Ok(0) => {
+ return Err(VolatileMemoryError::IOError(std::io::Error::new(
+ ErrorKind::UnexpectedEof,
+ "failed to fill whole buffer",
+ )))
+ }
+ Ok(bytes_read) => partial_buf = partial_buf.offset(bytes_read)?,
+ Err(err) => return Err(err),
+ }
+ }
+
+ Ok(())
+ }
+}
+
+/// A version of the standard library's [`Write`](std::io::Write) trait that operates on volatile
+/// memory instead of slices.
+///
+/// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on
+/// guest memory [1].
+///
+/// [1]: https://github.com/rust-vmm/vm-memory/pull/217
+pub trait WriteVolatile {
+ /// Tries to write some bytes from the given [`VolatileSlice`] buffer, returning how many bytes
+ /// were written.
+ ///
+ /// The behavior of implementations should be identical to [`Write::write`](std::io::Write::write)
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError>;
+
+ /// Tries to write the entire content of the given [`VolatileSlice`] buffer to `self`, returning an
+ /// error if not all bytes could be written.
+ ///
+ /// The default implementation is identical to that of [`Write::write_all`](std::io::Write::write_all)
+ fn write_all_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<(), VolatileMemoryError> {
+ // Based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L1570
+
+ let mut partial_buf = buf.offset(0)?;
+
+ while !partial_buf.is_empty() {
+ match self.write_volatile(&partial_buf) {
+ Err(VolatileMemoryError::IOError(err)) if err.kind() == ErrorKind::Interrupted => {
+ continue
+ }
+ Ok(0) => {
+ return Err(VolatileMemoryError::IOError(std::io::Error::new(
+ ErrorKind::WriteZero,
+ "failed to write whole buffer",
+ )))
+ }
+ Ok(bytes_written) => partial_buf = partial_buf.offset(bytes_written)?,
+ Err(err) => return Err(err),
+ }
+ }
+
+ Ok(())
+ }
+}
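Note: for orientation, a minimal round-trip through the two traits defined above might look like this (a sketch, not part of the crate's test suite):

```rust
use vm_memory::{ReadVolatile, VolatileSlice, WriteVolatile};

fn main() {
    let mut backing = vec![0u8; 4];
    let mut vslice = VolatileSlice::from(backing.as_mut_slice());

    // `&[u8]` implements `ReadVolatile`: fill the volatile slice from it.
    let mut src: &[u8] = &[1, 2, 3, 4];
    src.read_volatile(&mut vslice).unwrap();

    // `Vec<u8>` implements `WriteVolatile`: drain the volatile slice into it.
    let mut sink = Vec::new();
    sink.write_volatile(&vslice).unwrap();
    assert_eq!(sink, vec![1, 2, 3, 4]);
}
```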
+
+// We explicitly implement our traits for [`std::fs::File`] and [`std::os::unix::net::UnixStream`]
+// instead of providing a blanket implementation for [`AsRawFd`] due to trait coherence limitations: a
+// blanket implementation would prevent us from providing implementations for `&mut [u8]` below, as
+// "an upstream crate could implement `AsRawFd` for `&mut [u8]`".
+
+macro_rules! impl_read_write_volatile_for_raw_fd {
+ ($raw_fd_ty:ty) => {
+ impl ReadVolatile for $raw_fd_ty {
+ fn read_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ read_volatile_raw_fd(self, buf)
+ }
+ }
+
+ impl WriteVolatile for $raw_fd_ty {
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ write_volatile_raw_fd(self, buf)
+ }
+ }
+ };
+}
+
+impl WriteVolatile for Stdout {
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ write_volatile_raw_fd(self, buf)
+ }
+}
+
+impl_read_write_volatile_for_raw_fd!(std::fs::File);
+impl_read_write_volatile_for_raw_fd!(std::net::TcpStream);
+impl_read_write_volatile_for_raw_fd!(std::os::unix::net::UnixStream);
+impl_read_write_volatile_for_raw_fd!(std::os::fd::OwnedFd);
+impl_read_write_volatile_for_raw_fd!(std::os::fd::BorrowedFd<'_>);
+
+/// Tries to do a single `read` syscall on the provided file descriptor, storing the data read in
+/// the given [`VolatileSlice`].
+///
+/// Returns the number of bytes read.
+fn read_volatile_raw_fd<Fd: AsRawFd>(
+ raw_fd: &mut Fd,
+ buf: &mut VolatileSlice<impl BitmapSlice>,
+) -> Result<usize, VolatileMemoryError> {
+ let fd = raw_fd.as_raw_fd();
+ let guard = buf.ptr_guard_mut();
+
+ let dst = guard.as_ptr().cast::<libc::c_void>();
+
+ // SAFETY: We got a valid file descriptor from `AsRawFd`. The memory pointed to by `dst` is
+ // valid for writes of length `buf.len() by the invariants upheld by the constructor
+ // of `VolatileSlice`.
+ let bytes_read = unsafe { libc::read(fd, dst, buf.len()) };
+
+ if bytes_read < 0 {
+ // We don't know if a partial read might have happened, so mark everything as dirty
+ buf.bitmap().mark_dirty(0, buf.len());
+
+ Err(VolatileMemoryError::IOError(std::io::Error::last_os_error()))
+ } else {
+ let bytes_read = bytes_read.try_into().unwrap();
+ buf.bitmap().mark_dirty(0, bytes_read);
+ Ok(bytes_read)
+ }
+}
+
+/// Tries to do a single `write` syscall on the provided file descriptor, attempting to write the
+/// data stored in the given [`VolatileSlice`].
+///
+/// Returns the number of bytes written.
+fn write_volatile_raw_fd<Fd: AsRawFd>(
+ raw_fd: &mut Fd,
+ buf: &VolatileSlice<impl BitmapSlice>,
+) -> Result<usize, VolatileMemoryError> {
+ let fd = raw_fd.as_raw_fd();
+ let guard = buf.ptr_guard();
+
+ let src = guard.as_ptr().cast::<libc::c_void>();
+
+ // SAFETY: We got a valid file descriptor from `AsRawFd`. The memory pointed to by `src` is
+// valid for reads of length `buf.len()` by the invariants upheld by the constructor
+ // of `VolatileSlice`.
+ let bytes_written = unsafe { libc::write(fd, src, buf.len()) };
+
+ if bytes_written < 0 {
+ Err(VolatileMemoryError::IOError(std::io::Error::last_os_error()))
+ } else {
+ Ok(bytes_written.try_into().unwrap())
+ }
+}
+
+impl WriteVolatile for &mut [u8] {
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ let total = buf.len().min(self.len());
+ let src = buf.subslice(0, total)?;
+
+ // SAFETY:
+ // We check above that `src` is contiguously allocated memory of length `total <= self.len()`.
+ // Furthermore, both src and dst of the call to
+ // copy_from_volatile_slice are valid for reads and writes respectively of length `total`
+ // since total is the minimum of lengths of the memory areas pointed to. The areas do not
+ // overlap, since `src` is inside guest memory, and `self` is a plain slice (no slices to guest
+ // memory are possible without violating rust's aliasing rules).
+ let written = unsafe { copy_from_volatile_slice(self.as_mut_ptr(), &src, total) };
+
+ // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#335
+ *self = std::mem::take(self).split_at_mut(written).1;
+
+ Ok(written)
+ }
+
+ fn write_all_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<(), VolatileMemoryError> {
+ // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L376-L382
+ if self.write_volatile(buf)? == buf.len() {
+ Ok(())
+ } else {
+ Err(VolatileMemoryError::IOError(std::io::Error::new(
+ ErrorKind::WriteZero,
+ "failed to write whole buffer",
+ )))
+ }
+ }
+}
+
+impl ReadVolatile for &[u8] {
+ fn read_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ let total = buf.len().min(self.len());
+ let dst = buf.subslice(0, total)?;
+
+ // SAFETY:
+ // We check above that `dst` is contiguously allocated memory of length `total <= self.len()`.
+ // Furthermore, both src and dst of the call to copy_to_volatile_slice are valid for reads
+ // and writes respectively of length `total` since total is the minimum of lengths of the
+ // memory areas pointed to. The areas do not overlap, since `dst` is inside guest memory,
+ // and `self` is a plain slice (no slices to guest memory are possible without violating rust's aliasing rules).
+ let read = unsafe { copy_to_volatile_slice(&dst, self.as_ptr(), total) };
+
+ // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#232-310
+ *self = self.split_at(read).1;
+
+ Ok(read)
+ }
+
+ fn read_exact_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<(), VolatileMemoryError> {
+ // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L282-L302
+ if buf.len() > self.len() {
+ return Err(VolatileMemoryError::IOError(std::io::Error::new(
+ ErrorKind::UnexpectedEof,
+ "failed to fill whole buffer",
+ )));
+ }
+
+ self.read_volatile(buf).map(|_| ())
+ }
+}
+
+// WriteVolatile implementation for Vec<u8> is based upon the Write impl for Vec, which
+// defers to Vec::append_elements, after which the below functionality is modelled.
+impl WriteVolatile for Vec<u8> {
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ let count = buf.len();
+ self.reserve(count);
+ let len = self.len();
+
+ // SAFETY: Calling Vec::reserve() above guarantees that the backing storage of the Vec has
+ // length at least `len + count`. This means that self.as_mut_ptr().add(len) remains within
+ // the same allocated object, the offset does not exceed isize (as otherwise reserve would
+ // have panicked), and does not rely on address space wrapping around.
+ // In particular, the entire `count` bytes after `self.as_mut_ptr().add(len)` are
+ // contiguously allocated and valid for writes.
+ // Lastly, `copy_from_volatile_slice` correctly initializes `copied_len` additional bytes
+ // in the Vec's backing storage, and we assert this to be equal to `count`. Additionally,
+ // `len + count` is at most the reserved capacity of the vector. Thus the call to `set_len`
+ // is safe.
+ unsafe {
+ let copied_len = copy_from_volatile_slice(self.as_mut_ptr().add(len), buf, count);
+
+ assert_eq!(copied_len, count);
+ self.set_len(len + count);
+ }
+ Ok(count)
+ }
+}
+
+// ReadVolatile and WriteVolatile implementations for Cursor<T> are modelled after the standard
+// library's implementation (modulo having to inline `Cursor::remaining_slice`, as that's nightly only)
+impl<T> ReadVolatile for Cursor<T>
+where
+ T: AsRef<[u8]>,
+{
+ fn read_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ let inner = self.get_ref().as_ref();
+ let len = self.position().min(inner.len() as u64);
+ let n = ReadVolatile::read_volatile(&mut &inner[(len as usize)..], buf)?;
+ self.set_position(self.position() + n as u64);
+ Ok(n)
+ }
+
+ fn read_exact_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &mut VolatileSlice<B>,
+ ) -> Result<(), VolatileMemoryError> {
+ let inner = self.get_ref().as_ref();
+ let n = buf.len();
+ let len = self.position().min(inner.len() as u64);
+ ReadVolatile::read_exact_volatile(&mut &inner[(len as usize)..], buf)?;
+ self.set_position(self.position() + n as u64);
+ Ok(())
+ }
+}
+
+impl WriteVolatile for Cursor<&mut [u8]> {
+ fn write_volatile<B: BitmapSlice>(
+ &mut self,
+ buf: &VolatileSlice<B>,
+ ) -> Result<usize, VolatileMemoryError> {
+ let pos = self.position().min(self.get_ref().len() as u64);
+ let n = WriteVolatile::write_volatile(&mut &mut self.get_mut()[(pos as usize)..], buf)?;
+ self.set_position(self.position() + n as u64);
+ Ok(n)
+ }
+
+ // The standard library provides no specialized write_all for Cursor. Since our default
+ // write_all_volatile is based on the stdlib's write_all, omitting it here mimics stdlib behavior.
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::io::{ReadVolatile, WriteVolatile};
+ use crate::{VolatileMemoryError, VolatileSlice};
+ use std::io::{Cursor, ErrorKind, Read, Seek, Write};
+ use vmm_sys_util::tempfile::TempFile;
+
+ // ---- Test ReadVolatile for &[u8] ----
+ fn read_4_bytes_to_5_byte_memory(source: Vec<u8>, expected_output: [u8; 5]) {
+ // Test read_volatile for &[u8] works
+ let mut memory = vec![0u8; 5];
+
+ assert_eq!(
+ (&source[..])
+ .read_volatile(&mut VolatileSlice::from(&mut memory[..4]))
+ .unwrap(),
+ source.len().min(4)
+ );
+ assert_eq!(&memory, &expected_output);
+
+ // Test read_exact_volatile for &[u8] works
+ let mut memory = vec![0u8; 5];
+ let result = (&source[..]).read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4]));
+
+ // read_exact fails if there are not enough bytes in input to completely fill
+ // memory[..4]
+ if source.len() < 4 {
+ match result.unwrap_err() {
+ VolatileMemoryError::IOError(ioe) => {
+ assert_eq!(ioe.kind(), ErrorKind::UnexpectedEof)
+ }
+ err => panic!("{:?}", err),
+ }
+ assert_eq!(memory, vec![0u8; 5]);
+ } else {
+ result.unwrap();
+ assert_eq!(&memory, &expected_output);
+ }
+ }
+
+ // ---- Test ReadVolatile for File ----
+ fn read_4_bytes_from_file(source: Vec<u8>, expected_output: [u8; 5]) {
+ let mut temp_file = TempFile::new().unwrap().into_file();
+ temp_file.write_all(source.as_ref()).unwrap();
+ temp_file.rewind().unwrap();
+
+ // Test read_volatile for File works
+ let mut memory = vec![0u8; 5];
+
+ assert_eq!(
+ temp_file
+ .read_volatile(&mut VolatileSlice::from(&mut memory[..4]))
+ .unwrap(),
+ source.len().min(4)
+ );
+ assert_eq!(&memory, &expected_output);
+
+ temp_file.rewind().unwrap();
+
+ // Test read_exact_volatile for File works
+ let mut memory = vec![0u8; 5];
+
+ let read_exact_result =
+ temp_file.read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4]));
+
+ if source.len() < 4 {
+ read_exact_result.unwrap_err();
+ } else {
+ read_exact_result.unwrap();
+ }
+ assert_eq!(&memory, &expected_output);
+ }
+
+ #[test]
+ fn test_read_volatile() {
+ let test_cases = [
+ (vec![1u8, 2], [1u8, 2, 0, 0, 0]),
+ (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]),
+ // ensure we don't have a buffer overrun
+ (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]),
+ ];
+
+ for (input, output) in test_cases {
+ read_4_bytes_to_5_byte_memory(input.clone(), output);
+ read_4_bytes_from_file(input, output);
+ }
+ }
+
+ // ---- Test WriteVolatile for &mut [u8] ----
+ fn write_4_bytes_to_5_byte_vec(mut source: Vec<u8>, expected_result: [u8; 5]) {
+ let mut memory = vec![0u8; 5];
+
+ // Test write_volatile for &mut [u8] works
+ assert_eq!(
+ (&mut memory[..4])
+ .write_volatile(&VolatileSlice::from(source.as_mut_slice()))
+ .unwrap(),
+ source.len().min(4)
+ );
+ assert_eq!(&memory, &expected_result);
+
+ // Test write_all_volatile for &mut [u8] works
+ let mut memory = vec![0u8; 5];
+
+ let result =
+ (&mut memory[..4]).write_all_volatile(&VolatileSlice::from(source.as_mut_slice()));
+
+ if source.len() > 4 {
+ match result.unwrap_err() {
+ VolatileMemoryError::IOError(ioe) => {
+ assert_eq!(ioe.kind(), ErrorKind::WriteZero)
+ }
+ err => panic!("{:?}", err),
+ }
+ // This quirky behavior of writing to the slice even in the case of failure is also
+ // exhibited by the stdlib
+ assert_eq!(&memory, &expected_result);
+ } else {
+ result.unwrap();
+ assert_eq!(&memory, &expected_result);
+ }
+ }
+
+ // ---- Test WriteVolatile for File works ----
+ fn write_5_bytes_to_file(mut source: Vec<u8>) {
+ // Test write_volatile for File works
+ let mut temp_file = TempFile::new().unwrap().into_file();
+
+ temp_file
+ .write_volatile(&VolatileSlice::from(source.as_mut_slice()))
+ .unwrap();
+ temp_file.rewind().unwrap();
+
+ let mut written = vec![0u8; source.len()];
+ temp_file.read_exact(written.as_mut_slice()).unwrap();
+
+ assert_eq!(source, written);
+ // check no excess bytes were written to the file
+ assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0);
+
+ // Test write_all_volatile for File works
+ let mut temp_file = TempFile::new().unwrap().into_file();
+
+ temp_file
+ .write_all_volatile(&VolatileSlice::from(source.as_mut_slice()))
+ .unwrap();
+ temp_file.rewind().unwrap();
+
+ let mut written = vec![0u8; source.len()];
+ temp_file.read_exact(written.as_mut_slice()).unwrap();
+
+ assert_eq!(source, written);
+ // check no excess bytes were written to the file
+ assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0);
+ }
+
+ #[test]
+ fn test_write_volatile() {
+ let test_cases = [
+ (vec![1u8, 2], [1u8, 2, 0, 0, 0]),
+ (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]),
+ // ensure we don't have a buffer overrun
+ (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]),
+ ];
+
+ for (input, output) in test_cases {
+ write_4_bytes_to_5_byte_vec(input.clone(), output);
+ write_5_bytes_to_file(input);
+ }
+ }
+
+ #[test]
+ fn test_read_volatile_for_cursor() {
+ let read_buffer = [1, 2, 3, 4, 5, 6, 7];
+ let mut output = vec![0u8; 5];
+
+ let mut cursor = Cursor::new(read_buffer);
+
+ // Read 4 bytes from cursor to volatile slice (amount read limited by volatile slice length)
+ assert_eq!(
+ cursor
+ .read_volatile(&mut VolatileSlice::from(&mut output[..4]))
+ .unwrap(),
+ 4
+ );
+ assert_eq!(output, vec![1, 2, 3, 4, 0]);
+
+ // Read next 3 bytes from cursor to volatile slice (amount read limited by length of remaining data in cursor)
+ assert_eq!(
+ cursor
+ .read_volatile(&mut VolatileSlice::from(&mut output[..4]))
+ .unwrap(),
+ 3
+ );
+ assert_eq!(output, vec![5, 6, 7, 4, 0]);
+
+ cursor.set_position(0);
+ // Same as first test above, but with read_exact
+ cursor
+ .read_exact_volatile(&mut VolatileSlice::from(&mut output[..4]))
+ .unwrap();
+ assert_eq!(output, vec![1, 2, 3, 4, 0]);
+
+ // Same as above, but with read_exact. Should fail now, because we cannot fill a 4-byte buffer
+ // with what's remaining in the cursor (3 bytes). Output should remain unchanged.
+ assert!(cursor
+ .read_exact_volatile(&mut VolatileSlice::from(&mut output[..4]))
+ .is_err());
+ assert_eq!(output, vec![1, 2, 3, 4, 0]);
+ }
+
+ #[test]
+ fn test_write_volatile_for_cursor() {
+ let mut write_buffer = vec![0u8; 7];
+ let mut input = [1, 2, 3, 4];
+
+ let mut cursor = Cursor::new(write_buffer.as_mut_slice());
+
+ // Write 4 bytes from volatile slice to cursor (amount written limited by volatile slice length)
+ assert_eq!(
+ cursor
+ .write_volatile(&VolatileSlice::from(input.as_mut_slice()))
+ .unwrap(),
+ 4
+ );
+ assert_eq!(cursor.get_ref(), &[1, 2, 3, 4, 0, 0, 0]);
+
+ // Write 3 bytes from volatile slice to cursor (amount written limited by remaining space in cursor)
+ assert_eq!(
+ cursor
+ .write_volatile(&VolatileSlice::from(input.as_mut_slice()))
+ .unwrap(),
+ 3
+ );
+ assert_eq!(cursor.get_ref(), &[1, 2, 3, 4, 1, 2, 3]);
+ }
+
+ #[test]
+ fn test_write_volatile_for_vec() {
+ let mut write_buffer = Vec::new();
+ let mut input = [1, 2, 3, 4];
+
+ assert_eq!(
+ write_buffer
+ .write_volatile(&VolatileSlice::from(input.as_mut_slice()))
+ .unwrap(),
+ 4
+ );
+
+ assert_eq!(&write_buffer, &input);
+ }
+}
diff --git a/crates/vm-memory/src/lib.rs b/crates/vm-memory/src/lib.rs
index b574dfa..6f87ce4 100644
--- a/crates/vm-memory/src/lib.rs
+++ b/crates/vm-memory/src/lib.rs
@@ -15,9 +15,14 @@
//! without knowing the implementation details of the VM memory provider. Thus hypervisor
//! components, such as boot loader, virtual device drivers, virtio backend drivers and vhost
//! drivers etc, could be shared and reused by multiple hypervisors.
+#![warn(clippy::doc_markdown)]
+#![warn(missing_docs)]
+#![warn(missing_debug_implementations)]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
-#![deny(clippy::doc_markdown)]
-#![deny(missing_docs)]
+// We only support 64bit. Fail build when attempting to build other targets
+#[cfg(not(target_pointer_width = "64"))]
+compile_error!("vm-memory only supports 64-bit targets!");
#[macro_use]
pub mod address;
@@ -45,6 +50,9 @@
GuestMemoryRegion, GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult,
};
+pub mod io;
+pub use io::{ReadVolatile, WriteVolatile};
+
#[cfg(all(feature = "backend-mmap", not(feature = "xen"), unix))]
mod mmap_unix;
@@ -56,6 +64,7 @@
#[cfg(feature = "backend-mmap")]
pub mod mmap;
+
#[cfg(feature = "backend-mmap")]
pub use mmap::{Error, GuestMemoryMmap, GuestRegionMmap, MmapRegion};
#[cfg(all(feature = "backend-mmap", feature = "xen", unix))]
diff --git a/crates/vm-memory/src/mmap.rs b/crates/vm-memory/src/mmap.rs
index 0a442e6..48d9a56 100644
--- a/crates/vm-memory/src/mmap.rs
+++ b/crates/vm-memory/src/mmap.rs
@@ -24,8 +24,7 @@
use crate::address::Address;
use crate::bitmap::{Bitmap, BS};
use crate::guest_memory::{
- self, FileOffset, GuestAddress, GuestMemory, GuestMemoryIterator, GuestMemoryRegion,
- GuestUsize, MemoryRegionAddress,
+ self, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MemoryRegionAddress,
};
use crate::volatile_memory::{VolatileMemory, VolatileSlice};
use crate::{AtomicAccess, Bytes};
@@ -273,6 +272,7 @@
F: Read,
{
let maddr = addr.raw_value() as usize;
+ #[allow(deprecated)] // function itself is deprecated
self.as_volatile_slice()
.unwrap()
.read_from::<F>(maddr, src, count)
@@ -318,6 +318,7 @@
F: Read,
{
let maddr = addr.raw_value() as usize;
+ #[allow(deprecated)] // function itself is deprecated
self.as_volatile_slice()
.unwrap()
.read_exact_from::<F>(maddr, src, count)
@@ -363,6 +364,7 @@
F: Write,
{
let maddr = addr.raw_value() as usize;
+ #[allow(deprecated)] // function itself is deprecated
self.as_volatile_slice()
.unwrap()
.write_to::<F>(maddr, dst, count)
@@ -408,6 +410,7 @@
F: Write,
{
let maddr = addr.raw_value() as usize;
+ #[allow(deprecated)] // function itself is deprecated
self.as_volatile_slice()
.unwrap()
.write_all_to::<F>(maddr, dst, count)
@@ -508,7 +511,7 @@
/// Creates a container and allocates anonymous memory for guest memory regions.
///
- /// Valid memory regions are specified as a sequence of (Address, Size, Option<FileOffset>)
+ /// Valid memory regions are specified as a sequence of (Address, Size, [`Option<FileOffset>`])
/// tuples sorted by Address.
pub fn from_ranges_with_files<A, T>(ranges: T) -> result::Result<Self, Error>
where
@@ -609,27 +612,9 @@
}
}
-/// An iterator over the elements of `GuestMemoryMmap`.
-///
-/// This struct is created by `GuestMemory::iter()`. See its documentation for more.
-pub struct Iter<'a, B>(std::slice::Iter<'a, Arc<GuestRegionMmap<B>>>);
-
-impl<'a, B> Iterator for Iter<'a, B> {
- type Item = &'a GuestRegionMmap<B>;
- fn next(&mut self) -> Option<Self::Item> {
- self.0.next().map(AsRef::as_ref)
- }
-}
-
-impl<'a, B: 'a> GuestMemoryIterator<'a, GuestRegionMmap<B>> for GuestMemoryMmap<B> {
- type Iter = Iter<'a, B>;
-}
-
impl<B: Bitmap + 'static> GuestMemory for GuestMemoryMmap<B> {
type R = GuestRegionMmap<B>;
- type I = Self;
-
fn num_regions(&self) -> usize {
self.regions.len()
}
@@ -644,8 +629,8 @@
index.map(|x| self.regions[x].as_ref())
}
- fn iter(&self) -> Iter<B> {
- Iter(self.regions.iter())
+ fn iter(&self) -> impl Iterator<Item = &Self::R> {
+ self.regions.iter().map(AsRef::as_ref)
}
}
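Note: with the associated `GuestMemoryIterator` type removed, `iter()` is a plain `impl Trait` method; a caller-side sketch (assuming the `backend-mmap` feature):

```rust
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion};

fn main() {
    let gm = GuestMemoryMmap::<()>::from_ranges(&[
        (GuestAddress(0x0), 0x1000),
        (GuestAddress(0x10000), 0x1000),
    ])
    .expect("failed to create guest memory");

    // `iter()` now returns `impl Iterator<Item = &Self::R>` directly, so the
    // usual iterator adapters work without naming a concrete iterator type.
    let total: u64 = gm.iter().map(|region| region.len()).sum();
    assert_eq!(total, 0x2000);
}
```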
@@ -946,7 +931,7 @@
])
.unwrap();
- let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
for guest_mem in guest_mem_list.iter() {
assert!(guest_mem.address_in_range(GuestAddress(0x200)));
assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
@@ -972,7 +957,7 @@
])
.unwrap();
- let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
for guest_mem in guest_mem_list.iter() {
assert_eq!(
guest_mem.check_address(GuestAddress(0x200)),
@@ -1004,7 +989,7 @@
])
.unwrap();
- let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
for guest_mem in guest_mem_list.iter() {
assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none());
let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap();
@@ -1032,7 +1017,7 @@
])
.unwrap();
- let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
for guest_mem in guest_mem_list.iter() {
assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_err());
let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap();
@@ -1059,7 +1044,7 @@
)])
.unwrap();
- let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
+ let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
for guest_mem in guest_mem_list.iter() {
let sample_buf = &[1, 2, 3, 4, 5];
@@ -1097,7 +1082,7 @@
])
.unwrap();
- let gm_list = vec![gm, gm_backed_by_file];
+ let gm_list = [gm, gm_backed_by_file];
for gm in gm_list.iter() {
let val1: u64 = 0xaa55_aa55_aa55_aa55;
let val2: u64 = 0x55aa_55aa_55aa_55aa;
@@ -1137,7 +1122,7 @@
)])
.unwrap();
- let gm_list = vec![gm, gm_backed_by_file];
+ let gm_list = [gm, gm_backed_by_file];
for gm in gm_list.iter() {
let sample_buf = &[1, 2, 3, 4, 5];
@@ -1168,7 +1153,7 @@
)])
.unwrap();
- let gm_list = vec![gm, gm_backed_by_file];
+ let gm_list = [gm, gm_backed_by_file];
for gm in gm_list.iter() {
let addr = GuestAddress(0x1010);
let mut file = if cfg!(unix) {
@@ -1177,7 +1162,7 @@
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
};
gm.write_obj(!0u32, addr).unwrap();
- gm.read_exact_from(addr, &mut file, mem::size_of::<u32>())
+ gm.read_exact_volatile_from(addr, &mut file, mem::size_of::<u32>())
.unwrap();
let value: u32 = gm.read_obj(addr).unwrap();
if cfg!(unix) {
@@ -1186,8 +1171,8 @@
assert_eq!(value, 0x0090_5a4d);
}
- let mut sink = Vec::new();
- gm.write_all_to(addr, &mut sink, mem::size_of::<u32>())
+ let mut sink = vec![0; mem::size_of::<u32>()];
+ gm.write_all_volatile_to(addr, &mut sink.as_mut_slice(), mem::size_of::<u32>())
.unwrap();
if cfg!(unix) {
assert_eq!(sink, vec![0; mem::size_of::<u32>()]);
@@ -1271,7 +1256,7 @@
])
.unwrap();
- let gm_list = vec![gm, gm_backed_by_file];
+ let gm_list = [gm, gm_backed_by_file];
for gm in gm_list.iter() {
let sample_buf = &[1, 2, 3, 4, 5];
assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
@@ -1484,7 +1469,7 @@
Some(GuestAddress(0xfff))
);
assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None);
- assert_eq!(guest_mem.checked_offset(start_addr1, std::usize::MAX), None);
+ assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None);
assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None);
assert_eq!(
@@ -1513,7 +1498,7 @@
assert!(guest_mem.check_range(start_addr2, 0x800));
assert!(!guest_mem.check_range(start_addr2, 0x801));
assert!(!guest_mem.check_range(start_addr2, 0xc00));
- assert!(!guest_mem.check_range(start_addr1, std::usize::MAX));
+ assert!(!guest_mem.check_range(start_addr1, usize::MAX));
}
#[test]
diff --git a/crates/vm-memory/src/mmap_unix.rs b/crates/vm-memory/src/mmap_unix.rs
index c1d1adb..14ceb80 100644
--- a/crates/vm-memory/src/mmap_unix.rs
+++ b/crates/vm-memory/src/mmap_unix.rs
@@ -52,6 +52,7 @@
pub type Result<T> = result::Result<T, Error>;
/// A factory struct to build `MmapRegion` objects.
+#[derive(Debug)]
pub struct MmapRegionBuilder<B = ()> {
size: usize,
prot: i32,
@@ -445,6 +446,7 @@
use super::*;
use std::io::Write;
+ use std::num::NonZeroUsize;
use std::slice;
use std::sync::Arc;
use vmm_sys_util::tempfile::TempFile;
@@ -453,13 +455,14 @@
type MmapRegion = super::MmapRegion<()>;
- // Adding a helper method to extract the errno within an Error::Mmap(e), or return a
- // distinctive value when the error is represented by another variant.
impl Error {
+ /// Helper method to extract the errno within an
+ /// `Error::Mmap(e)`. Returns `i32::MIN` if `self` is any
+ /// other variant.
pub fn raw_os_error(&self) -> i32 {
match self {
Error::Mmap(e) => e.raw_os_error().unwrap(),
- _ => std::i32::MIN,
+ _ => i32::MIN,
}
}
}
@@ -550,7 +553,7 @@
// Offset + size will overflow.
let r = MmapRegion::build(
- Some(FileOffset::from_arc(a.clone(), std::u64::MAX)),
+ Some(FileOffset::from_arc(a.clone(), u64::MAX)),
size,
prot,
flags,
@@ -598,7 +601,7 @@
assert!(r.owned());
let region_size = 0x10_0000;
- let bitmap = AtomicBitmap::new(region_size, 0x1000);
+ let bitmap = AtomicBitmap::new(region_size, unsafe { NonZeroUsize::new_unchecked(0x1000) });
let builder = MmapRegionBuilder::new_with_bitmap(region_size, bitmap)
.with_hugetlbfs(true)
.with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE);
diff --git a/crates/vm-memory/src/mmap_xen.rs b/crates/vm-memory/src/mmap_xen.rs
index b641311..31d08e2 100644
--- a/crates/vm-memory/src/mmap_xen.rs
+++ b/crates/vm-memory/src/mmap_xen.rs
@@ -432,6 +432,7 @@
// Bit mask for the vhost-user xen mmap message.
bitflags! {
/// Flags for the Xen mmap message.
+ #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct MmapXenFlags: u32 {
/// Standard Unix memory mapping.
const UNIX = 0x0;
@@ -1035,7 +1036,7 @@
fn raw_os_error(&self) -> i32 {
match self {
Error::Mmap(e) => e.raw_os_error().unwrap(),
- _ => std::i32::MIN,
+ _ => i32::MIN,
}
}
}
@@ -1062,6 +1063,7 @@
}
impl MmapRegion {
+ /// Create an `MmapRegion` with the specified `size` at `GuestAddress(0)`.
pub fn new(size: usize) -> Result<Self> {
let range = MmapRange::new_unix(size, None, GuestAddress(0));
Self::from_range(range)
diff --git a/crates/vm-memory/src/volatile_memory.rs b/crates/vm-memory/src/volatile_memory.rs
index 76e41bb..30e1038 100644
--- a/crates/vm-memory/src/volatile_memory.rs
+++ b/crates/vm-memory/src/volatile_memory.rs
@@ -18,10 +18,10 @@
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always been done with a volatile read or write.
-//! The First rule is because having references of any kind to memory considered volatile would
-//! violate pointer aliasing. The second is because unvolatile accesses are inherently undefined if
-//! done concurrently without synchronization. With volatile access we know that the compiler has
-//! not reordered or elided the access.
+//! The First rule is because having references of any kind to memory considered volatile would
+//! violate pointer aliasing. The second is because unvolatile accesses are inherently undefined if
+//! done concurrently without synchronization. With volatile access we know that the compiler has
+//! not reordered or elided the access.
use std::cmp::min;
use std::io::{self, Read, Write};
@@ -31,7 +31,6 @@
use std::ptr::{read_volatile, write_volatile};
use std::result;
use std::sync::atomic::Ordering;
-use std::usize;
use crate::atomic_integer::AtomicInteger;
use crate::bitmap::{Bitmap, BitmapSlice, BS};
@@ -43,6 +42,7 @@
#[cfg(not(feature = "xen"))]
type MmapInfo = std::marker::PhantomData<()>;
+use crate::io::{ReadVolatile, WriteVolatile};
use copy_slice_impl::{copy_from_volatile_slice, copy_to_volatile_slice};
/// `VolatileMemory` related errors.
@@ -301,6 +301,7 @@
struct Packed<T>(T);
/// A guard to perform mapping and protect unmapping of the memory.
+#[derive(Debug)]
pub struct PtrGuard {
addr: *mut u8,
len: usize,
@@ -346,6 +347,7 @@
}
/// A mutable guard to perform mapping and protect unmapping of the memory.
+#[derive(Debug)]
pub struct PtrGuardMut(PtrGuard);
#[allow(clippy::len_without_is_empty)]
@@ -682,7 +684,7 @@
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), 4);
/// ```
- fn write(&self, buf: &[u8], addr: usize) -> Result<usize> {
+ fn write(&self, mut buf: &[u8], addr: usize) -> Result<usize> {
if buf.is_empty() {
return Ok(0);
}
@@ -691,18 +693,10 @@
return Err(Error::OutOfBounds { addr });
}
- let total = buf.len().min(self.len() - addr);
- let dst = self.subslice(addr, total)?;
-
- // SAFETY:
- // We check above that `addr` is a valid offset within this volatile slice, and by
- // the invariants of `VolatileSlice::new`, this volatile slice points to contiguous
- // memory of length self.len(). Furthermore, both src and dst of the call to
- // copy_to_volatile_slice are valid for reads and writes respectively of length `total`
- // since total is the minimum of lengths of the memory areas pointed to. The areas do not
- // overlap, since `dst` is inside guest memory, and buf is a slice (no slices to guest
- // memory are possible without violating rust's aliasing rules).
- Ok(unsafe { copy_to_volatile_slice(&dst, buf.as_ptr(), total) })
+ // NOTE: the duality of read <-> write here is correct. This is because we translate a call
+ // "volatile_slice.write(buf)" (e.g. "write to volatile_slice from buf") into
+ // "buf.read_volatile(volatile_slice)" (e.g. read from buf into volatile_slice)
+ buf.read_volatile(&mut self.offset(addr)?)
}
/// # Examples
@@ -719,7 +713,7 @@
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), 14);
/// ```
- fn read(&self, buf: &mut [u8], addr: usize) -> Result<usize> {
+ fn read(&self, mut buf: &mut [u8], addr: usize) -> Result<usize> {
if buf.is_empty() {
return Ok(0);
}
@@ -728,18 +722,11 @@
return Err(Error::OutOfBounds { addr });
}
- let total = buf.len().min(self.len() - addr);
- let src = self.subslice(addr, total)?;
-
- // SAFETY:
- // We check above that `addr` is a valid offset within this volatile slice, and by
- // the invariants of `VolatileSlice::new`, this volatile slice points to contiguous
- // memory of length self.len(). Furthermore, both src and dst of the call to
- // copy_from_volatile_slice are valid for reads and writes respectively of length `total`
- // since total is the minimum of lengths of the memory areas pointed to. The areas do not
- // overlap, since `dst` is inside guest memory, and buf is a slice (no slices to guest
- // memory are possible without violating rust's aliasing rules).
- unsafe { Ok(copy_from_volatile_slice(buf.as_mut_ptr(), &src, total)) }
+ // NOTE: The duality of read <-> write here is correct. This is because we translate a call
+ // volatile_slice.read(buf) (e.g. read from volatile_slice into buf) into
+ // "buf.write_volatile(volatile_slice)" (e.g. write into buf from volatile_slice)
+ // Both express data transfer from volatile_slice to buf.
+ buf.write_volatile(&self.offset(addr)?)
}
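Note: the rewritten `write`/`read` above delegate to `ReadVolatile`/`WriteVolatile`, but the observable behaviour is unchanged; a quick round-trip sketch:

```rust
use vm_memory::{Bytes, VolatileSlice};

fn main() {
    let mut backing = vec![0u8; 8];
    let vslice = VolatileSlice::from(backing.as_mut_slice());

    // Write two bytes at offset 2, then read them back from the same offset.
    assert_eq!(vslice.write(&[0xaa, 0xbb], 2).unwrap(), 2);
    let mut out = [0u8; 2];
    assert_eq!(vslice.read(&mut out, 2).unwrap(), 2);
    assert_eq!(out, [0xaa, 0xbb]);
}
```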
/// # Examples
@@ -1512,7 +1499,7 @@
addr & (!addr + 1)
}
-mod copy_slice_impl {
+pub(crate) mod copy_slice_impl {
use super::*;
// SAFETY: Has the same safety requirements as `read_volatile` + `write_volatile`, namely:
@@ -1610,7 +1597,7 @@
///
/// SAFETY: `slice` and `dst` must be point to a contiguously allocated memory region of at
/// least length `total`. The regions must not overlap.
- pub(super) unsafe fn copy_from_volatile_slice<B: BitmapSlice>(
+ pub(crate) unsafe fn copy_from_volatile_slice<B: BitmapSlice>(
dst: *mut u8,
slice: &VolatileSlice<'_, B>,
total: usize,
@@ -1625,7 +1612,7 @@
///
/// SAFETY: `slice` and `src` must be point to a contiguously allocated memory region of at
/// least length `total`. The regions must not overlap.
- pub(super) unsafe fn copy_to_volatile_slice<B: BitmapSlice>(
+ pub(crate) unsafe fn copy_to_volatile_slice<B: BitmapSlice>(
slice: &VolatileSlice<'_, B>,
src: *const u8,
total: usize,
@@ -1647,7 +1634,6 @@
use std::alloc::Layout;
use std::fs::File;
- use std::io::Cursor;
use std::mem::size_of_val;
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
@@ -1655,6 +1641,7 @@
use std::thread::spawn;
use matches::assert_matches;
+ use std::num::NonZeroUsize;
use vmm_sys_util::tempfile::TempFile;
use crate::bitmap::tests::{
@@ -1662,6 +1649,8 @@
};
use crate::bitmap::{AtomicBitmap, RefSlice};
+ const DEFAULT_PAGE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(0x1000) };
+
#[test]
fn test_display_error() {
assert_eq!(
@@ -1899,8 +1888,8 @@
assert!(slice.subslice(101, 0).is_err());
assert!(slice.subslice(101, 1).is_err());
- assert!(slice.subslice(std::usize::MAX, 2).is_err());
- assert!(slice.subslice(2, std::usize::MAX).is_err());
+ assert!(slice.subslice(usize::MAX, 2).is_err());
+ assert!(slice.subslice(2, usize::MAX).is_err());
let maybe_offset_slice = slice.subslice(10, 80);
assert!(maybe_offset_slice.is_ok());
@@ -2008,14 +1997,13 @@
#[test]
fn slice_overflow_error() {
- use std::usize::MAX;
let mut backing = vec![0u8];
let a = VolatileSlice::from(backing.as_mut_slice());
- let res = a.get_slice(MAX, 1).unwrap_err();
+ let res = a.get_slice(usize::MAX, 1).unwrap_err();
assert_matches!(
res,
Error::Overflow {
- base: MAX,
+ base: usize::MAX,
offset: 1,
}
);
@@ -2032,14 +2020,13 @@
#[test]
fn ref_overflow_error() {
- use std::usize::MAX;
let mut backing = vec![0u8];
let a = VolatileSlice::from(backing.as_mut_slice());
- let res = a.get_ref::<u8>(MAX).unwrap_err();
+ let res = a.get_ref::<u8>(usize::MAX).unwrap_err();
assert_matches!(
res,
Error::Overflow {
- base: MAX,
+ base: usize::MAX,
offset: 1,
}
);
@@ -2114,11 +2101,11 @@
let a = VolatileSlice::from(backing.as_mut_slice());
let s = a.as_volatile_slice();
assert!(s.write_obj(55u16, 4).is_err());
- assert!(s.write_obj(55u16, core::usize::MAX).is_err());
+ assert!(s.write_obj(55u16, usize::MAX).is_err());
assert!(s.write_obj(55u16, 2).is_ok());
assert_eq!(s.read_obj::<u16>(2).unwrap(), 55u16);
assert!(s.read_obj::<u16>(4).is_err());
- assert!(s.read_obj::<u16>(core::usize::MAX).is_err());
+ assert!(s.read_obj::<u16>(usize::MAX).is_err());
}
#[test]
@@ -2132,16 +2119,15 @@
} else {
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
};
- assert!(s.read_exact_from(2, &mut file, size_of::<u32>()).is_err());
- assert!(s
- .read_exact_from(core::usize::MAX, &mut file, size_of::<u32>())
- .is_err());
- assert!(s.read_exact_from(1, &mut file, size_of::<u32>()).is_ok());
+ assert!(file
+ .read_exact_volatile(&mut s.get_slice(1, size_of::<u32>()).unwrap())
+ .is_ok());
let mut f = TempFile::new().unwrap().into_file();
- assert!(s.read_exact_from(1, &mut f, size_of::<u32>()).is_err());
- format!("{:?}", s.read_exact_from(1, &mut f, size_of::<u32>()));
+ assert!(f
+ .read_exact_volatile(&mut s.get_slice(1, size_of::<u32>()).unwrap())
+ .is_err());
let value = s.read_obj::<u32>(1).unwrap();
if cfg!(unix) {
@@ -2150,13 +2136,12 @@
assert_eq!(value, 0x0090_5a4d);
}
- let mut sink = Vec::new();
- assert!(s.write_all_to(1, &mut sink, size_of::<u32>()).is_ok());
- assert!(s.write_all_to(2, &mut sink, size_of::<u32>()).is_err());
- assert!(s
- .write_all_to(core::usize::MAX, &mut sink, size_of::<u32>())
- .is_err());
- format!("{:?}", s.write_all_to(2, &mut sink, size_of::<u32>()));
+ let mut sink = vec![0; size_of::<u32>()];
+ assert!(sink
+ .as_mut_slice()
+ .write_all_volatile(&s.get_slice(1, size_of::<u32>()).unwrap())
+ .is_ok());
+
if cfg!(unix) {
assert_eq!(sink, vec![0; size_of::<u32>()]);
} else {
@@ -2190,16 +2175,15 @@
}
unsafe impl ByteValued for BytesToRead {}
let cursor_size = 20;
- let mut image = Cursor::new(vec![1u8; cursor_size]);
+ let image = vec![1u8; cursor_size];
- // Trying to read more bytes than we have available in the cursor should
- // make the read_from function return maximum cursor size (i.e. 20).
+ // Trying to read more bytes than are available in `image` should
+ // make `read_volatile` return the vec's full length (i.e. 20).
let mut bytes_to_read = BytesToRead::default();
- let size_of_bytes = size_of_val(&bytes_to_read);
assert_eq!(
- bytes_to_read
- .as_bytes()
- .read_from(0, &mut image, size_of_bytes)
+ image
+ .as_slice()
+ .read_volatile(&mut bytes_to_read.as_bytes())
.unwrap(),
cursor_size
);
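A hedged, self-contained restatement of what the reworked assertion above checks (illustrative
only, not applied by this change): the `ReadVolatile` implementation for `&[u8]` copies at most
as many bytes as the source slice holds and returns that count. The imports assume the
crate-root re-exports (`ReadVolatile` may instead come from `vm_memory::io`):

    use vm_memory::{ReadVolatile, VolatileSlice};

    fn main() {
        let image = vec![1u8; 20];
        let mut dst = [0u8; 32];
        // The destination has room for 32 bytes, but only 20 are available in `image`,
        // so `read_volatile` copies 20 bytes and reports that count.
        let copied = image
            .as_slice()
            .read_volatile(&mut VolatileSlice::from(&mut dst[..]))
            .unwrap();
        assert_eq!(copied, 20);
        assert_eq!(&dst[..20], image.as_slice());
    }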
@@ -2314,14 +2298,13 @@
let val = 123u64;
let dirty_offset = 0x1000;
let dirty_len = size_of_val(&val);
- let page_size = 0x1000;
let len = 0x10000;
let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) };
// Invoke the `Bytes` test helper function.
{
- let bitmap = AtomicBitmap::new(len, page_size);
+ let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
test_bytes(
@@ -2337,18 +2320,18 @@
// Invoke the `VolatileMemory` test helper function.
{
- let bitmap = AtomicBitmap::new(len, page_size);
+ let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
test_volatile_memory(&slice);
}
- let bitmap = AtomicBitmap::new(len, page_size);
+ let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
- let bitmap2 = AtomicBitmap::new(len, page_size);
+ let bitmap2 = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
let slice2 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap2.slice_at(0), None) };
- let bitmap3 = AtomicBitmap::new(len, page_size);
+ let bitmap3 = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
let slice3 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap3.slice_at(0), None) };
assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
@@ -2404,9 +2387,8 @@
fn test_volatile_ref_dirty_tracking() {
let val = 123u64;
let mut buf = vec![val];
- let page_size = 0x1000;
- let bitmap = AtomicBitmap::new(size_of_val(&val), page_size);
+ let bitmap = AtomicBitmap::new(size_of_val(&val), DEFAULT_PAGE_SIZE);
let vref = unsafe {
VolatileRef::with_bitmap(buf.as_mut_ptr() as *mut u8, bitmap.slice_at(0), None)
};
@@ -2416,8 +2398,11 @@
assert!(range_is_dirty(vref.bitmap(), 0, vref.len()));
}
- fn test_volatile_array_ref_copy_from_tracking<T>(buf: &mut [T], index: usize, page_size: usize)
- where
+ fn test_volatile_array_ref_copy_from_tracking<T>(
+ buf: &mut [T],
+ index: usize,
+ page_size: NonZeroUsize,
+ ) where
T: ByteValued + From<u8>,
{
let bitmap = AtomicBitmap::new(size_of_val(buf), page_size);
@@ -2444,14 +2429,13 @@
let dirty_len = size_of_val(&val);
let index = 0x1000;
let dirty_offset = dirty_len * index;
- let page_size = 0x1000;
let mut buf = vec![0u64; index + 1];
let mut byte_buf = vec![0u8; index + 1];
// Test `ref_at`.
{
- let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
+ let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), DEFAULT_PAGE_SIZE);
let arr = unsafe {
VolatileArrayRef::with_bitmap(
buf.as_mut_ptr() as *mut u8,
@@ -2468,7 +2452,7 @@
// Test `store`.
{
- let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), page_size);
+ let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), DEFAULT_PAGE_SIZE);
let arr = unsafe {
VolatileArrayRef::with_bitmap(
buf.as_mut_ptr() as *mut u8,
@@ -2485,8 +2469,8 @@
}
// Test `copy_from` when size_of::<T>() == 1.
- test_volatile_array_ref_copy_from_tracking(&mut byte_buf, index, page_size);
+ test_volatile_array_ref_copy_from_tracking(&mut byte_buf, index, DEFAULT_PAGE_SIZE);
// Test `copy_from` when size_of::<T>() > 1.
- test_volatile_array_ref_copy_from_tracking(&mut buf, index, page_size);
+ test_volatile_array_ref_copy_from_tracking(&mut buf, index, DEFAULT_PAGE_SIZE);
}
}
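A minimal usage sketch motivated by the new `DEFAULT_PAGE_SIZE` constant above (illustrative
only, not applied by this change): `AtomicBitmap::new(len, page_size)` now takes a
`NonZeroUsize` page size in this vm-memory release. The sketch assumes the crate is built with
its `backend-bitmap` feature; the `mark_dirty`/`dirty_at` calls are an assumption about the
`Bitmap` trait's public API, not something shown in this diff:

    use std::num::NonZeroUsize;

    use vm_memory::bitmap::{AtomicBitmap, Bitmap};

    fn main() {
        // The page size must now be proven non-zero at construction time.
        let page_size = NonZeroUsize::new(0x1000).expect("page size must be non-zero");
        let bitmap = AtomicBitmap::new(0x10000, page_size);

        // Mark one value's worth of bytes dirty and confirm page-granular tracking.
        bitmap.mark_dirty(0x1000, 8);
        assert!(bitmap.dirty_at(0x1000));
        assert!(!bitmap.dirty_at(0));
    }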
diff --git a/pseudo_crate/Cargo.lock b/pseudo_crate/Cargo.lock
index 99d6b90..3544015 100644
--- a/pseudo_crate/Cargo.lock
+++ b/pseudo_crate/Cargo.lock
@@ -447,10 +447,10 @@
"vhost-user-backend",
"virtio-bindings",
"virtio-drivers",
- "virtio-queue 0.11.0",
+ "virtio-queue 0.14.0",
"virtio-vsock",
- "vm-memory 0.12.2",
- "vmm-sys-util 0.12.1",
+ "vm-memory 0.16.1",
+ "vmm-sys-util",
"vsock",
"vsprintf",
"vulkano",
@@ -5605,9 +5605,25 @@
[[package]]
name = "uuid"
-version = "1.7.0"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a"
+checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a"
+dependencies = [
+ "getrandom",
+ "rand",
+ "uuid-macro-internal",
+]
+
+[[package]]
+name = "uuid-macro-internal"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b91f57fe13a38d0ce9e28a03463d8d3c2468ed03d75375110ec71d93b449a08"
+dependencies = [
+ "proc-macro2 1.0.75",
+ "quote 1.0.36",
+ "syn 2.0.58",
+]
[[package]]
name = "valuable"
@@ -5635,36 +5651,37 @@
[[package]]
name = "vhost"
-version = "0.8.1"
+version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61957aeb36daf0b00b87fff9c10dd28a161bd35ab157553d340d183b3d8756e6"
+checksum = "bce0aad4d8776cb64f1ac591e908a561c50ba6adac4416296efee590b155623f"
dependencies = [
- "bitflags 1.3.2",
+ "bitflags 2.6.0",
"libc",
- "vm-memory 0.12.2",
- "vmm-sys-util 0.11.2",
+ "uuid",
+ "vm-memory 0.16.1",
+ "vmm-sys-util",
]
[[package]]
name = "vhost-user-backend"
-version = "0.10.1"
+version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab069cdedaf18a0673766eb0a07a0f4ee3ed1b8e17fbfe4aafe5b988e2de1d01"
+checksum = "daa03d476437d005abd2dec0970c468ed2a692e6a0604b834699680e171de942"
dependencies = [
"libc",
"log",
"vhost",
"virtio-bindings",
- "virtio-queue 0.9.0",
- "vm-memory 0.12.2",
- "vmm-sys-util 0.11.2",
+ "virtio-queue 0.14.0",
+ "vm-memory 0.16.1",
+ "vmm-sys-util",
]
[[package]]
name = "virtio-bindings"
-version = "0.2.2"
+version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "878bcb1b2812a10c30d53b0ed054999de3d98f25ece91fc173973f9c57aaae86"
+checksum = "1711e61c00f8cb450bd15368152a1e37a12ef195008ddc7d0f4812f9e2b30a68"
[[package]]
name = "virtio-drivers"
@@ -5679,18 +5696,6 @@
[[package]]
name = "virtio-queue"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35aca00da06841bd99162c381ec65893cace23ca0fb89254302cfe4bec4c300f"
-dependencies = [
- "log",
- "virtio-bindings",
- "vm-memory 0.12.2",
- "vmm-sys-util 0.11.2",
-]
-
-[[package]]
-name = "virtio-queue"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3f69a13d6610db9312acbb438b0390362af905d37634a2106be70c0f734986d"
@@ -5698,7 +5703,19 @@
"log",
"virtio-bindings",
"vm-memory 0.14.1",
- "vmm-sys-util 0.12.1",
+ "vmm-sys-util",
+]
+
+[[package]]
+name = "virtio-queue"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "872e2f3fbd70a7e6f01689720cce3d5c2c5efe52b484dd07b674246ada0e9a8d"
+dependencies = [
+ "log",
+ "virtio-bindings",
+ "vm-memory 0.16.1",
+ "vmm-sys-util",
]
[[package]]
@@ -5723,18 +5740,6 @@
[[package]]
name = "vm-memory"
-version = "0.12.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9dc276f0d00c17b9aeb584da0f1e1c673df0d183cc2539e3636ec8cbc5eae99b"
-dependencies = [
- "arc-swap",
- "libc",
- "thiserror",
- "winapi",
-]
-
-[[package]]
-name = "vm-memory"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c3aba5064cc5f6f7740cddc8dae34d2d9a311cac69b60d942af7f3ab8fc49f4"
@@ -5745,13 +5750,15 @@
]
[[package]]
-name = "vmm-sys-util"
-version = "0.11.2"
+name = "vm-memory"
+version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48b7b084231214f7427041e4220d77dfe726897a6d41fddee450696e66ff2a29"
+checksum = "f1720e7240cdc739f935456eb77f370d7e9b2a3909204da1e2b47bef1137a013"
dependencies = [
- "bitflags 1.3.2",
+ "arc-swap",
"libc",
+ "thiserror",
+ "winapi",
]
[[package]]
diff --git a/pseudo_crate/Cargo.toml b/pseudo_crate/Cargo.toml
index 27eed76..0e62591 100644
--- a/pseudo_crate/Cargo.toml
+++ b/pseudo_crate/Cargo.toml
@@ -358,14 +358,14 @@
userfaultfd = "=0.7.0"
userfaultfd-sys = "=0.5.0"
utf-8 = "=0.7.6"
-uuid = "=1.7.0"
-vhost = "=0.8.1"
-vhost-user-backend = "=0.10.1"
-virtio-bindings = "=0.2.2"
+uuid = "=1.11.0"
+vhost = "=0.13.0"
+vhost-user-backend = "=0.17.0"
+virtio-bindings = "=0.2.4"
virtio-drivers = "=0.7.4"
-virtio-queue = "=0.11.0"
+virtio-queue = "=0.14.0"
virtio-vsock = "=0.5.0"
-vm-memory = "=0.12.2"
+vm-memory = "=0.16.1"
vmm-sys-util = "=0.12.1"
vsock = "=0.5.0"
vsprintf = "=2.0.0"