Merge "Revert "Upgrade to android13-5.15 kernel""
diff --git a/apex/sign_virt_apex.py b/apex/sign_virt_apex.py
index a1e81d2..1c0714e 100644
--- a/apex/sign_virt_apex.py
+++ b/apex/sign_virt_apex.py
@@ -104,7 +104,7 @@
         help='the directory having files to be packaged')
     args = parser.parse_args(argv)
     # preprocess --key_override into a map
-    args.key_overrides = dict()
+    args.key_overrides = {}
     if args.key_override:
         for pair in args.key_override:
             name, key = pair.split('=')
@@ -159,7 +159,7 @@
         - a list of descriptors.
     """
     if not os.path.exists(image_path):
-        raise ValueError('Failed to find image: {}'.format(image_path))
+        raise ValueError(f'Failed to find image: {image_path}')
 
     output, ret_code = RunCommand(
         args, ['avbtool', 'info_image', '--image', image_path], expected_return_values={0, 1})
@@ -283,8 +283,7 @@
                 part_key = chained_partitions[part_name]
                 avbpubkey = os.path.join(work_dir, part_name + '.avbpubkey')
                 ExtractAvbPubkey(args, part_key, avbpubkey)
-                cmd.extend(['--chain_partition', '%s:%s:%s' %
-                           (part_name, ril, avbpubkey)])
+                cmd.extend(['--chain_partition', f'{part_name}:{ril}:{avbpubkey}'])
 
         if args.signing_args:
             cmd.extend(shlex.split(args.signing_args))
@@ -292,7 +291,7 @@
         RunCommand(args, cmd)
         # libavb expects to be able to read the maximum vbmeta size, so we must provide a partition
         # which matches this or the read will fail.
-        with open(vbmeta_img, 'a') as f:
+        with open(vbmeta_img, 'a', encoding='utf8') as f:
             f.truncate(65536)
 
 
@@ -311,9 +310,8 @@
             tmp_img = os.path.join(work_dir, part)
             RunCommand(args, ['img2simg', img, tmp_img])
 
-            image_arg = '--image=%s=%s' % (part, img)
-            partition_arg = '--partition=%s:readonly:%d:default' % (
-                part, os.path.getsize(img))
+            image_arg = f'--image={part}={img}'
+            partition_arg = f'--partition={part}:readonly:{os.path.getsize(img)}:default'
             cmd.extend([image_arg, partition_arg])
 
         RunCommand(args, cmd)
@@ -448,15 +446,15 @@
             return f.read()
 
     def check_equals_pubkey(file):
-        assert contents(file) == pubkey, 'pubkey mismatch: %s' % file
+        assert contents(file) == pubkey, f'pubkey mismatch: {file}'
 
     def check_contains_pubkey(file):
-        assert contents(file).find(pubkey) != -1, 'pubkey missing: %s' % file
+        assert contents(file).find(pubkey) != -1, f'pubkey missing: {file}'
 
     def check_avb_pubkey(file):
         info, _ = AvbInfo(args, file)
-        assert info is not None, 'no avbinfo: %s' % file
-        assert info['Public key (sha1)'] == pubkey_digest, 'pubkey mismatch: %s' % file
+        assert info is not None, f'no avbinfo: {file}'
+        assert info['Public key (sha1)'] == pubkey_digest, f'pubkey mismatch: {file}'
 
     for f in files.values():
         if f == files['bootloader.pubkey']:
diff --git a/authfs/Android.bp b/authfs/Android.bp
index 935ed5c..84eb0f4 100644
--- a/authfs/Android.bp
+++ b/authfs/Android.bp
@@ -13,7 +13,6 @@
         "authfs_aidl_interface-rust",
         "libandroid_logger",
         "libanyhow",
-        "libauthfs_crypto_bindgen",
         "libauthfs_fsverity_metadata",
         "libbinder_rpc_unstable_bindgen",
         "libbinder_rs",
@@ -23,6 +22,7 @@
         "liblibc",
         "liblog_rust",
         "libnix",
+        "libopenssl",
         "libprotobuf",
         "libstructopt",
         "libthiserror",
@@ -34,26 +34,11 @@
         },
     },
     shared_libs: [
-        "libcrypto",
         "libbinder_rpc_unstable",
     ],
     defaults: ["crosvm_defaults"],
 }
 
-// TODO(b/172687320): remove once there is a canonical bindgen.
-rust_bindgen {
-    name: "libauthfs_crypto_bindgen",
-    wrapper_src: "src/crypto.hpp",
-    crate_name: "authfs_crypto_bindgen",
-    source_stem: "bindings",
-    shared_libs: [
-        "libcrypto",
-    ],
-    bindgen_flags: ["--size_t-is-usize"],
-    cflags: ["-D BORINGSSL_NO_CXX"],
-    apex_available: ["com.android.virt"],
-}
-
 rust_binary {
     name: "authfs",
     defaults: ["authfs_defaults"],
@@ -80,13 +65,3 @@
         "testdata/input.4m.fsv_meta.bad_merkle",
     ],
 }
-
-rust_test {
-    name: "libauthfs_crypto_bindgen_test",
-    srcs: [":libauthfs_crypto_bindgen"],
-    crate_name: "authfs_crypto_bindgen_test",
-    test_suites: ["general-tests"],
-    auto_gen_config: true,
-    clippy_lints: "none",
-    lints: "none",
-}
diff --git a/authfs/src/crypto.hpp b/authfs/src/crypto.hpp
deleted file mode 100644
index 58b0bd3..0000000
--- a/authfs/src/crypto.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AUTHFS_OPENSSL_WRAPPER_H
-#define AUTHFS_OPENSSL_WRAPPER_H
-
-#include <openssl/sha.h>
-
-#endif  // AUTHFS_OPENSSL_WRAPPER_H
diff --git a/authfs/src/crypto.rs b/authfs/src/crypto.rs
deleted file mode 100644
index 672dfb6..0000000
--- a/authfs/src/crypto.rs
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-use std::mem::MaybeUninit;
-
-use thiserror::Error;
-
-#[derive(Error, Debug)]
-pub enum CryptoError {
-    #[error("Unexpected error returned from {0}")]
-    Unexpected(&'static str),
-}
-
-use authfs_crypto_bindgen::{SHA256_Final, SHA256_Init, SHA256_Update, SHA256_CTX};
-
-pub type Sha256Hash = [u8; Sha256Hasher::HASH_SIZE];
-
-pub struct Sha256Hasher {
-    ctx: SHA256_CTX,
-}
-
-impl Sha256Hasher {
-    pub const HASH_SIZE: usize = 32;
-
-    pub const HASH_OF_4096_ZEROS: [u8; Self::HASH_SIZE] = [
-        0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9, 0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b,
-        0x02, 0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a, 0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89,
-        0x2c, 0xa7,
-    ];
-
-    pub fn new() -> Result<Sha256Hasher, CryptoError> {
-        // Safe assuming the crypto FFI should initialize the uninitialized `ctx`, which is
-        // currently a pure data struct.
-        unsafe {
-            let mut ctx = MaybeUninit::uninit();
-            if SHA256_Init(ctx.as_mut_ptr()) == 0 {
-                Err(CryptoError::Unexpected("SHA256_Init"))
-            } else {
-                Ok(Sha256Hasher { ctx: ctx.assume_init() })
-            }
-        }
-    }
-
-    pub fn update(&mut self, data: &[u8]) -> Result<&mut Self, CryptoError> {
-        // Safe assuming the crypto FFI will not touch beyond `ctx` as pure data.
-        let retval = unsafe {
-            SHA256_Update(&mut self.ctx, data.as_ptr() as *mut std::ffi::c_void, data.len())
-        };
-        if retval == 0 {
-            Err(CryptoError::Unexpected("SHA256_Update"))
-        } else {
-            Ok(self)
-        }
-    }
-
-    pub fn update_from<I, T>(&mut self, iter: I) -> Result<&mut Self, CryptoError>
-    where
-        I: IntoIterator<Item = T>,
-        T: AsRef<[u8]>,
-    {
-        for data in iter {
-            self.update(data.as_ref())?;
-        }
-        Ok(self)
-    }
-
-    pub fn finalize(&mut self) -> Result<[u8; Self::HASH_SIZE], CryptoError> {
-        let mut md = [0u8; Self::HASH_SIZE];
-        // Safe assuming the crypto FFI will not touch beyond `ctx` as pure data.
-        let retval = unsafe { SHA256_Final(md.as_mut_ptr(), &mut self.ctx) };
-        if retval == 0 {
-            Err(CryptoError::Unexpected("SHA256_Final"))
-        } else {
-            Ok(md)
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    fn to_hex_string(data: &[u8]) -> String {
-        data.iter().map(|&b| format!("{:02x}", b)).collect()
-    }
-
-    #[test]
-    fn verify_hash_values() -> Result<(), CryptoError> {
-        let hash = Sha256Hasher::new()?.update(&[0; 0])?.finalize()?;
-        let s: String = to_hex_string(&hash);
-        assert_eq!(s, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855");
-
-        let hash = Sha256Hasher::new()?
-            .update(&[1u8; 1])?
-            .update(&[2u8; 1])?
-            .update(&[3u8; 1])?
-            .finalize()?;
-        let s: String = to_hex_string(&hash);
-        assert_eq!(s, "039058c6f2c0cb492c533b0a4d14ef77cc0f78abccced5287d84a1a2011cfb81");
-        Ok(())
-    }
-
-    #[test]
-    fn sha256_of_4096_zeros() -> Result<(), CryptoError> {
-        let hash = Sha256Hasher::new()?.update(&[0u8; 4096])?.finalize()?;
-        assert_eq!(hash, Sha256Hasher::HASH_OF_4096_ZEROS);
-        Ok(())
-    }
-}
diff --git a/authfs/src/fsverity/builder.rs b/authfs/src/fsverity/builder.rs
index fda47bc..8585fdf 100644
--- a/authfs/src/fsverity/builder.rs
+++ b/authfs/src/fsverity/builder.rs
@@ -14,13 +14,20 @@
  * limitations under the License.
  */
 
-use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
+use super::common::{
+    build_fsverity_digest, merkle_tree_height, FsverityError, Sha256Hash, SHA256_HASH_SIZE,
+};
 use crate::common::{divide_roundup, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+use openssl::sha::Sha256;
 
-const HASH_SIZE: usize = Sha256Hasher::HASH_SIZE;
+const HASH_SIZE: usize = SHA256_HASH_SIZE;
 const HASH_PER_PAGE: usize = CHUNK_SIZE as usize / HASH_SIZE;
 
+const HASH_OF_4096_ZEROS: Sha256Hash = [
+    0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9, 0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
+    0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a, 0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7,
+];
+
 /// MerkleLeaves can be used by the class' customer for bookkeeping integrity data for their bytes.
 /// It can also be used to generate the standard fs-verity digest for the source data.
 ///
@@ -34,12 +41,17 @@
     file_size: u64,
 }
 
-fn hash_all_pages(source: &[Sha256Hash]) -> Result<Vec<Sha256Hash>, CryptoError> {
+fn hash_all_pages(source: &[Sha256Hash]) -> Vec<Sha256Hash> {
     source
         .chunks(HASH_PER_PAGE)
         .map(|chunk| {
             let padding_bytes = (HASH_PER_PAGE - chunk.len()) * HASH_SIZE;
-            Sha256Hasher::new()?.update_from(chunk)?.update(&vec![0u8; padding_bytes])?.finalize()
+            let mut ctx = Sha256::new();
+            for data in chunk {
+                ctx.update(data.as_ref());
+            }
+            ctx.update(&vec![0u8; padding_bytes]);
+            ctx.finish()
         })
         .collect()
 }
@@ -64,7 +76,7 @@
     pub fn resize(&mut self, new_file_size: usize) {
         let new_file_size = new_file_size as u64;
         let leaves_size = divide_roundup(new_file_size, CHUNK_SIZE);
-        self.leaves.resize(leaves_size as usize, Sha256Hasher::HASH_OF_4096_ZEROS);
+        self.leaves.resize(leaves_size as usize, HASH_OF_4096_ZEROS);
         self.file_size = new_file_size;
     }
 
@@ -75,7 +87,7 @@
         if self.leaves.len() < index + 1 {
             // When resizing, fill in hash of zeros by default. This makes it easy to handle holes
             // in a file.
-            self.leaves.resize(index + 1, Sha256Hasher::HASH_OF_4096_ZEROS);
+            self.leaves.resize(index + 1, HASH_OF_4096_ZEROS);
         }
         self.leaves[index].clone_from_slice(hash);
 
@@ -116,9 +128,8 @@
 
                 // `leaves` is owned and can't be the initial state below. Here we manually hash it
                 // first to avoid a copy and to get the type right.
-                let second_level = hash_all_pages(&self.leaves)?;
-                let hashes =
-                    (1..=level).try_fold(second_level, |source, _| hash_all_pages(&source))?;
+                let second_level = hash_all_pages(&self.leaves);
+                let hashes = (1..=level).fold(second_level, |source, _| hash_all_pages(&source));
                 if hashes.len() != 1 {
                     Err(FsverityError::InvalidState)
                 } else {
@@ -131,7 +142,7 @@
     /// Returns the fs-verity digest based on the current tree and file size.
     pub fn calculate_fsverity_digest(&self) -> Result<Sha256Hash, FsverityError> {
         let root_hash = self.calculate_root_hash()?;
-        Ok(build_fsverity_digest(&root_hash, self.file_size)?)
+        Ok(build_fsverity_digest(&root_hash, self.file_size))
     }
 }
 
@@ -143,6 +154,7 @@
     //  $ fsverity digest foo
     use super::*;
     use anyhow::Result;
+    use openssl::sha::sha256;
 
     #[test]
     fn merkle_tree_empty_file() -> Result<()> {
@@ -194,7 +206,7 @@
     #[test]
     fn merkle_tree_non_sequential() -> Result<()> {
         let mut tree = MerkleLeaves::new();
-        let hash = Sha256Hasher::new()?.update(&vec![1u8; CHUNK_SIZE as usize])?.finalize()?;
+        let hash = sha256(&vec![1u8; CHUNK_SIZE as usize]);
 
         // Update hashes of 4 1-blocks.
         tree.update_hash(1, &hash, CHUNK_SIZE * 2);
@@ -221,8 +233,8 @@
         assert!(tree.is_index_valid(1));
         assert!(tree.is_index_valid(2));
         assert!(!tree.is_index_valid(3));
-        assert!(tree.is_consistent(1, &Sha256Hasher::HASH_OF_4096_ZEROS));
-        assert!(tree.is_consistent(2, &Sha256Hasher::HASH_OF_4096_ZEROS));
+        assert!(tree.is_consistent(1, &HASH_OF_4096_ZEROS));
+        assert!(tree.is_consistent(2, &HASH_OF_4096_ZEROS));
         Ok(())
     }
 
@@ -240,17 +252,17 @@
         assert!(!tree.is_index_valid(2));
         // The second chunk is a hole and full of zero. When shrunk, with zero padding, the hash
         // happens to be consistent to a full-zero chunk.
-        assert!(tree.is_consistent(1, &Sha256Hasher::HASH_OF_4096_ZEROS));
+        assert!(tree.is_consistent(1, &HASH_OF_4096_ZEROS));
         Ok(())
     }
 
     fn generate_fsverity_digest_sequentially(test_data: &[u8]) -> Result<Sha256Hash> {
         let mut tree = MerkleLeaves::new();
         for (index, chunk) in test_data.chunks(CHUNK_SIZE as usize).enumerate() {
-            let hash = Sha256Hasher::new()?
-                .update(chunk)?
-                .update(&vec![0u8; CHUNK_SIZE as usize - chunk.len()])?
-                .finalize()?;
+            let mut ctx = Sha256::new();
+            ctx.update(chunk);
+            ctx.update(&vec![0u8; CHUNK_SIZE as usize - chunk.len()]);
+            let hash = ctx.finish();
 
             tree.update_hash(index, &hash, CHUNK_SIZE * index as u64 + chunk.len() as u64);
         }
diff --git a/authfs/src/fsverity/common.rs b/authfs/src/fsverity/common.rs
index eba379d..cb268ef 100644
--- a/authfs/src/fsverity/common.rs
+++ b/authfs/src/fsverity/common.rs
@@ -20,7 +20,13 @@
 
 use super::sys::{FS_VERITY_HASH_ALG_SHA256, FS_VERITY_LOG_BLOCKSIZE, FS_VERITY_VERSION};
 use crate::common::{divide_roundup, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
+use openssl::sha::Sha256;
+
+/// Output size of SHA-256 in bytes.
+pub const SHA256_HASH_SIZE: usize = 32;
+
+/// A SHA-256 hash.
+pub type Sha256Hash = [u8; SHA256_HASH_SIZE];
 
 #[derive(Error, Debug)]
 pub enum FsverityError {
@@ -32,8 +38,6 @@
     CannotVerify,
     #[error("I/O error")]
     Io(#[from] io::Error),
-    #[error("Crypto")]
-    UnexpectedCryptoError(#[from] CryptoError),
     #[error("Invalid state")]
     InvalidState,
 }
@@ -47,7 +51,7 @@
 
 /// Return the Merkle tree height for our tree configuration, or None if the size is 0.
 pub fn merkle_tree_height(data_size: u64) -> Option<u64> {
-    let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
+    let hashes_per_node = CHUNK_SIZE / SHA256_HASH_SIZE as u64;
     let hash_pages = divide_roundup(data_size, hashes_per_node * CHUNK_SIZE);
     log128_ceil(hash_pages)
 }
@@ -56,7 +60,7 @@
 pub fn merkle_tree_size(mut data_size: u64) -> u64 {
     let mut total = 0;
     while data_size > CHUNK_SIZE {
-        let hash_size = divide_roundup(data_size, CHUNK_SIZE) * Sha256Hasher::HASH_SIZE as u64;
+        let hash_size = divide_roundup(data_size, CHUNK_SIZE) * SHA256_HASH_SIZE as u64;
         let hash_storage_size = divide_roundup(hash_size, CHUNK_SIZE) * CHUNK_SIZE;
         total += hash_storage_size;
         data_size = hash_storage_size;
@@ -64,28 +68,25 @@
     total
 }
 
-pub fn build_fsverity_digest(
-    root_hash: &Sha256Hash,
-    file_size: u64,
-) -> Result<Sha256Hash, CryptoError> {
+pub fn build_fsverity_digest(root_hash: &Sha256Hash, file_size: u64) -> Sha256Hash {
     // Little-endian byte representation of fsverity_descriptor from linux/fsverity.h
     // Not FFI-ed as it seems easier to deal with the raw bytes manually.
-    Sha256Hasher::new()?
-        .update(&FS_VERITY_VERSION.to_le_bytes())? // version
-        .update(&FS_VERITY_HASH_ALG_SHA256.to_le_bytes())? // hash_algorithm
-        .update(&FS_VERITY_LOG_BLOCKSIZE.to_le_bytes())? // log_blocksize
-        .update(&0u8.to_le_bytes())? // salt_size
-        .update(&0u32.to_le_bytes())? // sig_size
-        .update(&file_size.to_le_bytes())? // data_size
-        .update(root_hash)? // root_hash, first 32 bytes
-        .update(&[0u8; 32])? // root_hash, last 32 bytes, always 0 because we are using sha256.
-        .update(&[0u8; 32])? // salt
-        .update(&[0u8; 32])? // reserved
-        .update(&[0u8; 32])? // reserved
-        .update(&[0u8; 32])? // reserved
-        .update(&[0u8; 32])? // reserved
-        .update(&[0u8; 16])? // reserved
-        .finalize()
+    let mut hash = Sha256::new();
+    hash.update(&FS_VERITY_VERSION.to_le_bytes()); // version
+    hash.update(&FS_VERITY_HASH_ALG_SHA256.to_le_bytes()); // hash_algorithm
+    hash.update(&FS_VERITY_LOG_BLOCKSIZE.to_le_bytes()); // log_blocksize
+    hash.update(&0u8.to_le_bytes()); // salt_size
+    hash.update(&0u32.to_le_bytes()); // sig_size
+    hash.update(&file_size.to_le_bytes()); // data_size
+    hash.update(root_hash); // root_hash, first 32 bytes
+    hash.update(&[0u8; 32]); // root_hash, last 32 bytes, always 0 because we are using sha256.
+    hash.update(&[0u8; 32]); // salt
+    hash.update(&[0u8; 32]); // reserved
+    hash.update(&[0u8; 32]); // reserved
+    hash.update(&[0u8; 32]); // reserved
+    hash.update(&[0u8; 32]); // reserved
+    hash.update(&[0u8; 16]); // reserved
+    hash.finish()
 }
 
 #[cfg(test)]
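For reference, the changes above swap the hand-rolled BoringSSL bindgen wrapper (Sha256Hasher) for the openssl crate's SHA-256 helpers. A minimal sketch of the two call styles this patch relies on, assuming the openssl crate's 0.10-style API (the example function is illustrative, not part of the patch):

use openssl::sha::{sha256, Sha256};

fn sha256_example() {
    // One-shot digest of a complete buffer.
    let digest: [u8; 32] = sha256(b"hello");

    // Streaming digest: update() is infallible and finish() consumes the
    // context, so no Result plumbing is needed (unlike the removed wrapper).
    let mut ctx = Sha256::new();
    ctx.update(b"hel");
    ctx.update(b"lo");
    assert_eq!(ctx.finish(), digest);
}
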
diff --git a/authfs/src/fsverity/editor.rs b/authfs/src/fsverity/editor.rs
index 857c6d9..1e298be 100644
--- a/authfs/src/fsverity/editor.rs
+++ b/authfs/src/fsverity/editor.rs
@@ -56,17 +56,10 @@
 use std::sync::{Arc, RwLock};
 
 use super::builder::MerkleLeaves;
+use super::common::{Sha256Hash, SHA256_HASH_SIZE};
 use crate::common::{ChunkedSizeIter, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
 use crate::file::{ChunkBuffer, RandomWrite, ReadByChunk};
-
-// Implement the conversion from `CryptoError` to `io::Error` just to avoid manual error type
-// mapping below.
-impl From<CryptoError> for io::Error {
-    fn from(error: CryptoError) -> Self {
-        io::Error::new(io::ErrorKind::Other, error)
-    }
-}
+use openssl::sha::{sha256, Sha256};
 
 fn debug_assert_usize_is_u64() {
     // Since we don't need to support 32-bit CPU, make an assert to make conversion between
@@ -90,7 +83,7 @@
 
     /// Returns the fs-verity digest size in bytes.
     pub fn get_fsverity_digest_size(&self) -> usize {
-        Sha256Hasher::HASH_SIZE
+        SHA256_HASH_SIZE
     }
 
     /// Calculates the fs-verity digest of the current file.
@@ -119,7 +112,7 @@
             let size = self.read_backing_chunk_unverified(chunk_index, buf)?;
 
             // Ensure the returned buffer matches the known hash.
-            let hash = Sha256Hasher::new()?.update(buf)?.finalize()?;
+            let hash = sha256(buf);
             if !merkle_tree_locked.is_consistent(chunk_index as usize, &hash) {
                 return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
             }
@@ -147,17 +140,17 @@
             self.read_backing_chunk_unverified(output_chunk_index as u64, &mut orig_data)?;
 
             // Verify original content
-            let hash = Sha256Hasher::new()?.update(&orig_data)?.finalize()?;
+            let hash = sha256(&orig_data);
             if !merkle_tree.is_consistent(output_chunk_index, &hash) {
                 return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
             }
         }
 
-        Ok(Sha256Hasher::new()?
-            .update(&orig_data[..offset_from_alignment])?
-            .update(source)?
-            .update(&orig_data[offset_from_alignment + source.len()..])?
-            .finalize()?)
+        let mut ctx = Sha256::new();
+        ctx.update(&orig_data[..offset_from_alignment]);
+        ctx.update(source);
+        ctx.update(&orig_data[offset_from_alignment + source.len()..]);
+        Ok(ctx.finish())
     }
 
     fn new_chunk_hash(
@@ -171,7 +164,7 @@
         if current_size as u64 == CHUNK_SIZE {
             // Case 1: If the chunk is a complete one, just calculate the hash, regardless of
             // write location.
-            Ok(Sha256Hasher::new()?.update(source)?.finalize()?)
+            Ok(sha256(source))
         } else {
             // Case 2: For an incomplete write, calculate the hash based on previous data (if
             // any).
@@ -273,10 +266,10 @@
                 debug_assert!(new_tail_size <= s);
 
                 let zeros = vec![0; CHUNK_SIZE as usize - new_tail_size];
-                let new_hash = Sha256Hasher::new()?
-                    .update(&buf[..new_tail_size])?
-                    .update(&zeros)?
-                    .finalize()?;
+                let mut ctx = Sha256::new();
+                ctx.update(&buf[..new_tail_size]);
+                ctx.update(&zeros);
+                let new_hash = ctx.finish();
                 merkle_tree.update_hash(chunk_index as usize, &new_hash, size);
             }
         }
@@ -519,7 +512,7 @@
         // detects the inconsistent read.
         {
             let mut merkle_tree = file.merkle_tree.write().unwrap();
-            let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
+            let overriding_hash = [42; SHA256_HASH_SIZE];
             merkle_tree.update_hash(0, &overriding_hash, 8192);
         }
         assert!(file.write_at(&[1; 1], 2048).is_err());
@@ -532,7 +525,7 @@
         // resumed write will fail since no bytes can be written due to the same inconsistency.
         {
             let mut merkle_tree = file.merkle_tree.write().unwrap();
-            let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
+            let overriding_hash = [42; SHA256_HASH_SIZE];
             merkle_tree.update_hash(1, &overriding_hash, 8192);
         }
         assert_eq!(file.write_at(&[10; 8000], 0)?, 4096);
diff --git a/authfs/src/fsverity/metadata/Android.bp b/authfs/src/fsverity/metadata/Android.bp
index af3729f..c988884 100644
--- a/authfs/src/fsverity/metadata/Android.bp
+++ b/authfs/src/fsverity/metadata/Android.bp
@@ -18,7 +18,7 @@
     ],
     rustlibs: [
         "libauthfs_fsverity_metadata_bindgen",
-        "libring",
+        "libopenssl",
     ],
     edition: "2018",
     apex_available: ["com.android.virt"],
diff --git a/authfs/src/fsverity/metadata/metadata.rs b/authfs/src/fsverity/metadata/metadata.rs
index 8bc0617..54d0145 100644
--- a/authfs/src/fsverity/metadata/metadata.rs
+++ b/authfs/src/fsverity/metadata/metadata.rs
@@ -20,7 +20,7 @@
     FSVERITY_SIGNATURE_TYPE_NONE, FSVERITY_SIGNATURE_TYPE_PKCS7, FSVERITY_SIGNATURE_TYPE_RAW,
 };
 
-use ring::digest::{Context, SHA256};
+use openssl::sha::sha256;
 use std::cmp::min;
 use std::ffi::OsString;
 use std::fs::File;
@@ -96,14 +96,11 @@
 
         // Digest needs to be calculated with the raw value (without changing the endianness).
         let digest = match header.descriptor.hash_algorithm {
-            FSVERITY_HASH_ALG_SHA256 => {
-                let mut context = Context::new(&SHA256);
-                context.update(
-                    &back_buffer
-                        [DESCRIPTOR_OFFSET..DESCRIPTOR_OFFSET + size_of::<fsverity_descriptor>()],
-                );
-                Ok(context.finish().as_ref().to_owned())
-            }
+            FSVERITY_HASH_ALG_SHA256 => Ok(sha256(
+                &back_buffer
+                    [DESCRIPTOR_OFFSET..DESCRIPTOR_OFFSET + size_of::<fsverity_descriptor>()],
+            )
+            .to_vec()),
             alg => Err(io::Error::new(
                 io::ErrorKind::Other,
                 format!("Unsupported hash algorithm {}, continue (likely failing soon)", alg),
diff --git a/authfs/src/fsverity/verifier.rs b/authfs/src/fsverity/verifier.rs
index aaf4bf7..1434b7e 100644
--- a/authfs/src/fsverity/verifier.rs
+++ b/authfs/src/fsverity/verifier.rs
@@ -17,18 +17,21 @@
 use libc::EIO;
 use std::io;
 
-use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError};
+use super::common::{build_fsverity_digest, merkle_tree_height, FsverityError, SHA256_HASH_SIZE};
 use crate::common::{divide_roundup, CHUNK_SIZE};
-use crate::crypto::{CryptoError, Sha256Hasher};
 use crate::file::{ChunkBuffer, ReadByChunk};
+use openssl::sha::{sha256, Sha256};
 
 const ZEROS: [u8; CHUNK_SIZE as usize] = [0u8; CHUNK_SIZE as usize];
 
-type HashBuffer = [u8; Sha256Hasher::HASH_SIZE];
+type HashBuffer = [u8; SHA256_HASH_SIZE];
 
-fn hash_with_padding(chunk: &[u8], pad_to: usize) -> Result<HashBuffer, CryptoError> {
+fn hash_with_padding(chunk: &[u8], pad_to: usize) -> HashBuffer {
     let padding_size = pad_to - chunk.len();
-    Sha256Hasher::new()?.update(chunk)?.update(&ZEROS[..padding_size])?.finalize()
+    let mut ctx = Sha256::new();
+    ctx.update(chunk);
+    ctx.update(&ZEROS[..padding_size]);
+    ctx.finish()
 }
 
 fn verity_check<T: ReadByChunk>(
@@ -42,7 +45,7 @@
     // beyond the file size, including empty file.
     assert_ne!(file_size, 0);
 
-    let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize)?;
+    let chunk_hash = hash_with_padding(chunk, CHUNK_SIZE as usize);
 
     // When the file is smaller or equal to CHUNK_SIZE, the root of Merkle tree is defined as the
     // hash of the file content, plus padding.
@@ -55,11 +58,11 @@
         |actual_hash, result| {
             let (merkle_chunk, hash_offset_in_chunk) = result?;
             let expected_hash =
-                &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + Sha256Hasher::HASH_SIZE];
+                &merkle_chunk[hash_offset_in_chunk..hash_offset_in_chunk + SHA256_HASH_SIZE];
             if actual_hash != expected_hash {
                 return Err(FsverityError::CannotVerify);
             }
-            Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize)?)
+            Ok(hash_with_padding(&merkle_chunk, CHUNK_SIZE as usize))
         },
     )
 }
@@ -74,7 +77,7 @@
     file_size: u64,
     merkle_tree: &T,
 ) -> Result<impl Iterator<Item = Result<([u8; 4096], usize), FsverityError>> + '_, FsverityError> {
-    let hashes_per_node = CHUNK_SIZE / Sha256Hasher::HASH_SIZE as u64;
+    let hashes_per_node = CHUNK_SIZE / SHA256_HASH_SIZE as u64;
     debug_assert_eq!(hashes_per_node, 128u64);
     let max_level = merkle_tree_height(file_size).expect("file should not be empty") as u32;
     let root_to_leaf_steps = (0..=max_level)
@@ -85,7 +88,7 @@
             let leaves_size_per_node = leaves_size_per_hash * hashes_per_node;
             let nodes_at_level = divide_roundup(file_size, leaves_size_per_node);
             let level_size = nodes_at_level * CHUNK_SIZE;
-            let offset_in_level = (chunk_index / leaves_per_hash) * Sha256Hasher::HASH_SIZE as u64;
+            let offset_in_level = (chunk_index / leaves_per_hash) * SHA256_HASH_SIZE as u64;
             (level_size, offset_in_level)
         })
         .scan(0, |level_offset, (level_size, offset_in_level)| {
@@ -135,8 +138,8 @@
                 return Err(FsverityError::InsufficientData(size));
             }
         }
-        let root_hash = Sha256Hasher::new()?.update(&buf[..])?.finalize()?;
-        if expected_digest == build_fsverity_digest(&root_hash, file_size)? {
+        let root_hash = sha256(&buf[..]);
+        if expected_digest == build_fsverity_digest(&root_hash, file_size) {
             // Once verified, use the root_hash for verification going forward.
             Ok(VerifiedFileReader { chunked_file, file_size, merkle_tree, root_hash })
         } else {
diff --git a/authfs/src/main.rs b/authfs/src/main.rs
index 60318e8..c09ed71 100644
--- a/authfs/src/main.rs
+++ b/authfs/src/main.rs
@@ -42,7 +42,6 @@
 use structopt::StructOpt;
 
 mod common;
-mod crypto;
 mod file;
 mod fsstat;
 mod fsverity;
diff --git a/compos/common/Android.bp b/compos/common/Android.bp
index 39e7c0a..51f97f8 100644
--- a/compos/common/Android.bp
+++ b/compos/common/Android.bp
@@ -13,10 +13,10 @@
         "libanyhow",
         "libbinder_common",
         "libbinder_rpc_unstable_bindgen",
-        "libbinder_rs",
         "liblog_rust",
         "libnum_traits",
         "librustutils",
+        "libvmclient",
     ],
     proc_macros: ["libnum_derive"],
     shared_libs: [
diff --git a/compos/common/binder.rs b/compos/common/binder.rs
index 4935b80..45139f3 100644
--- a/compos/common/binder.rs
+++ b/compos/common/binder.rs
@@ -16,7 +16,7 @@
 
 //! Helper for converting Error types to what Binder expects
 
-use binder::{ExceptionCode, Result as BinderResult};
+use android_system_virtualizationservice::binder::{ExceptionCode, Result as BinderResult};
 use binder_common::new_binder_exception;
 use log::warn;
 use std::fmt::Debug;
diff --git a/compos/common/compos_client.rs b/compos/common/compos_client.rs
index 839280c..fe31b27 100644
--- a/compos/common/compos_client.rs
+++ b/compos/common/compos_client.rs
@@ -20,39 +20,27 @@
 use crate::{COMPOS_APEX_ROOT, COMPOS_DATA_ROOT, COMPOS_VSOCK_PORT, DEFAULT_VM_CONFIG_PATH};
 use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
     DeathReason::DeathReason,
-    IVirtualMachine::IVirtualMachine,
     IVirtualMachineCallback::{BnVirtualMachineCallback, IVirtualMachineCallback},
     IVirtualizationService::IVirtualizationService,
     VirtualMachineAppConfig::{DebugLevel::DebugLevel, VirtualMachineAppConfig},
     VirtualMachineConfig::VirtualMachineConfig,
 };
 use android_system_virtualizationservice::binder::{
-    wait_for_interface, BinderFeatures, DeathRecipient, IBinder, Interface, ParcelFileDescriptor,
-    Result as BinderResult, Strong,
+    BinderFeatures, Interface, ParcelFileDescriptor, Result as BinderResult, Strong,
 };
-use anyhow::{anyhow, bail, Context, Result};
-use binder::{
-    unstable_api::{new_spibinder, AIBinder},
-    FromIBinder,
-};
+use anyhow::{bail, Context, Result};
 use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
 use log::{info, warn};
 use rustutils::system_properties;
 use std::fs::{self, File};
 use std::io::{BufRead, BufReader};
 use std::num::NonZeroU32;
-use std::os::raw;
-use std::os::unix::io::IntoRawFd;
 use std::path::{Path, PathBuf};
-use std::sync::{Arc, Condvar, Mutex};
 use std::thread;
+use vmclient::VmInstance;
 
 /// This owns an instance of the CompOS VM.
-pub struct VmInstance {
-    #[allow(dead_code)] // Keeps the VM alive even if we don`t touch it
-    vm: Strong<dyn IVirtualMachine>,
-    cid: i32,
-}
+pub struct ComposClient(VmInstance);
 
 /// Parameters to be used when creating a virtual machine instance.
 #[derive(Default, Debug, Clone)]
@@ -74,14 +62,7 @@
     pub never_log: bool,
 }
 
-impl VmInstance {
-    /// Return a new connection to the Virtualization Service binder interface. This will start the
-    /// service if necessary.
-    pub fn connect_to_virtualization_service() -> Result<Strong<dyn IVirtualizationService>> {
-        wait_for_interface::<dyn IVirtualizationService>("android.system.virtualizationservice")
-            .context("Failed to find VirtualizationService")
-    }
-
+impl ComposClient {
     /// Start a new CompOS VM instance using the specified instance image file and parameters.
     pub fn start(
         service: &dyn IVirtualizationService,
@@ -89,7 +70,7 @@
         idsig: &Path,
         idsig_manifest_apk: &Path,
         parameters: &VmParameters,
-    ) -> Result<VmInstance> {
+    ) -> Result<Self> {
         let protected_vm = want_protected_vm()?;
 
         let instance_fd = ParcelFileDescriptor::new(instance_image);
@@ -97,7 +78,7 @@
         let apex_dir = Path::new(COMPOS_APEX_ROOT);
         let data_dir = Path::new(COMPOS_DATA_ROOT);
 
-        let config_apk = Self::locate_config_apk(apex_dir)?;
+        let config_apk = locate_config_apk(apex_dir)?;
         let apk_fd = File::open(config_apk).context("Failed to open config APK file")?;
         let apk_fd = ParcelFileDescriptor::new(apk_fd);
         let idsig_fd = prepare_idsig(service, &apk_fd, idsig)?;
@@ -121,8 +102,6 @@
                 .context("Failed to create console log file")?;
             let log_fd = File::create(data_dir.join("vm.log"))
                 .context("Failed to create system log file")?;
-            let console_fd = ParcelFileDescriptor::new(console_fd);
-            let log_fd = ParcelFileDescriptor::new(log_fd);
             info!("Running in debug level {:?}", debug_level);
             (Some(console_fd), Some(log_fd))
         };
@@ -142,64 +121,39 @@
             taskProfiles: parameters.task_profiles.clone(),
         });
 
-        let vm = service
-            .createVm(&config, console_fd.as_ref(), log_fd.as_ref())
+        let instance = VmInstance::create(service, &config, console_fd, log_fd)
             .context("Failed to create VM")?;
-        let vm_state = Arc::new(VmStateMonitor::default());
 
-        let vm_state_clone = Arc::clone(&vm_state);
-        let mut death_recipient = DeathRecipient::new(move || {
-            vm_state_clone.set_died();
-            log::error!("VirtualizationService died");
-        });
-        // Note that dropping death_recipient cancels this, so we can't use a temporary here.
-        vm.as_binder().link_to_death(&mut death_recipient)?;
+        let callback =
+            BnVirtualMachineCallback::new_binder(VmCallback(), BinderFeatures::default());
+        instance.vm.registerCallback(&callback)?;
 
-        let vm_state_clone = Arc::clone(&vm_state);
-        let callback = BnVirtualMachineCallback::new_binder(
-            VmCallback(vm_state_clone),
-            BinderFeatures::default(),
-        );
-        vm.registerCallback(&callback)?;
+        instance.start()?;
 
-        vm.start()?;
+        instance.wait_until_ready(timeouts()?.vm_max_time_to_ready)?;
 
-        let cid = vm_state.wait_until_ready()?;
-
-        Ok(VmInstance { vm, cid })
-    }
-
-    fn locate_config_apk(apex_dir: &Path) -> Result<PathBuf> {
-        // Our config APK will be in a directory under app, but the name of the directory is at the
-        // discretion of the build system. So just look in each sub-directory until we find it.
-        // (In practice there will be exactly one directory, so this shouldn't take long.)
-        let app_dir = apex_dir.join("app");
-        for dir in fs::read_dir(app_dir).context("Reading app dir")? {
-            let apk_file = dir?.path().join("CompOSPayloadApp.apk");
-            if apk_file.is_file() {
-                return Ok(apk_file);
-            }
-        }
-
-        bail!("Failed to locate CompOSPayloadApp.apk")
+        Ok(Self(instance))
     }
 
     /// Create and return an RPC Binder connection to the Comp OS service in the VM.
     pub fn get_service(&self) -> Result<Strong<dyn ICompOsService>> {
-        let mut vsock_factory = VsockFactory::new(&*self.vm);
+        self.0.get_service(COMPOS_VSOCK_PORT).context("Connecting to CompOS service")
+    }
+}
 
-        let ibinder = vsock_factory
-            .connect_rpc_client()
-            .ok_or_else(|| anyhow!("Failed to connect to CompOS service"))?;
-
-        FromIBinder::try_from(ibinder).context("Connecting to CompOS service")
+fn locate_config_apk(apex_dir: &Path) -> Result<PathBuf> {
+    // Our config APK will be in a directory under app, but the name of the directory is at the
+    // discretion of the build system. So just look in each sub-directory until we find it.
+    // (In practice there will be exactly one directory, so this shouldn't take long.)
+    let app_dir = apex_dir.join("app");
+    for dir in fs::read_dir(app_dir).context("Reading app dir")? {
+        let apk_file = dir?.path().join("CompOSPayloadApp.apk");
+        if apk_file.is_file() {
+            return Ok(apk_file);
+        }
     }
 
-    /// Return the CID of the VM.
-    pub fn cid(&self) -> i32 {
-        // TODO: Do we actually need/use this?
-        self.cid
-    }
+    bail!("Failed to locate CompOSPayloadApp.apk")
 }
 
 fn prepare_idsig(
@@ -245,117 +199,12 @@
     bail!("No VM support available")
 }
 
-struct VsockFactory<'a> {
-    vm: &'a dyn IVirtualMachine,
-}
-
-impl<'a> VsockFactory<'a> {
-    fn new(vm: &'a dyn IVirtualMachine) -> Self {
-        Self { vm }
-    }
-
-    fn connect_rpc_client(&mut self) -> Option<binder::SpIBinder> {
-        let param = self.as_void_ptr();
-
-        unsafe {
-            // SAFETY: AIBinder returned by RpcPreconnectedClient has correct reference count, and
-            // the ownership can be safely taken by new_spibinder.
-            // RpcPreconnectedClient does not take ownership of param, only passing it to
-            // request_fd.
-            let binder =
-                binder_rpc_unstable_bindgen::RpcPreconnectedClient(Some(Self::request_fd), param)
-                    as *mut AIBinder;
-            new_spibinder(binder)
-        }
-    }
-
-    fn as_void_ptr(&mut self) -> *mut raw::c_void {
-        self as *mut _ as *mut raw::c_void
-    }
-
-    fn try_new_vsock_fd(&self) -> Result<i32> {
-        let vsock = self.vm.connectVsock(COMPOS_VSOCK_PORT as i32)?;
-        // Ownership of the fd is transferred to binder
-        Ok(vsock.into_raw_fd())
-    }
-
-    fn new_vsock_fd(&self) -> i32 {
-        self.try_new_vsock_fd().unwrap_or_else(|e| {
-            warn!("Connecting vsock failed: {}", e);
-            -1_i32
-        })
-    }
-
-    unsafe extern "C" fn request_fd(param: *mut raw::c_void) -> raw::c_int {
-        // SAFETY: This is only ever called by RpcPreconnectedClient, within the lifetime of the
-        // VsockFactory, with param taking the value returned by as_void_ptr (so a properly aligned
-        // non-null pointer to an initialized instance).
-        let vsock_factory = param as *mut Self;
-        vsock_factory.as_ref().unwrap().new_vsock_fd()
-    }
-}
-
-#[derive(Debug, Default)]
-struct VmState {
-    has_died: bool,
-    cid: Option<i32>,
-}
-
-#[derive(Debug)]
-struct VmStateMonitor {
-    mutex: Mutex<VmState>,
-    state_ready: Condvar,
-}
-
-impl Default for VmStateMonitor {
-    fn default() -> Self {
-        Self { mutex: Mutex::new(Default::default()), state_ready: Condvar::new() }
-    }
-}
-
-impl VmStateMonitor {
-    fn set_died(&self) {
-        let mut state = self.mutex.lock().unwrap();
-        state.has_died = true;
-        state.cid = None;
-        drop(state); // Unlock the mutex prior to notifying
-        self.state_ready.notify_all();
-    }
-
-    fn set_ready(&self, cid: i32) {
-        let mut state = self.mutex.lock().unwrap();
-        if state.has_died {
-            return;
-        }
-        state.cid = Some(cid);
-        drop(state); // Unlock the mutex prior to notifying
-        self.state_ready.notify_all();
-    }
-
-    fn wait_until_ready(&self) -> Result<i32> {
-        let (state, result) = self
-            .state_ready
-            .wait_timeout_while(
-                self.mutex.lock().unwrap(),
-                timeouts()?.vm_max_time_to_ready,
-                |state| state.cid.is_none() && !state.has_died,
-            )
-            .unwrap();
-        if result.timed_out() {
-            bail!("Timed out waiting for VM")
-        }
-        state.cid.ok_or_else(|| anyhow!("VM died"))
-    }
-}
-
-#[derive(Debug)]
-struct VmCallback(Arc<VmStateMonitor>);
+struct VmCallback();
 
 impl Interface for VmCallback {}
 
 impl IVirtualMachineCallback for VmCallback {
     fn onDied(&self, cid: i32, reason: DeathReason) -> BinderResult<()> {
-        self.0.set_died();
         log::warn!("VM died, cid = {}, reason = {:?}", cid, reason);
         Ok(())
     }
@@ -375,21 +224,16 @@
     }
 
     fn onPayloadReady(&self, cid: i32) -> BinderResult<()> {
-        self.0.set_ready(cid);
         log::info!("VM payload ready, cid = {}", cid);
         Ok(())
     }
 
     fn onPayloadFinished(&self, cid: i32, exit_code: i32) -> BinderResult<()> {
-        // This should probably never happen in our case, but if it does we means our VM is no
-        // longer running
-        self.0.set_died();
         log::warn!("VM payload finished, cid = {}, exit code = {}", cid, exit_code);
         Ok(())
     }
 
     fn onError(&self, cid: i32, error_code: i32, message: &str) -> BinderResult<()> {
-        self.0.set_died();
         log::warn!("VM error, cid = {}, error code = {}, message = {}", cid, error_code, message,);
         Ok(())
     }
diff --git a/compos/composd/Android.bp b/compos/composd/Android.bp
index 55a3107..3a6119f 100644
--- a/compos/composd/Android.bp
+++ b/compos/composd/Android.bp
@@ -24,6 +24,7 @@
         "liblog_rust",
         "librustutils",
         "libshared_child",
+        "libvmclient",
     ],
     apex_available: [
         "com.android.compos",
diff --git a/compos/composd/src/composd_main.rs b/compos/composd/src/composd_main.rs
index d1b711d..ebcd689 100644
--- a/compos/composd/src/composd_main.rs
+++ b/compos/composd/src/composd_main.rs
@@ -27,7 +27,6 @@
 use crate::instance_manager::InstanceManager;
 use android_system_composd::binder::{register_lazy_service, ProcessState};
 use anyhow::{Context, Result};
-use compos_common::compos_client::VmInstance;
 use log::{error, info};
 use std::panic;
 use std::sync::Arc;
@@ -46,7 +45,8 @@
 
     ProcessState::start_thread_pool();
 
-    let virtualization_service = VmInstance::connect_to_virtualization_service()?;
+    let virtualization_service =
+        vmclient::connect().context("Failed to find VirtualizationService")?;
     let instance_manager = Arc::new(InstanceManager::new(virtualization_service));
     let composd_service = service::new_binder(instance_manager);
     register_lazy_service("android.system.composd", composd_service.as_binder())
diff --git a/compos/composd/src/instance_manager.rs b/compos/composd/src/instance_manager.rs
index 60bf20f..c45f6e7 100644
--- a/compos/composd/src/instance_manager.rs
+++ b/compos/composd/src/instance_manager.rs
@@ -98,7 +98,7 @@
         }
     };
     let cpu_set = system_properties::read(DEX2OAT_CPU_SET_PROP_NAME)?;
-    let task_profiles = vec!["VMCompilationPerformance".to_string()];
+    let task_profiles = vec!["SCHED_SP_COMPUTE".to_string()];
     Ok(VmParameters {
         cpus,
         cpu_set,
diff --git a/compos/composd/src/instance_starter.rs b/compos/composd/src/instance_starter.rs
index f899497..340e8b7 100644
--- a/compos/composd/src/instance_starter.rs
+++ b/compos/composd/src/instance_starter.rs
@@ -24,7 +24,7 @@
 use binder_common::lazy_service::LazyServiceGuard;
 use compos_aidl_interface::aidl::com::android::compos::ICompOsService::ICompOsService;
 use compos_aidl_interface::binder::{ParcelFileDescriptor, Strong};
-use compos_common::compos_client::{VmInstance, VmParameters};
+use compos_common::compos_client::{ComposClient, VmParameters};
 use compos_common::{COMPOS_DATA_ROOT, IDSIG_FILE, IDSIG_MANIFEST_APK_FILE, INSTANCE_IMAGE_FILE};
 use log::info;
 use std::fs;
@@ -33,7 +33,7 @@
 pub struct CompOsInstance {
     service: Strong<dyn ICompOsService>,
     #[allow(dead_code)] // Keeps VirtualizationService & the VM alive
-    vm_instance: VmInstance,
+    vm_instance: ComposClient,
     #[allow(dead_code)] // Keeps composd process alive
     lazy_service_guard: LazyServiceGuard,
 }
@@ -105,7 +105,7 @@
             .write(true)
             .open(&self.instance_image)
             .context("Failed to open instance image")?;
-        let vm_instance = VmInstance::start(
+        let vm_instance = ComposClient::start(
             virtualization_service,
             instance_image,
             &self.idsig,
diff --git a/compos/tests/Android.bp b/compos/tests/Android.bp
index c178ddd..b77a7e4 100644
--- a/compos/tests/Android.bp
+++ b/compos/tests/Android.bp
@@ -10,6 +10,7 @@
         "compatibility-tradefed",
         "compatibility-host-util",
     ],
+    data_native_bins: ["bcc_validator"],
     static_libs: [
         "VirtualizationTestHelper",
     ],
diff --git a/compos/tests/java/android/compos/test/ComposTestCase.java b/compos/tests/java/android/compos/test/ComposTestCase.java
index eec9e39..51f0a1f 100644
--- a/compos/tests/java/android/compos/test/ComposTestCase.java
+++ b/compos/tests/java/android/compos/test/ComposTestCase.java
@@ -16,17 +16,24 @@
 
 package android.compos.test;
 
+import static android.virt.test.CommandResultSubject.assertThat;
+import static android.virt.test.CommandResultSubject.command_results;
+
 import static com.android.tradefed.testtype.DeviceJUnit4ClassRunner.TestLogData;
 
 import static com.google.common.truth.Truth.assertThat;
+import static com.google.common.truth.Truth.assertWithMessage;
 
 import android.platform.test.annotations.RootPermissionTest;
 import android.virt.test.CommandRunner;
 import android.virt.test.VirtualizationTestCaseBase;
 
 import com.android.tradefed.log.LogUtil.CLog;
+import com.android.tradefed.result.FileInputStreamSource;
+import com.android.tradefed.result.LogDataType;
 import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
 import com.android.tradefed.util.CommandResult;
+import com.android.tradefed.util.RunUtil;
 
 import org.junit.After;
 import org.junit.Before;
@@ -35,6 +42,8 @@
 import org.junit.rules.TestName;
 import org.junit.runner.RunWith;
 
+import java.io.File;
+
 @RootPermissionTest
 @RunWith(DeviceJUnit4ClassRunner.class)
 public final class ComposTestCase extends VirtualizationTestCaseBase {
@@ -127,7 +136,7 @@
             long start = System.currentTimeMillis();
             CommandResult result = runOdrefresh(android, "--force-compile");
             long elapsed = System.currentTimeMillis() - start;
-            assertThat(result.getExitCode()).isEqualTo(COMPILATION_SUCCESS);
+            assertThat(result).exitCode().isEqualTo(COMPILATION_SUCCESS);
             CLog.i("Local compilation took " + elapsed + "ms");
         }
 
@@ -137,12 +146,7 @@
 
         // --check may delete the output.
         CommandResult result = runOdrefresh(android, "--check");
-        assertThat(result.getExitCode()).isEqualTo(OKAY);
-
-        // Make sure we generate a fresh instance.
-        android.tryRun("rm", "-rf", COMPOS_TEST_ROOT);
-        // TODO: remove once composd starts to clean up the directory.
-        android.tryRun("rm", "-rf", ODREFRESH_OUTPUT_DIR);
+        assertThat(result).exitCode().isEqualTo(OKAY);
 
         // Expect the compilation in Compilation OS to finish successfully.
         {
@@ -151,11 +155,14 @@
                     android.runForResultWithTimeout(
                             ODREFRESH_TIMEOUT_MS, COMPOSD_CMD_BIN, "test-compile");
             long elapsed = System.currentTimeMillis() - start;
-            assertThat(result.getExitCode()).isEqualTo(0);
+            assertThat(result).exitCode().isEqualTo(0);
             CLog.i("Comp OS compilation took " + elapsed + "ms");
         }
         killVmAndReconnectAdb();
 
+        // Expect the BCC extracted from the VM to be well-formed.
+        assertVmBccIsValid();
+
         // Save the actual checksum for the output directory.
         String actualChecksumSnapshot = checksumDirectoryContentPartial(android,
                 ODREFRESH_OUTPUT_DIR);
@@ -171,6 +178,24 @@
         android.run(COMPOS_VERIFY_BIN + " --debug --instance test");
     }
 
+    private void assertVmBccIsValid() throws Exception {
+        File bcc_file = getDevice().pullFile(COMPOS_APEXDATA_DIR + "/test/bcc");
+        assertThat(bcc_file).isNotNull();
+
+        // Add the BCC to test artifacts, in case it is ill-formed or otherwise interesting.
+        mTestLogs.addTestLog(bcc_file.getPath(), LogDataType.UNKNOWN,
+                new FileInputStreamSource(bcc_file));
+
+        // Find the validator binary - note that it's specified as a dependency in our Android.bp.
+        File validator = getTestInformation().getDependencyFile("bcc_validator", /*targetFirst=*/
+                false);
+
+        CommandResult result = new RunUtil().runTimedCmd(10000,
+                validator.getAbsolutePath(), "verify-chain", bcc_file.getAbsolutePath());
+        assertWithMessage("bcc_validator failed").about(command_results())
+                .that(result).isSuccess();
+    }
+
     private CommandResult runOdrefresh(CommandRunner android, String command) throws Exception {
         return android.runForResultWithTimeout(
                 ODREFRESH_TIMEOUT_MS,
diff --git a/compos/verify/Android.bp b/compos/verify/Android.bp
index d6875d1..5c74e4f 100644
--- a/compos/verify/Android.bp
+++ b/compos/verify/Android.bp
@@ -15,6 +15,7 @@
         "libcompos_common",
         "libcompos_verify_native_rust",
         "liblog_rust",
+        "libvmclient",
     ],
     prefer_rlib: true,
     apex_available: [
diff --git a/compos/verify/verify.rs b/compos/verify/verify.rs
index 14ce798..7a22cfd 100644
--- a/compos/verify/verify.rs
+++ b/compos/verify/verify.rs
@@ -20,7 +20,7 @@
 use android_logger::LogId;
 use anyhow::{bail, Context, Result};
 use compos_aidl_interface::binder::ProcessState;
-use compos_common::compos_client::{VmInstance, VmParameters};
+use compos_common::compos_client::{ComposClient, VmParameters};
 use compos_common::odrefresh::{
     CURRENT_ARTIFACTS_SUBDIR, ODREFRESH_OUTPUT_ROOT_DIR, PENDING_ARTIFACTS_SUBDIR,
     TEST_ARTIFACTS_SUBDIR,
@@ -98,8 +98,8 @@
     // We need to start the thread pool to be able to receive Binder callbacks
     ProcessState::start_thread_pool();
 
-    let virtualization_service = VmInstance::connect_to_virtualization_service()?;
-    let vm_instance = VmInstance::start(
+    let virtualization_service = vmclient::connect()?;
+    let vm_instance = ComposClient::start(
         &*virtualization_service,
         instance_image,
         &idsig,
diff --git a/libs/apkverify/Android.bp b/libs/apkverify/Android.bp
index df1cac6..2445dd5 100644
--- a/libs/apkverify/Android.bp
+++ b/libs/apkverify/Android.bp
@@ -13,7 +13,7 @@
         "libbyteorder",
         "libbytes",
         "liblog_rust",
-        "libring",
+        "libopenssl",
         "libx509_parser",
         "libzip",
     ],
@@ -22,6 +22,8 @@
 rust_library {
     name: "libapkverify",
     defaults: ["libapkverify.defaults"],
+    // TODO(b/204562227): move host_supported to the defaults to include tests
+    host_supported: true,
 }
 
 rust_test {
diff --git a/libs/apkverify/src/lib.rs b/libs/apkverify/src/lib.rs
index 71ea857..290a79a 100644
--- a/libs/apkverify/src/lib.rs
+++ b/libs/apkverify/src/lib.rs
@@ -23,17 +23,5 @@
 mod v3;
 mod ziputil;
 
-use anyhow::Result;
-use std::path::Path;
-
-/// Verifies APK/APEX signing with v2/v3 scheme. On success, the public key (in DER format) is
-/// returned.
-pub fn verify<P: AsRef<Path>>(path: P) -> Result<Box<[u8]>> {
-    // TODO(jooyung) fallback to v2 when v3 not found
-    v3::verify(path)
-}
-
-/// Gets the public key (in DER format) that was used to sign the given APK/APEX file
-pub fn get_public_key_der<P: AsRef<Path>>(path: P) -> Result<Box<[u8]>> {
-    v3::get_public_key_der(path)
-}
+// TODO(jooyung) fallback to v2 when v3 not found
+pub use v3::{get_public_key_der, verify};
diff --git a/libs/apkverify/src/sigutil.rs b/libs/apkverify/src/sigutil.rs
index 23dd91e..009154f 100644
--- a/libs/apkverify/src/sigutil.rs
+++ b/libs/apkverify/src/sigutil.rs
@@ -19,7 +19,7 @@
 use anyhow::{anyhow, bail, Result};
 use byteorder::{LittleEndian, ReadBytesExt};
 use bytes::{Buf, BufMut, Bytes, BytesMut};
-use ring::digest;
+use openssl::hash::{DigestBytes, Hasher, MessageDigest};
 use std::cmp::min;
 use std::io::{Cursor, Read, Seek, SeekFrom, Take};
 
@@ -107,12 +107,12 @@
                 let slice = &mut chunk[..(chunk_size as usize)];
                 data.read_exact(slice)?;
                 digests_of_chunks.put_slice(
-                    digester.digest(slice, CHUNK_HEADER_MID, chunk_size as u32).as_ref(),
+                    digester.digest(slice, CHUNK_HEADER_MID, chunk_size as u32)?.as_ref(),
                 );
                 chunk_count += 1;
             }
         }
-        Ok(digester.digest(&digests_of_chunks, CHUNK_HEADER_TOP, chunk_count).as_ref().into())
+        Ok(digester.digest(&digests_of_chunks, CHUNK_HEADER_TOP, chunk_count)?.as_ref().into())
     }
 
     fn zip_entries(&mut self) -> Result<Take<Box<dyn Read + '_>>> {
@@ -157,7 +157,7 @@
 }
 
 struct Digester {
-    algorithm: &'static digest::Algorithm,
+    algorithm: MessageDigest,
 }
 
 const CHUNK_HEADER_TOP: &[u8] = &[0x5a];
@@ -167,8 +167,8 @@
     fn new(signature_algorithm_id: u32) -> Result<Digester> {
         let digest_algorithm_id = to_content_digest_algorithm(signature_algorithm_id)?;
         let algorithm = match digest_algorithm_id {
-            CONTENT_DIGEST_CHUNKED_SHA256 => &digest::SHA256,
-            CONTENT_DIGEST_CHUNKED_SHA512 => &digest::SHA512,
+            CONTENT_DIGEST_CHUNKED_SHA256 => MessageDigest::sha256(),
+            CONTENT_DIGEST_CHUNKED_SHA512 => MessageDigest::sha512(),
             // TODO(jooyung): implement
             CONTENT_DIGEST_VERITY_CHUNKED_SHA256 => {
                 bail!("TODO(b/190343842): CONTENT_DIGEST_VERITY_CHUNKED_SHA256: not implemented")
@@ -179,12 +179,12 @@
     }
 
     // v2/v3 digests are computed after prepending "header" byte and "size" info.
-    fn digest(&self, data: &[u8], header: &[u8], size: u32) -> digest::Digest {
-        let mut ctx = digest::Context::new(self.algorithm);
-        ctx.update(header);
-        ctx.update(&size.to_le_bytes());
-        ctx.update(data);
-        ctx.finish()
+    fn digest(&self, data: &[u8], header: &[u8], size: u32) -> Result<DigestBytes> {
+        let mut ctx = Hasher::new(self.algorithm)?;
+        ctx.update(header)?;
+        ctx.update(&size.to_le_bytes())?;
+        ctx.update(data)?;
+        Ok(ctx.finish()?)
     }
 }
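For reference, the Digester now goes through the openssl crate's generic Hasher, which lets the digest algorithm (SHA-256 or SHA-512) be chosen at runtime from the signature algorithm ID. A minimal standalone sketch of that API; the function below is illustrative, not part of the patch:

use openssl::error::ErrorStack;
use openssl::hash::{Hasher, MessageDigest};

// Hash `data` with a prepended header, mirroring the chunk-digest layout above.
fn digest_with_header(algorithm: MessageDigest, header: &[u8], data: &[u8]) -> Result<Vec<u8>, ErrorStack> {
    let mut ctx = Hasher::new(algorithm)?;
    ctx.update(header)?;
    ctx.update(data)?;
    // finish() yields DigestBytes; as_ref() gives the raw digest bytes.
    Ok(ctx.finish()?.as_ref().to_vec())
}
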
 
diff --git a/libs/apkverify/src/v3.rs b/libs/apkverify/src/v3.rs
index 797911b..16530be 100644
--- a/libs/apkverify/src/v3.rs
+++ b/libs/apkverify/src/v3.rs
@@ -19,12 +19,12 @@
 // TODO(jooyung) remove this
 #![allow(dead_code)]
 
-use anyhow::{anyhow, bail, Context, Result};
+use anyhow::{anyhow, bail, ensure, Context, Result};
 use bytes::Bytes;
-use ring::signature::{
-    UnparsedPublicKey, VerificationAlgorithm, ECDSA_P256_SHA256_ASN1, RSA_PKCS1_2048_8192_SHA256,
-    RSA_PKCS1_2048_8192_SHA512, RSA_PSS_2048_8192_SHA256, RSA_PSS_2048_8192_SHA512,
-};
+use openssl::hash::MessageDigest;
+use openssl::pkey::{self, PKey};
+use openssl::rsa::Padding;
+use openssl::sign::Verifier;
 use std::fs::File;
 use std::io::{Read, Seek};
 use std::ops::Range;
@@ -87,7 +87,7 @@
 type AdditionalAttributes = Bytes;
 
 /// Verifies APK Signature Scheme v3 signatures of the provided APK and returns the public key
-/// associated with the signer.
+/// associated with the signer in DER format.
 pub fn verify<P: AsRef<Path>>(path: P) -> Result<Box<[u8]>> {
     let f = File::open(path.as_ref())?;
     let mut sections = ApkSections::new(f)?;
@@ -200,14 +200,22 @@
     signature: &Signature,
     key_info: &SubjectPublicKeyInfo,
 ) -> Result<()> {
-    let verification_alg: &dyn VerificationAlgorithm = match signature.signature_algorithm_id {
-        SIGNATURE_RSA_PSS_WITH_SHA256 => &RSA_PSS_2048_8192_SHA256,
-        SIGNATURE_RSA_PSS_WITH_SHA512 => &RSA_PSS_2048_8192_SHA512,
-        SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA256 | SIGNATURE_VERITY_RSA_PKCS1_V1_5_WITH_SHA256 => {
-            &RSA_PKCS1_2048_8192_SHA256
+    let (pkey_id, padding, digest) = match signature.signature_algorithm_id {
+        SIGNATURE_RSA_PSS_WITH_SHA256 => {
+            (pkey::Id::RSA, Padding::PKCS1_PSS, MessageDigest::sha256())
         }
-        SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA512 => &RSA_PKCS1_2048_8192_SHA512,
-        SIGNATURE_ECDSA_WITH_SHA256 | SIGNATURE_VERITY_ECDSA_WITH_SHA256 => &ECDSA_P256_SHA256_ASN1,
+        SIGNATURE_RSA_PSS_WITH_SHA512 => {
+            (pkey::Id::RSA, Padding::PKCS1_PSS, MessageDigest::sha512())
+        }
+        SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA256 | SIGNATURE_VERITY_RSA_PKCS1_V1_5_WITH_SHA256 => {
+            (pkey::Id::RSA, Padding::PKCS1, MessageDigest::sha256())
+        }
+        SIGNATURE_RSA_PKCS1_V1_5_WITH_SHA512 => {
+            (pkey::Id::RSA, Padding::PKCS1, MessageDigest::sha512())
+        }
+        SIGNATURE_ECDSA_WITH_SHA256 | SIGNATURE_VERITY_ECDSA_WITH_SHA256 => {
+            (pkey::Id::EC, Padding::NONE, MessageDigest::sha256())
+        }
         // TODO(b/190343842) not implemented signature algorithm
         SIGNATURE_ECDSA_WITH_SHA512
         | SIGNATURE_DSA_WITH_SHA256
@@ -219,8 +227,15 @@
         }
         _ => bail!("Unsupported signature algorithm: {:#x}", signature.signature_algorithm_id),
     };
-    let key = UnparsedPublicKey::new(verification_alg, &key_info.subject_public_key);
-    key.verify(data.as_ref(), signature.signature.as_ref())?;
+    let key = PKey::public_key_from_der(key_info.raw)?;
+    ensure!(key.id() == pkey_id, "Public key has the wrong ID");
+    let mut verifier = Verifier::new(digest, &key)?;
+    if pkey_id == pkey::Id::RSA {
+        verifier.set_rsa_padding(padding)?;
+    }
+    verifier.update(data)?;
+    let verified = verifier.verify(&signature.signature)?;
+    ensure!(verified, "Signature is invalid");
     Ok(())
 }
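For reference, a self-contained sketch (not part of the patch) of the same openssl verification flow for a single algorithm, RSA-PSS with SHA-256. Here der_key, data and sig stand in for the SubjectPublicKeyInfo, signed data and signature handled above.

    use anyhow::{ensure, Result};
    use openssl::{hash::MessageDigest, pkey::PKey, rsa::Padding, sign::Verifier};

    fn verify_rsa_pss_sha256(der_key: &[u8], data: &[u8], sig: &[u8]) -> Result<()> {
        // Parse the DER-encoded SubjectPublicKeyInfo into an EVP_PKEY.
        let key = PKey::public_key_from_der(der_key)?;
        let mut verifier = Verifier::new(MessageDigest::sha256(), &key)?;
        // PSS padding only applies to RSA keys; EC keys would skip this call.
        verifier.set_rsa_padding(Padding::PKCS1_PSS)?;
        verifier.update(data)?;
        ensure!(verifier.verify(sig)?, "Signature is invalid");
        Ok(())
    }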
 
diff --git a/libs/apkverify/tests/apkverify_test.rs b/libs/apkverify/tests/apkverify_test.rs
index ade4468..22faba4 100644
--- a/libs/apkverify/tests/apkverify_test.rs
+++ b/libs/apkverify/tests/apkverify_test.rs
@@ -23,6 +23,11 @@
 }
 
 #[test]
+fn test_verify_v3_ecdsa_sha256_p256() {
+    assert!(verify("tests/data/v3-only-with-ecdsa-sha256-p256.apk").is_ok());
+}
+
+#[test]
 fn test_verify_v3_digest_mismatch() {
     let res = verify("tests/data/v3-only-with-rsa-pkcs1-sha512-8192-digest-mismatch.apk");
     assert!(res.is_err());
diff --git a/libs/apkverify/tests/data/v3-only-with-ecdsa-sha256-p256.apk b/libs/apkverify/tests/data/v3-only-with-ecdsa-sha256-p256.apk
new file mode 100644
index 0000000..5ef4fec
--- /dev/null
+++ b/libs/apkverify/tests/data/v3-only-with-ecdsa-sha256-p256.apk
Binary files differ
diff --git a/libs/idsig/Android.bp b/libs/idsig/Android.bp
index 3f70a64..2e9c663 100644
--- a/libs/idsig/Android.bp
+++ b/libs/idsig/Android.bp
@@ -11,8 +11,8 @@
     rustlibs: [
         "libanyhow",
         "libbyteorder",
-        "libring",
         "libnum_traits",
+        "libopenssl",
     ],
     proc_macros: ["libnum_derive"],
 }
diff --git a/libs/idsig/src/apksigv4.rs b/libs/idsig/src/apksigv4.rs
index a5578d8..3004ed1 100644
--- a/libs/idsig/src/apksigv4.rs
+++ b/libs/idsig/src/apksigv4.rs
@@ -175,7 +175,7 @@
 
         // Create hash tree (and root hash)
         let algorithm = match algorithm {
-            HashAlgorithm::SHA256 => &ring::digest::SHA256,
+            HashAlgorithm::SHA256 => openssl::hash::MessageDigest::sha256(),
         };
         let hash_tree = HashTree::from(&mut apk, size, salt, block_size, algorithm)?;
 
diff --git a/libs/idsig/src/hashtree.rs b/libs/idsig/src/hashtree.rs
index 63f83ea..038f839 100644
--- a/libs/idsig/src/hashtree.rs
+++ b/libs/idsig/src/hashtree.rs
@@ -14,9 +14,7 @@
  * limitations under the License.
  */
 
-pub use ring::digest::{Algorithm, Digest};
-
-use ring::digest;
+use openssl::hash::{DigestBytes, Hasher, MessageDigest};
 use std::io::{Cursor, Read, Result, Write};
 
 /// `HashTree` is a merkle tree (and its root hash) that is compatible with fs-verity.
@@ -35,7 +33,7 @@
         input_size: usize,
         salt: &[u8],
         block_size: usize,
-        algorithm: &'static Algorithm,
+        algorithm: MessageDigest,
     ) -> Result<Self> {
         let salt = zero_pad_salt(salt, algorithm);
         let tree = generate_hash_tree(input, input_size, &salt, block_size, algorithm)?;
@@ -45,10 +43,10 @@
         let root_hash = if tree.is_empty() {
             let mut data = Vec::new();
             input.read_to_end(&mut data)?;
-            hash_one_block(&data, &salt, block_size, algorithm).as_ref().to_vec()
+            hash_one_block(&data, &salt, block_size, algorithm)?.as_ref().to_vec()
         } else {
             let first_block = &tree[0..block_size];
-            hash_one_block(first_block, &salt, block_size, algorithm).as_ref().to_vec()
+            hash_one_block(first_block, &salt, block_size, algorithm)?.as_ref().to_vec()
         };
         Ok(HashTree { tree, root_hash })
     }
@@ -69,9 +67,9 @@
     input_size: usize,
     salt: &[u8],
     block_size: usize,
-    algorithm: &'static Algorithm,
+    algorithm: MessageDigest,
 ) -> Result<Vec<u8>> {
-    let digest_size = algorithm.output_len;
+    let digest_size = algorithm.size();
     let levels = calc_hash_levels(input_size, block_size, digest_size);
     let tree_size = levels.iter().map(|r| r.len()).sum();
 
@@ -89,7 +87,7 @@
             let mut num_blocks = (input_size + block_size - 1) / block_size;
             while num_blocks > 0 {
                 input.read_exact(&mut a_block)?;
-                let h = hash_one_block(&a_block, salt, block_size, algorithm);
+                let h = hash_one_block(&a_block, salt, block_size, algorithm)?;
                 level0.write_all(h.as_ref()).unwrap();
                 num_blocks -= 1;
             }
@@ -102,10 +100,10 @@
             let cur_and_prev = &mut hash_tree[cur.start..prev.end];
             let (cur, prev) = cur_and_prev.split_at_mut(prev.start - cur.start);
             let mut cur = Cursor::new(cur);
-            prev.chunks(block_size).for_each(|data| {
-                let h = hash_one_block(data, salt, block_size, algorithm);
+            for data in prev.chunks(block_size) {
+                let h = hash_one_block(data, salt, block_size, algorithm)?;
                 cur.write_all(h.as_ref()).unwrap();
-            });
+            }
         }
     }
     Ok(hash_tree)
@@ -117,14 +115,14 @@
     input: &[u8],
     salt: &[u8],
     block_size: usize,
-    algorithm: &'static Algorithm,
-) -> Digest {
-    let mut ctx = digest::Context::new(algorithm);
-    ctx.update(salt);
-    ctx.update(input);
+    algorithm: MessageDigest,
+) -> Result<DigestBytes> {
+    let mut ctx = Hasher::new(algorithm)?;
+    ctx.update(salt)?;
+    ctx.update(input)?;
     let pad_size = block_size - input.len();
-    ctx.update(&vec![0; pad_size]);
-    ctx.finish()
+    ctx.update(&vec![0; pad_size])?;
+    Ok(ctx.finish()?)
 }
 
 type Range = std::ops::Range<usize>;
@@ -180,11 +178,11 @@
 /// If a salt was specified, then it’s zero-padded to the closest multiple of the input size of the
 /// hash algorithm’s compression function, e.g. 64 bytes for SHA-256 or 128 bytes for SHA-512. The
 /// padded salt is prepended to every data or Merkle tree block that is hashed.
-fn zero_pad_salt(salt: &[u8], algorithm: &Algorithm) -> Vec<u8> {
+fn zero_pad_salt(salt: &[u8], algorithm: MessageDigest) -> Vec<u8> {
     if salt.is_empty() {
         salt.to_vec()
     } else {
-        let padded_len = round_to_multiple(salt.len(), algorithm.block_len);
+        let padded_len = round_to_multiple(salt.len(), algorithm.block_size());
         let mut salt = salt.to_vec();
         salt.resize(padded_len, 0);
         salt
@@ -194,7 +192,7 @@
 #[cfg(test)]
 mod tests {
     use super::*;
-    use ring::digest;
+    use openssl::hash::MessageDigest;
     use std::fs::{self, File};
 
     #[test]
@@ -210,7 +208,7 @@
 
             let size = std::fs::metadata(&input_name)?.len() as usize;
             let salt = vec![1, 2, 3, 4, 5, 6];
-            let ht = HashTree::from(&mut input, size, &salt, 4096, &digest::SHA256)?;
+            let ht = HashTree::from(&mut input, size, &salt, 4096, MessageDigest::sha256())?;
 
             assert_eq!(golden_hash_tree.as_slice(), ht.tree.as_slice());
             assert_eq!(golden_root_hash, ht.root_hash.as_slice());
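For reference, the two ring Algorithm properties that the hash tree code relied on map onto openssl MessageDigest accessors as follows (a sketch, not part of the patch):

    use openssl::hash::MessageDigest;

    fn main() {
        let md = MessageDigest::sha256();
        assert_eq!(md.size(), 32); // replaces ring's Algorithm::output_len
        assert_eq!(md.block_size(), 64); // replaces ring's Algorithm::block_len
    }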
diff --git a/vm/Android.bp b/vm/Android.bp
index d1d53d0..2b83ca7 100644
--- a/vm/Android.bp
+++ b/vm/Android.bp
@@ -20,6 +20,7 @@
         "libserde",
         "libstructopt",
         "libvmconfig",
+        "libvmclient",
         "libzip",
     ],
     apex_available: [
diff --git a/vm/src/main.rs b/vm/src/main.rs
index 705e38f..8450b41 100644
--- a/vm/src/main.rs
+++ b/vm/src/main.rs
@@ -17,13 +17,12 @@
 mod create_idsig;
 mod create_partition;
 mod run;
-mod sync;
 
 use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
     IVirtualizationService::IVirtualizationService, PartitionType::PartitionType,
     VirtualMachineAppConfig::DebugLevel::DebugLevel,
 };
-use android_system_virtualizationservice::binder::{wait_for_interface, ProcessState, Strong};
+use android_system_virtualizationservice::binder::ProcessState;
 use anyhow::{Context, Error};
 use create_idsig::command_create_idsig;
 use create_partition::command_create_partition;
@@ -33,9 +32,6 @@
 use structopt::clap::AppSettings;
 use structopt::StructOpt;
 
-const VIRTUALIZATION_SERVICE_BINDER_SERVICE_IDENTIFIER: &str =
-    "android.system.virtualizationservice";
-
 #[derive(Debug)]
 struct Idsigs(Vec<PathBuf>);
 
@@ -191,9 +187,7 @@
     // We need to start the thread pool for Binder to work properly, especially link_to_death.
     ProcessState::start_thread_pool();
 
-    let service: Strong<dyn IVirtualizationService> =
-        wait_for_interface(VIRTUALIZATION_SERVICE_BINDER_SERVICE_IDENTIFIER)
-            .context("Failed to find VirtualizationService")?;
+    let service = vmclient::connect().context("Failed to find VirtualizationService")?;
 
     match opt {
         Opt::RunApp {
diff --git a/vm/src/run.rs b/vm/src/run.rs
index 2ae2c95..ca71665 100644
--- a/vm/src/run.rs
+++ b/vm/src/run.rs
@@ -15,19 +15,18 @@
 //! Command to run a VM.
 
 use crate::create_partition::command_create_partition;
-use crate::sync::AtomicFlag;
 use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
-    DeathReason::DeathReason, IVirtualMachine::IVirtualMachine,
-    IVirtualMachineCallback::BnVirtualMachineCallback,
-    IVirtualMachineCallback::IVirtualMachineCallback,
-    IVirtualizationService::IVirtualizationService, PartitionType::PartitionType,
+    DeathReason::DeathReason,
+    IVirtualMachineCallback::{BnVirtualMachineCallback, IVirtualMachineCallback},
+    IVirtualizationService::IVirtualizationService,
+    PartitionType::PartitionType,
     VirtualMachineAppConfig::DebugLevel::DebugLevel,
-    VirtualMachineAppConfig::VirtualMachineAppConfig, VirtualMachineConfig::VirtualMachineConfig,
+    VirtualMachineAppConfig::VirtualMachineAppConfig,
+    VirtualMachineConfig::VirtualMachineConfig,
     VirtualMachineState::VirtualMachineState,
 };
 use android_system_virtualizationservice::binder::{
-    BinderFeatures, DeathRecipient, IBinder, Interface, ParcelFileDescriptor,
-    Result as BinderResult,
+    BinderFeatures, Interface, ParcelFileDescriptor, Result as BinderResult,
 };
 use anyhow::{bail, Context, Error};
 use microdroid_payload_config::VmPayloadConfig;
@@ -35,6 +34,7 @@
 use std::io::{self, BufRead, BufReader};
 use std::os::unix::io::{AsRawFd, FromRawFd};
 use std::path::{Path, PathBuf};
+use vmclient::VmInstance;
 use vmconfig::{open_parcel_file, VmConfig};
 use zip::ZipArchive;
 
@@ -173,78 +173,53 @@
     log_path: Option<&Path>,
 ) -> Result<(), Error> {
     let console = if let Some(console_path) = console_path {
-        Some(ParcelFileDescriptor::new(
+        Some(
             File::create(console_path)
                 .with_context(|| format!("Failed to open console file {:?}", console_path))?,
-        ))
+        )
     } else if daemonize {
         None
     } else {
-        Some(ParcelFileDescriptor::new(duplicate_stdout()?))
+        Some(duplicate_stdout()?)
     };
     let log = if let Some(log_path) = log_path {
-        Some(ParcelFileDescriptor::new(
+        Some(
             File::create(log_path)
                 .with_context(|| format!("Failed to open log file {:?}", log_path))?,
-        ))
+        )
     } else if daemonize {
         None
     } else {
-        Some(ParcelFileDescriptor::new(duplicate_stdout()?))
+        Some(duplicate_stdout()?)
     };
 
-    let vm =
-        service.createVm(config, console.as_ref(), log.as_ref()).context("Failed to create VM")?;
+    let vm = VmInstance::create(service, config, console, log).context("Failed to create VM")?;
+    let callback =
+        BnVirtualMachineCallback::new_binder(VirtualMachineCallback {}, BinderFeatures::default());
+    vm.vm.registerCallback(&callback)?;
+    vm.start().context("Failed to start VM")?;
 
-    let cid = vm.getCid().context("Failed to get CID")?;
     println!(
         "Created VM from {} with CID {}, state is {}.",
         config_path,
-        cid,
-        state_to_str(vm.getState()?)
+        vm.cid(),
+        state_to_str(vm.state()?)
     );
-    vm.start()?;
-    println!("Started VM, state now {}.", state_to_str(vm.getState()?));
 
     if daemonize {
         // Pass the VM reference back to VirtualizationService and have it hold it in the
         // background.
-        service.debugHoldVmRef(&vm).context("Failed to pass VM to VirtualizationService")
+        service.debugHoldVmRef(&vm.vm).context("Failed to pass VM to VirtualizationService")?;
     } else {
         // Wait until the VM or VirtualizationService dies. If we just returned immediately then the
         // IVirtualMachine Binder object would be dropped and the VM would be killed.
-        wait_for_vm(vm.as_ref())
+        let death_reason = vm.wait_for_death();
+        println!("{}", death_reason);
     }
-}
 
-/// Wait until the given VM or the VirtualizationService itself dies.
-fn wait_for_vm(vm: &dyn IVirtualMachine) -> Result<(), Error> {
-    let dead = AtomicFlag::default();
-    let callback = BnVirtualMachineCallback::new_binder(
-        VirtualMachineCallback { dead: dead.clone() },
-        BinderFeatures::default(),
-    );
-    vm.registerCallback(&callback)?;
-    let death_recipient = wait_for_death(&mut vm.as_binder(), dead.clone())?;
-    dead.wait();
-    // Ensure that death_recipient isn't dropped before we wait on the flag, as it is removed
-    // from the Binder when it's dropped.
-    drop(death_recipient);
     Ok(())
 }
 
-/// Raise the given flag when the given Binder object dies.
-///
-/// If the returned DeathRecipient is dropped then this will no longer do anything.
-fn wait_for_death(binder: &mut impl IBinder, dead: AtomicFlag) -> Result<DeathRecipient, Error> {
-    let mut death_recipient = DeathRecipient::new(move || {
-        eprintln!("VirtualizationService unexpectedly died");
-        dead.raise();
-    });
-    binder.link_to_death(&mut death_recipient)?;
-    Ok(death_recipient)
-}
-
 fn parse_extra_apk_list(apk: &Path, config_path: &str) -> Result<Vec<String>, Error> {
     let mut archive = ZipArchive::new(File::open(apk)?)?;
     let config_file = archive.by_name(config_path)?;
@@ -253,9 +228,7 @@
 }
 
 #[derive(Debug)]
-struct VirtualMachineCallback {
-    dead: AtomicFlag,
-}
+struct VirtualMachineCallback {}
 
 impl Interface for VirtualMachineCallback {}
 
@@ -295,31 +268,7 @@
         Ok(())
     }
 
-    fn onDied(&self, _cid: i32, reason: DeathReason) -> BinderResult<()> {
-        self.dead.raise();
-
-        match reason {
-            DeathReason::INFRASTRUCTURE_ERROR => println!("Error waiting for VM to finish."),
-            DeathReason::KILLED => println!("VM was killed."),
-            DeathReason::UNKNOWN => println!("VM died for an unknown reason."),
-            DeathReason::SHUTDOWN => println!("VM shutdown cleanly."),
-            DeathReason::ERROR => println!("Error starting VM."),
-            DeathReason::REBOOT => println!("VM tried to reboot, possibly due to a kernel panic."),
-            DeathReason::CRASH => println!("VM crashed."),
-            DeathReason::PVM_FIRMWARE_PUBLIC_KEY_MISMATCH => println!(
-                "pVM firmware failed to verify the VM because the public key doesn't match."
-            ),
-            DeathReason::PVM_FIRMWARE_INSTANCE_IMAGE_CHANGED => {
-                println!("pVM firmware failed to verify the VM because the instance image changed.")
-            }
-            DeathReason::BOOTLOADER_PUBLIC_KEY_MISMATCH => {
-                println!("Bootloader failed to verify the VM because the public key doesn't match.")
-            }
-            DeathReason::BOOTLOADER_INSTANCE_IMAGE_CHANGED => {
-                println!("Bootloader failed to verify the VM because the instance image changed.")
-            }
-            _ => println!("VM died for an unrecognised reason."),
-        }
+    fn onDied(&self, _cid: i32, _reason: DeathReason) -> BinderResult<()> {
         Ok(())
     }
 }
diff --git a/vm/src/sync.rs b/vm/src/sync.rs
deleted file mode 100644
index 82839b3..0000000
--- a/vm/src/sync.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021, The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Synchronisation utilities.
-
-use std::sync::{Arc, Condvar, Mutex};
-
-/// A flag which one thread can use to notify other threads when a condition becomes true. This is
-/// something like a single-use binary semaphore.
-#[derive(Clone, Debug)]
-pub struct AtomicFlag {
-    state: Arc<(Mutex<bool>, Condvar)>,
-}
-
-impl Default for AtomicFlag {
-    #[allow(clippy::mutex_atomic)]
-    fn default() -> Self {
-        Self { state: Arc::new((Mutex::new(false), Condvar::new())) }
-    }
-}
-
-#[allow(clippy::mutex_atomic)]
-impl AtomicFlag {
-    /// Wait until the flag is set.
-    pub fn wait(&self) {
-        let _flag = self.state.1.wait_while(self.state.0.lock().unwrap(), |flag| !*flag).unwrap();
-    }
-
-    /// Set the flag, and notify all waiting threads.
-    pub fn raise(&self) {
-        let mut flag = self.state.0.lock().unwrap();
-        *flag = true;
-        self.state.1.notify_all();
-    }
-}
diff --git a/vmclient/Android.bp b/vmclient/Android.bp
new file mode 100644
index 0000000..8ad5adf
--- /dev/null
+++ b/vmclient/Android.bp
@@ -0,0 +1,24 @@
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_library {
+    name: "libvmclient",
+    crate_name: "vmclient",
+    srcs: ["src/lib.rs"],
+    edition: "2021",
+    rustlibs: [
+        "android.system.virtualizationservice-rust",
+        "libbinder_rpc_unstable_bindgen",
+        "libbinder_rs",
+        "liblog_rust",
+        "libthiserror",
+    ],
+    shared_libs: [
+        "libbinder_rpc_unstable",
+    ],
+    apex_available: [
+        "com.android.compos",
+        "com.android.virt",
+    ],
+}
diff --git a/vmclient/src/death_reason.rs b/vmclient/src/death_reason.rs
new file mode 100644
index 0000000..657eaa2
--- /dev/null
+++ b/vmclient/src/death_reason.rs
@@ -0,0 +1,103 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
+    DeathReason::DeathReason as AidlDeathReason,
+};
+use std::fmt::{self, Debug, Display, Formatter};
+
+/// The reason why a VM died.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum DeathReason {
+    /// VirtualizationService died.
+    VirtualizationServiceDied,
+    /// There was an error waiting for the VM.
+    InfrastructureError,
+    /// The VM was killed.
+    Killed,
+    /// The VM died for an unknown reason.
+    Unknown,
+    /// The VM requested to shut down.
+    Shutdown,
+    /// crosvm had an error starting the VM.
+    Error,
+    /// The VM requested to reboot, possibly as the result of a kernel panic.
+    Reboot,
+    /// The VM or crosvm crashed.
+    Crash,
+    /// The pVM firmware failed to verify the VM because the public key doesn't match.
+    PvmFirmwarePublicKeyMismatch,
+    /// The pVM firmware failed to verify the VM because the instance image changed.
+    PvmFirmwareInstanceImageChanged,
+    /// The bootloader failed to verify the VM because the public key doesn't match.
+    BootloaderPublicKeyMismatch,
+    /// The bootloader failed to verify the VM because the instance image changed.
+    BootloaderInstanceImageChanged,
+    /// VirtualizationService sent a death reason which was not recognised by the client library.
+    Unrecognised(AidlDeathReason),
+}
+
+impl From<AidlDeathReason> for DeathReason {
+    fn from(reason: AidlDeathReason) -> Self {
+        match reason {
+            AidlDeathReason::INFRASTRUCTURE_ERROR => Self::InfrastructureError,
+            AidlDeathReason::KILLED => Self::Killed,
+            AidlDeathReason::UNKNOWN => Self::Unknown,
+            AidlDeathReason::SHUTDOWN => Self::Shutdown,
+            AidlDeathReason::ERROR => Self::Error,
+            AidlDeathReason::REBOOT => Self::Reboot,
+            AidlDeathReason::CRASH => Self::Crash,
+            AidlDeathReason::PVM_FIRMWARE_PUBLIC_KEY_MISMATCH => Self::PvmFirmwarePublicKeyMismatch,
+            AidlDeathReason::PVM_FIRMWARE_INSTANCE_IMAGE_CHANGED => {
+                Self::PvmFirmwareInstanceImageChanged
+            }
+            AidlDeathReason::BOOTLOADER_PUBLIC_KEY_MISMATCH => Self::BootloaderPublicKeyMismatch,
+            AidlDeathReason::BOOTLOADER_INSTANCE_IMAGE_CHANGED => {
+                Self::BootloaderInstanceImageChanged
+            }
+            _ => Self::Unrecognised(reason),
+        }
+    }
+}
+
+impl Display for DeathReason {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        let s = match self {
+            Self::VirtualizationServiceDied => "VirtualizationService died.",
+            Self::InfrastructureError => "Error waiting for VM to finish.",
+            Self::Killed => "VM was killed.",
+            Self::Unknown => "VM died for an unknown reason.",
+            Self::Shutdown => "VM shutdown cleanly.",
+            Self::Error => "Error starting VM.",
+            Self::Reboot => "VM tried to reboot, possibly due to a kernel panic.",
+            Self::Crash => "VM crashed.",
+            Self::PvmFirmwarePublicKeyMismatch => {
+                "pVM firmware failed to verify the VM because the public key doesn't match."
+            }
+            Self::PvmFirmwareInstanceImageChanged => {
+                "pVM firmware failed to verify the VM because the instance image changed."
+            }
+            Self::BootloaderPublicKeyMismatch => {
+                "Bootloader failed to verify the VM because the public key doesn't match."
+            }
+            Self::BootloaderInstanceImageChanged => {
+                "Bootloader failed to verify the VM because the instance image changed."
+            }
+            Self::Unrecognised(reason) => {
+                return write!(f, "Unrecognised death reason {:?}.", reason);
+            }
+        };
+        f.write_str(s)
+    }
+}
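A sketch (not part of the patch) of the conversion and formatting path this enables for callers of wait_for_death:

    use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
        DeathReason::DeathReason as AidlDeathReason,
    };
    use vmclient::DeathReason;

    fn report(aidl_reason: AidlDeathReason) {
        let reason = DeathReason::from(aidl_reason);
        println!("{}", reason); // e.g. "VM shutdown cleanly." for AidlDeathReason::SHUTDOWN
    }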
diff --git a/vmclient/src/errors.rs b/vmclient/src/errors.rs
new file mode 100644
index 0000000..532706d
--- /dev/null
+++ b/vmclient/src/errors.rs
@@ -0,0 +1,45 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::DeathReason;
+use android_system_virtualizationservice::binder::StatusCode;
+use thiserror::Error;
+
+/// An error while waiting for a VM to do something.
+#[derive(Clone, Debug, Error)]
+pub enum VmWaitError {
+    /// Timed out waiting for the VM.
+    #[error("Timed out waiting for VM.")]
+    TimedOut,
+    /// The VM died before it was ready.
+    #[error("VM died. ({reason})")]
+    Died {
+        /// The reason why the VM died.
+        reason: DeathReason,
+    },
+    /// The VM payload finished before becoming ready.
+    #[error("VM payload finished.")]
+    Finished,
+}
+
+/// An error connecting to a VM RPC Binder service.
+#[derive(Clone, Debug, Error)]
+pub enum GetServiceError {
+    /// The RPC binder connection failed.
+    #[error("Vsock connection to RPC binder failed.")]
+    ConnectionFailed,
+    /// The AIDL service type didn't match.
+    #[error("Service type didn't match ({0}).")]
+    WrongServiceType(StatusCode),
+}
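A sketch (not part of the patch) of how a caller might branch on these errors; the returned strings are illustrative only:

    use vmclient::VmWaitError;

    fn describe(err: &VmWaitError) -> String {
        match err {
            VmWaitError::TimedOut => "gave up waiting for the VM".to_string(),
            VmWaitError::Died { reason } => format!("VM died: {}", reason),
            VmWaitError::Finished => "payload finished before the VM became ready".to_string(),
        }
    }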
diff --git a/vmclient/src/lib.rs b/vmclient/src/lib.rs
new file mode 100644
index 0000000..d182b60
--- /dev/null
+++ b/vmclient/src/lib.rs
@@ -0,0 +1,227 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Client library for VirtualizationService.
+
+mod death_reason;
+mod errors;
+mod rpc_binder;
+mod sync;
+
+pub use crate::death_reason::DeathReason;
+pub use crate::errors::{GetServiceError, VmWaitError};
+use crate::{rpc_binder::VsockFactory, sync::Monitor};
+use android_system_virtualizationservice::{
+    aidl::android::system::virtualizationservice::{
+        DeathReason::DeathReason as AidlDeathReason,
+        IVirtualMachine::IVirtualMachine,
+        IVirtualMachineCallback::{BnVirtualMachineCallback, IVirtualMachineCallback},
+        IVirtualizationService::IVirtualizationService,
+        VirtualMachineConfig::VirtualMachineConfig,
+        VirtualMachineState::VirtualMachineState,
+    },
+    binder::{
+        wait_for_interface, BinderFeatures, DeathRecipient, FromIBinder, IBinder, Interface,
+        ParcelFileDescriptor, Result as BinderResult, StatusCode, Strong,
+    },
+};
+use log::warn;
+use std::{
+    fmt::{self, Debug, Formatter},
+    fs::File,
+    sync::Arc,
+    time::Duration,
+};
+
+const VIRTUALIZATION_SERVICE_BINDER_SERVICE_IDENTIFIER: &str =
+    "android.system.virtualizationservice";
+
+/// Connects to the VirtualizationService AIDL service.
+pub fn connect() -> Result<Strong<dyn IVirtualizationService>, StatusCode> {
+    wait_for_interface(VIRTUALIZATION_SERVICE_BINDER_SERVICE_IDENTIFIER)
+}
+
+/// A virtual machine which has been started by the VirtualizationService.
+pub struct VmInstance {
+    /// The `IVirtualMachine` Binder object representing the VM.
+    pub vm: Strong<dyn IVirtualMachine>,
+    cid: i32,
+    state: Arc<Monitor<VmState>>,
+    // Ensure that the DeathRecipient isn't dropped while someone might call wait_for_death, as it
+    // is removed from the Binder when it's dropped.
+    _death_recipient: DeathRecipient,
+}
+
+impl VmInstance {
+    /// Creates (but doesn't start) a new VM with the given configuration.
+    pub fn create(
+        service: &dyn IVirtualizationService,
+        config: &VirtualMachineConfig,
+        console: Option<File>,
+        log: Option<File>,
+    ) -> BinderResult<Self> {
+        let console = console.map(ParcelFileDescriptor::new);
+        let log = log.map(ParcelFileDescriptor::new);
+
+        let vm = service.createVm(config, console.as_ref(), log.as_ref())?;
+
+        let cid = vm.getCid()?;
+
+        // Register callback before starting VM, in case it dies immediately.
+        let state = Arc::new(Monitor::new(VmState::default()));
+        let callback = BnVirtualMachineCallback::new_binder(
+            VirtualMachineCallback { state: state.clone() },
+            BinderFeatures::default(),
+        );
+        vm.registerCallback(&callback)?;
+        let death_recipient = wait_for_binder_death(&mut vm.as_binder(), state.clone())?;
+
+        Ok(Self { vm, cid, state, _death_recipient: death_recipient })
+    }
+
+    /// Starts the VM.
+    pub fn start(&self) -> BinderResult<()> {
+        self.vm.start()
+    }
+
+    /// Returns the CID used for vsock connections to the VM.
+    pub fn cid(&self) -> i32 {
+        self.cid
+    }
+
+    /// Returns the current lifecycle state of the VM.
+    pub fn state(&self) -> BinderResult<VirtualMachineState> {
+        self.vm.getState()
+    }
+
+    /// Blocks until the VM or the VirtualizationService itself dies, and then returns the reason
+    /// why it died.
+    pub fn wait_for_death(&self) -> DeathReason {
+        self.state.wait_while(|state| state.death_reason.is_none()).unwrap().death_reason.unwrap()
+    }
+
+    /// Waits until the VM reports that it is ready.
+    ///
+    /// Returns an error if the VM dies first, or the `timeout` elapses before the VM is ready.
+    pub fn wait_until_ready(&self, timeout: Duration) -> Result<(), VmWaitError> {
+        let (state, timeout_result) = self
+            .state
+            .wait_timeout_while(timeout, |state| {
+                state.reported_state < VirtualMachineState::READY && state.death_reason.is_none()
+            })
+            .unwrap();
+        if timeout_result.timed_out() {
+            Err(VmWaitError::TimedOut)
+        } else if let Some(reason) = state.death_reason {
+            Err(VmWaitError::Died { reason })
+        } else if state.reported_state != VirtualMachineState::READY {
+            Err(VmWaitError::Finished)
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Tries to connect to an RPC Binder service provided by the VM on the given vsock port.
+    pub fn get_service<T: FromIBinder + ?Sized>(
+        &self,
+        port: u32,
+    ) -> Result<Strong<T>, GetServiceError> {
+        let mut vsock_factory = VsockFactory::new(&*self.vm, port);
+
+        let ibinder = vsock_factory.connect_rpc_client()?;
+
+        FromIBinder::try_from(ibinder).map_err(GetServiceError::WrongServiceType)
+    }
+}
+
+impl Debug for VmInstance {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        f.debug_struct("VmInstance").field("cid", &self.cid).field("state", &self.state).finish()
+    }
+}
+
+/// Notify the VmState when the given Binder object dies.
+///
+/// If the returned DeathRecipient is dropped then this will no longer do anything.
+fn wait_for_binder_death(
+    binder: &mut impl IBinder,
+    state: Arc<Monitor<VmState>>,
+) -> BinderResult<DeathRecipient> {
+    let mut death_recipient = DeathRecipient::new(move || {
+        warn!("VirtualizationService unexpectedly died");
+        state.notify_death(DeathReason::VirtualizationServiceDied);
+    });
+    binder.link_to_death(&mut death_recipient)?;
+    Ok(death_recipient)
+}
+
+#[derive(Debug, Default)]
+struct VmState {
+    death_reason: Option<DeathReason>,
+    reported_state: VirtualMachineState,
+}
+
+impl Monitor<VmState> {
+    fn notify_death(&self, reason: DeathReason) {
+        let state = &mut *self.state.lock().unwrap();
+        // In case this method is called more than once, ignore subsequent calls.
+        if state.death_reason.is_none() {
+            state.death_reason.replace(reason);
+            self.cv.notify_all();
+        }
+    }
+
+    fn notify_state(&self, state: VirtualMachineState) {
+        self.state.lock().unwrap().reported_state = state;
+        self.cv.notify_all();
+    }
+}
+
+#[derive(Debug)]
+struct VirtualMachineCallback {
+    state: Arc<Monitor<VmState>>,
+}
+
+impl Interface for VirtualMachineCallback {}
+
+impl IVirtualMachineCallback for VirtualMachineCallback {
+    fn onPayloadStarted(
+        &self,
+        _cid: i32,
+        _stream: Option<&ParcelFileDescriptor>,
+    ) -> BinderResult<()> {
+        self.state.notify_state(VirtualMachineState::STARTED);
+        Ok(())
+    }
+
+    fn onPayloadReady(&self, _cid: i32) -> BinderResult<()> {
+        self.state.notify_state(VirtualMachineState::READY);
+        Ok(())
+    }
+
+    fn onPayloadFinished(&self, _cid: i32, _exit_code: i32) -> BinderResult<()> {
+        self.state.notify_state(VirtualMachineState::FINISHED);
+        Ok(())
+    }
+
+    fn onError(&self, _cid: i32, _error_code: i32, _message: &str) -> BinderResult<()> {
+        self.state.notify_state(VirtualMachineState::FINISHED);
+        Ok(())
+    }
+
+    fn onDied(&self, _cid: i32, reason: AidlDeathReason) -> BinderResult<()> {
+        self.state.notify_death(reason.into());
+        Ok(())
+    }
+}
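Putting it together, a minimal sketch (not part of the patch) of the client flow that vm/src/run.rs now follows: connect, create, start, then wait. The 10-second timeout is an arbitrary example value.

    use android_system_virtualizationservice::aidl::android::system::virtualizationservice::{
        VirtualMachineConfig::VirtualMachineConfig,
    };
    use anyhow::{Context, Result};
    use std::time::Duration;
    use vmclient::VmInstance;

    fn boot(config: &VirtualMachineConfig) -> Result<()> {
        let service = vmclient::connect().context("Failed to find VirtualizationService")?;
        let vm = VmInstance::create(&*service, config, /* console */ None, /* log */ None)
            .context("Failed to create VM")?;
        vm.start().context("Failed to start VM")?;
        match vm.wait_until_ready(Duration::from_secs(10)) {
            Ok(()) => println!("VM with CID {} is ready.", vm.cid()),
            Err(e) => println!("VM did not become ready: {}", e),
        }
        Ok(())
    }

Once the VM is ready, get_service can be used to obtain a Strong<T> proxy for an RPC Binder service that the payload exports on a vsock port.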
diff --git a/vmclient/src/rpc_binder.rs b/vmclient/src/rpc_binder.rs
new file mode 100644
index 0000000..fee643f
--- /dev/null
+++ b/vmclient/src/rpc_binder.rs
@@ -0,0 +1,72 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::errors::GetServiceError;
+use android_system_virtualizationservice::{
+    aidl::android::system::virtualizationservice::IVirtualMachine::IVirtualMachine,
+};
+use binder::unstable_api::{new_spibinder, AIBinder};
+use log::warn;
+use std::os::{raw, unix::io::IntoRawFd};
+
+pub struct VsockFactory<'a> {
+    vm: &'a dyn IVirtualMachine,
+    port: u32,
+}
+
+impl<'a> VsockFactory<'a> {
+    pub fn new(vm: &'a dyn IVirtualMachine, port: u32) -> Self {
+        Self { vm, port }
+    }
+
+    pub fn connect_rpc_client(&mut self) -> Result<binder::SpIBinder, GetServiceError> {
+        let param = self.as_void_ptr();
+
+        unsafe {
+            // SAFETY: AIBinder returned by RpcPreconnectedClient has correct reference count, and
+            // the ownership can be safely taken by new_spibinder.
+            // RpcPreconnectedClient does not take ownership of param, only passing it to
+            // request_fd.
+            let binder =
+                binder_rpc_unstable_bindgen::RpcPreconnectedClient(Some(Self::request_fd), param)
+                    as *mut AIBinder;
+            new_spibinder(binder).ok_or(GetServiceError::ConnectionFailed)
+        }
+    }
+
+    fn as_void_ptr(&mut self) -> *mut raw::c_void {
+        self as *mut _ as *mut raw::c_void
+    }
+
+    fn new_vsock_fd(&self) -> i32 {
+        match self.vm.connectVsock(self.port as i32) {
+            Ok(vsock) => {
+                // Ownership of the fd is transferred to binder
+                vsock.into_raw_fd()
+            }
+            Err(e) => {
+                warn!("Vsock connection failed: {}", e);
+                -1
+            }
+        }
+    }
+
+    unsafe extern "C" fn request_fd(param: *mut raw::c_void) -> raw::c_int {
+        // SAFETY: This is only ever called by RpcPreconnectedClient, within the lifetime of the
+        // VsockFactory, with param taking the value returned by as_void_ptr (so a properly aligned
+        // non-null pointer to an initialized instance).
+        let vsock_factory = param as *mut Self;
+        vsock_factory.as_ref().unwrap().new_vsock_fd()
+    }
+}
diff --git a/vmclient/src/sync.rs b/vmclient/src/sync.rs
new file mode 100644
index 0000000..a265f60
--- /dev/null
+++ b/vmclient/src/sync.rs
@@ -0,0 +1,59 @@
+// Copyright 2022, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::{
+    sync::{Condvar, LockResult, Mutex, MutexGuard, PoisonError, WaitTimeoutResult},
+    time::Duration,
+};
+
+/// A mutex with an associated condition variable.
+#[derive(Debug)]
+pub struct Monitor<T> {
+    pub state: Mutex<T>,
+    pub cv: Condvar,
+}
+
+impl<T> Monitor<T> {
+    /// Creates a new mutex wrapping the given value, and a new condition variable to go with it.
+    pub fn new(state: T) -> Self {
+        Self { state: Mutex::new(state), cv: Condvar::default() }
+    }
+
+    /// Waits on the condition variable while the given condition holds true on the contents of the
+    /// mutex.
+    ///
+    /// Blocks until the condition variable is notified and the function returns false.
+    pub fn wait_while(&self, condition: impl FnMut(&mut T) -> bool) -> LockResult<MutexGuard<T>> {
+        self.cv.wait_while(self.state.lock()?, condition)
+    }
+
+    /// Waits on the condition variable while the given condition holds true on the contents of the
+    /// mutex, with a timeout.
+    ///
+    /// Blocks until the condition variable is notified and the function returns false, or the
+    /// timeout elapses.
+    pub fn wait_timeout_while(
+        &self,
+        timeout: Duration,
+        condition: impl FnMut(&mut T) -> bool,
+    ) -> Result<(MutexGuard<T>, WaitTimeoutResult), PoisonError<MutexGuard<T>>> {
+        self.cv
+            .wait_timeout_while(self.state.lock()?, timeout, condition)
+            .map_err(convert_poison_error)
+    }
+}
+
+fn convert_poison_error<T>(err: PoisonError<(T, WaitTimeoutResult)>) -> PoisonError<T> {
+    PoisonError::new(err.into_inner().0)
+}