Merge tag 'for_upstream' of git://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio,pc,pci: fixes,cleanups,features

more CXL patches
RSA support for crypto
fixes, cleanups all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmKrYLMPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRpwpwH/2IS+V7wS3q/XXPz1HndJLpUP/z+mkeu9W6+
# X1U9CJ+66Ag4eD5T/jzoN0JEjiTeET/3xM+PY5NYZCh6QTAmA7EfFZv99oNWpGd1
# +nyxOdaMDPSscOKjLfDziVTi/QYIZBtU6TeixL9whkipYCqmgbs5gXV8ynltmKyF
# bIJVeaXm5yQLcCTGzKzdXf+HmTErpEGDCDHFjzrLVjICRDdekElGVwYTn+ycl7p7
# oLsWWVDgqo0p86BITlrHUXUrxTXF3wyg2B59cT7Ilbb3o+Fa2GsP+o9IXMuVoNNp
# A+zrq1QZ49UO3XwkS03xDDioUQ1T/V0L4w9dEfaGvpY4Horv0HI=
# =PvmT
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 16 Jun 2022 09:56:19 AM PDT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [undefined]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of git://git.kernel.org/pub/scm/virt/kvm/mst/qemu:
  acpi/erst: fix fallthrough code upon validation failure
  vhost: also check queue state in the vhost_dev_set_log error routine
  crypto: Introduce RSA algorithm
  virtio-iommu: Add an assert check in translate routine
  virtio-iommu: Use recursive lock to avoid deadlock
  virtio-iommu: Add bypass mode support to assigned device
  virtio/vhost-user: Fix wrong vhost notifier GPtrArray size
  docs/cxl: Add switch documentation
  pci-bridge/cxl_downstream: Add a CXL switch downstream port
  pci-bridge/cxl_upstream: Add a CXL switch upstream port
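For context, the new RSA service rides on the existing cryptodev object model; a
minimal command line that exposes the builtin software backend to a guest looks
like this (a sketch using standard virtio-crypto options, not taken from this
series):

    qemu-system-x86_64 ... \
       -object cryptodev-backend-builtin,id=cryptodev0 \
       -device virtio-crypto-pci,id=crypto0,cryptodev=cryptodev0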

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit a28498b1f9 (Richard Henderson, 2022-06-16 11:25:01 -07:00)
18 changed files with 1343 additions and 158 deletions

diff --git a/backends/cryptodev-builtin.c b/backends/cryptodev-builtin.c

@@ -26,6 +26,7 @@
 #include "qapi/error.h"
 #include "standard-headers/linux/virtio_crypto.h"
 #include "crypto/cipher.h"
+#include "crypto/akcipher.h"
 #include "qom/object.h"

@@ -42,10 +43,11 @@ typedef struct CryptoDevBackendBuiltinSession {
     QCryptoCipher *cipher;
     uint8_t direction; /* encryption or decryption */
     uint8_t type; /* cipher? hash? aead? */
+    QCryptoAkCipher *akcipher;
     QTAILQ_ENTRY(CryptoDevBackendBuiltinSession) next;
 } CryptoDevBackendBuiltinSession;

-/* Max number of symmetric sessions */
+/* Max number of symmetric/asymmetric sessions */
 #define MAX_NUM_SESSIONS 256

 #define CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN 512

@@ -80,15 +82,17 @@ static void cryptodev_builtin_init(
     backend->conf.crypto_services =
                          1u << VIRTIO_CRYPTO_SERVICE_CIPHER |
                          1u << VIRTIO_CRYPTO_SERVICE_HASH |
-                         1u << VIRTIO_CRYPTO_SERVICE_MAC;
+                         1u << VIRTIO_CRYPTO_SERVICE_MAC |
+                         1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER;
     backend->conf.cipher_algo_l = 1u << VIRTIO_CRYPTO_CIPHER_AES_CBC;
     backend->conf.hash_algo = 1u << VIRTIO_CRYPTO_HASH_SHA1;
+    backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
     /*
      * Set the Maximum length of crypto request.
      * Why this value? Just avoid to overflow when
      * memory allocation for each crypto request.
      */
-    backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendSymOpInfo);
+    backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendOpInfo);
     backend->conf.max_cipher_key_len = CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN;
     backend->conf.max_auth_key_len = CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN;

@@ -148,6 +152,55 @@ err:
     return -1;
 }

+static int cryptodev_builtin_get_rsa_hash_algo(
+    int virtio_rsa_hash, Error **errp)
+{
+    switch (virtio_rsa_hash) {
+    case VIRTIO_CRYPTO_RSA_MD5:
+        return QCRYPTO_HASH_ALG_MD5;
+
+    case VIRTIO_CRYPTO_RSA_SHA1:
+        return QCRYPTO_HASH_ALG_SHA1;
+
+    case VIRTIO_CRYPTO_RSA_SHA256:
+        return QCRYPTO_HASH_ALG_SHA256;
+
+    case VIRTIO_CRYPTO_RSA_SHA512:
+        return QCRYPTO_HASH_ALG_SHA512;
+
+    default:
+        error_setg(errp, "Unsupported rsa hash algo: %d", virtio_rsa_hash);
+        return -1;
+    }
+}
+
+static int cryptodev_builtin_set_rsa_options(
+    int virtio_padding_algo,
+    int virtio_hash_algo,
+    QCryptoAkCipherOptionsRSA *opt,
+    Error **errp)
+{
+    if (virtio_padding_algo == VIRTIO_CRYPTO_RSA_PKCS1_PADDING) {
+        int hash_alg;
+
+        hash_alg = cryptodev_builtin_get_rsa_hash_algo(virtio_hash_algo, errp);
+        if (hash_alg < 0) {
+            return -1;
+        }
+        opt->hash_alg = hash_alg;
+        opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1;
+        return 0;
+    }
+
+    if (virtio_padding_algo == VIRTIO_CRYPTO_RSA_RAW_PADDING) {
+        opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW;
+        return 0;
+    }
+
+    error_setg(errp, "Unsupported rsa padding algo: %d", virtio_padding_algo);
+    return -1;
+}
+
 static int cryptodev_builtin_create_cipher_session(
     CryptoDevBackendBuiltin *builtin,
     CryptoDevBackendSymSessionInfo *sess_info,

@@ -240,26 +293,89 @@ static int cryptodev_builtin_create_cipher_session(
     return index;
 }

-static int64_t cryptodev_builtin_sym_create_session(
+static int cryptodev_builtin_create_akcipher_session(
+    CryptoDevBackendBuiltin *builtin,
+    CryptoDevBackendAsymSessionInfo *sess_info,
+    Error **errp)
+{
+    CryptoDevBackendBuiltinSession *sess;
+    QCryptoAkCipher *akcipher;
+    int index;
+    QCryptoAkCipherKeyType type;
+    QCryptoAkCipherOptions opts;
+
+    switch (sess_info->algo) {
+    case VIRTIO_CRYPTO_AKCIPHER_RSA:
+        opts.alg = QCRYPTO_AKCIPHER_ALG_RSA;
+        if (cryptodev_builtin_set_rsa_options(sess_info->u.rsa.padding_algo,
+            sess_info->u.rsa.hash_algo, &opts.u.rsa, errp) != 0) {
+            return -1;
+        }
+        break;
+
+    /* TODO support DSA&ECDSA until qemu crypto framework support these */
+
+    default:
+        error_setg(errp, "Unsupported akcipher alg %u", sess_info->algo);
+        return -1;
+    }
+
+    switch (sess_info->keytype) {
+    case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC:
+        type = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
+        break;
+
+    case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE:
+        type = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
+        break;
+
+    default:
+        error_setg(errp, "Unsupported akcipher keytype %u", sess_info->keytype);
+        return -1;
+    }
+
+    index = cryptodev_builtin_get_unused_session_index(builtin);
+    if (index < 0) {
+        error_setg(errp, "Total number of sessions created exceeds %u",
+                   MAX_NUM_SESSIONS);
+        return -1;
+    }
+
+    akcipher = qcrypto_akcipher_new(&opts, type, sess_info->key,
+                                    sess_info->keylen, errp);
+    if (!akcipher) {
+        return -1;
+    }
+
+    sess = g_new0(CryptoDevBackendBuiltinSession, 1);
+    sess->akcipher = akcipher;
+
+    builtin->sessions[index] = sess;
+
+    return index;
+}
+
+static int64_t cryptodev_builtin_create_session(
     CryptoDevBackend *backend,
-    CryptoDevBackendSymSessionInfo *sess_info,
+    CryptoDevBackendSessionInfo *sess_info,
     uint32_t queue_index, Error **errp)
 {
     CryptoDevBackendBuiltin *builtin =
                       CRYPTODEV_BACKEND_BUILTIN(backend);
-    int64_t session_id = -1;
-    int ret;
+    CryptoDevBackendSymSessionInfo *sym_sess_info;
+    CryptoDevBackendAsymSessionInfo *asym_sess_info;

     switch (sess_info->op_code) {
     case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
-        ret = cryptodev_builtin_create_cipher_session(
-                           builtin, sess_info, errp);
-        if (ret < 0) {
-            return ret;
-        } else {
-            session_id = ret;
-        }
-        break;
+        sym_sess_info = &sess_info->u.sym_sess_info;
+        return cryptodev_builtin_create_cipher_session(
+                   builtin, sym_sess_info, errp);
+
+    case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
+        asym_sess_info = &sess_info->u.asym_sess_info;
+        return cryptodev_builtin_create_akcipher_session(
+                   builtin, asym_sess_info, errp);
+
     case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
     case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
     default:

@@ -268,50 +384,44 @@ static int64_t cryptodev_builtin_sym_create_session(
         return -1;
     }

-    return session_id;
+    return -1;
 }

-static int cryptodev_builtin_sym_close_session(
+static int cryptodev_builtin_close_session(
     CryptoDevBackend *backend,
     uint64_t session_id,
     uint32_t queue_index, Error **errp)
 {
     CryptoDevBackendBuiltin *builtin =
                       CRYPTODEV_BACKEND_BUILTIN(backend);
+    CryptoDevBackendBuiltinSession *session;

     assert(session_id < MAX_NUM_SESSIONS && builtin->sessions[session_id]);
-    qcrypto_cipher_free(builtin->sessions[session_id]->cipher);
-    g_free(builtin->sessions[session_id]);
+    session = builtin->sessions[session_id];
+    if (session->cipher) {
+        qcrypto_cipher_free(session->cipher);
+    } else if (session->akcipher) {
+        qcrypto_akcipher_free(session->akcipher);
+    }
+    g_free(session);

     builtin->sessions[session_id] = NULL;
     return 0;
 }

 static int cryptodev_builtin_sym_operation(
-    CryptoDevBackend *backend,
-    CryptoDevBackendSymOpInfo *op_info,
-    uint32_t queue_index, Error **errp)
+    CryptoDevBackendBuiltinSession *sess,
+    CryptoDevBackendSymOpInfo *op_info, Error **errp)
 {
-    CryptoDevBackendBuiltin *builtin =
-                      CRYPTODEV_BACKEND_BUILTIN(backend);
-    CryptoDevBackendBuiltinSession *sess;
     int ret;

-    if (op_info->session_id >= MAX_NUM_SESSIONS ||
-        builtin->sessions[op_info->session_id] == NULL) {
-        error_setg(errp, "Cannot find a valid session id: %" PRIu64 "",
-                   op_info->session_id);
-        return -VIRTIO_CRYPTO_INVSESS;
-    }
-
     if (op_info->op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
         error_setg(errp,
                    "Algorithm chain is unsupported for cryptdoev-builtin");
         return -VIRTIO_CRYPTO_NOTSUPP;
     }

-    sess = builtin->sessions[op_info->session_id];
     if (op_info->iv_len > 0) {
         ret = qcrypto_cipher_setiv(sess->cipher, op_info->iv,
                                    op_info->iv_len, errp);

@@ -333,9 +443,99 @@ static int cryptodev_builtin_sym_operation(
             return -VIRTIO_CRYPTO_ERR;
         }
     }

     return VIRTIO_CRYPTO_OK;
 }

+static int cryptodev_builtin_asym_operation(
+    CryptoDevBackendBuiltinSession *sess, uint32_t op_code,
+    CryptoDevBackendAsymOpInfo *op_info, Error **errp)
+{
+    int ret;
+
+    switch (op_code) {
+    case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
+        ret = qcrypto_akcipher_encrypt(sess->akcipher,
+                                       op_info->src, op_info->src_len,
+                                       op_info->dst, op_info->dst_len, errp);
+        break;
+
+    case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
+        ret = qcrypto_akcipher_decrypt(sess->akcipher,
+                                       op_info->src, op_info->src_len,
+                                       op_info->dst, op_info->dst_len, errp);
+        break;
+
+    case VIRTIO_CRYPTO_AKCIPHER_SIGN:
+        ret = qcrypto_akcipher_sign(sess->akcipher,
+                                    op_info->src, op_info->src_len,
+                                    op_info->dst, op_info->dst_len, errp);
+        break;
+
+    case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
+        ret = qcrypto_akcipher_verify(sess->akcipher,
+                                      op_info->src, op_info->src_len,
+                                      op_info->dst, op_info->dst_len, errp);
+        break;
+
+    default:
+        return -VIRTIO_CRYPTO_ERR;
+    }
+
+    if (ret < 0) {
+        if (op_code == VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
+            return -VIRTIO_CRYPTO_KEY_REJECTED;
+        }
+        return -VIRTIO_CRYPTO_ERR;
+    }
+
+    /* Buffer is too short, typically the driver should handle this case */
+    if (unlikely(ret > op_info->dst_len)) {
+        if (errp && !*errp) {
+            error_setg(errp, "dst buffer too short");
+        }
+        return -VIRTIO_CRYPTO_ERR;
+    }
+
+    op_info->dst_len = ret;
+
+    return VIRTIO_CRYPTO_OK;
+}
+
+static int cryptodev_builtin_operation(
+    CryptoDevBackend *backend,
+    CryptoDevBackendOpInfo *op_info,
+    uint32_t queue_index, Error **errp)
+{
+    CryptoDevBackendBuiltin *builtin =
+                      CRYPTODEV_BACKEND_BUILTIN(backend);
+    CryptoDevBackendBuiltinSession *sess;
+    CryptoDevBackendSymOpInfo *sym_op_info;
+    CryptoDevBackendAsymOpInfo *asym_op_info;
+    enum CryptoDevBackendAlgType algtype = op_info->algtype;
+    int ret = -VIRTIO_CRYPTO_ERR;
+
+    if (op_info->session_id >= MAX_NUM_SESSIONS ||
+        builtin->sessions[op_info->session_id] == NULL) {
+        error_setg(errp, "Cannot find a valid session id: %" PRIu64 "",
+                   op_info->session_id);
+        return -VIRTIO_CRYPTO_INVSESS;
+    }
+
+    sess = builtin->sessions[op_info->session_id];
+    if (algtype == CRYPTODEV_BACKEND_ALG_SYM) {
+        sym_op_info = op_info->u.sym_op_info;
+        ret = cryptodev_builtin_sym_operation(sess, sym_op_info, errp);
+    } else if (algtype == CRYPTODEV_BACKEND_ALG_ASYM) {
+        asym_op_info = op_info->u.asym_op_info;
+        ret = cryptodev_builtin_asym_operation(sess, op_info->op_code,
+                                               asym_op_info, errp);
+    }
+
+    return ret;
+}
+
 static void cryptodev_builtin_cleanup(
     CryptoDevBackend *backend,
     Error **errp)

@@ -348,7 +548,7 @@ static void cryptodev_builtin_cleanup(
     for (i = 0; i < MAX_NUM_SESSIONS; i++) {
         if (builtin->sessions[i] != NULL) {
-            cryptodev_builtin_sym_close_session(backend, i, 0, &error_abort);
+            cryptodev_builtin_close_session(backend, i, 0, &error_abort);
         }
     }

@@ -370,9 +570,9 @@ cryptodev_builtin_class_init(ObjectClass *oc, void *data)

     bc->init = cryptodev_builtin_init;
     bc->cleanup = cryptodev_builtin_cleanup;
-    bc->create_session = cryptodev_builtin_sym_create_session;
-    bc->close_session = cryptodev_builtin_sym_close_session;
-    bc->do_sym_op = cryptodev_builtin_sym_operation;
+    bc->create_session = cryptodev_builtin_create_session;
+    bc->close_session = cryptodev_builtin_close_session;
+    bc->do_op = cryptodev_builtin_operation;
 }

 static const TypeInfo cryptodev_builtin_info = {

diff --git a/backends/cryptodev-vhost-user.c b/backends/cryptodev-vhost-user.c

@@ -259,7 +259,33 @@ static int64_t cryptodev_vhost_user_sym_create_session(
     return -1;
 }

-static int cryptodev_vhost_user_sym_close_session(
+static int64_t cryptodev_vhost_user_create_session(
+    CryptoDevBackend *backend,
+    CryptoDevBackendSessionInfo *sess_info,
+    uint32_t queue_index, Error **errp)
+{
+    uint32_t op_code = sess_info->op_code;
+    CryptoDevBackendSymSessionInfo *sym_sess_info;
+
+    switch (op_code) {
+    case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
+    case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
+    case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
+    case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
+        sym_sess_info = &sess_info->u.sym_sess_info;
+        return cryptodev_vhost_user_sym_create_session(backend, sym_sess_info,
+                   queue_index, errp);
+    default:
+        error_setg(errp, "Unsupported opcode :%" PRIu32 "",
+                   sess_info->op_code);
+        return -1;
+    }
+
+    return -1;
+}
+
+static int cryptodev_vhost_user_close_session(
     CryptoDevBackend *backend,
     uint64_t session_id,
     uint32_t queue_index, Error **errp)

@@ -351,9 +377,9 @@ cryptodev_vhost_user_class_init(ObjectClass *oc, void *data)

     bc->init = cryptodev_vhost_user_init;
     bc->cleanup = cryptodev_vhost_user_cleanup;
-    bc->create_session = cryptodev_vhost_user_sym_create_session;
-    bc->close_session = cryptodev_vhost_user_sym_close_session;
-    bc->do_sym_op = NULL;
+    bc->create_session = cryptodev_vhost_user_create_session;
+    bc->close_session = cryptodev_vhost_user_close_session;
+    bc->do_op = NULL;

     object_class_property_add_str(oc, "chardev",
             cryptodev_vhost_user_get_chardev,

diff --git a/backends/cryptodev.c b/backends/cryptodev.c

@@ -72,9 +72,9 @@ void cryptodev_backend_cleanup(
     }
 }

-int64_t cryptodev_backend_sym_create_session(
+int64_t cryptodev_backend_create_session(
     CryptoDevBackend *backend,
-    CryptoDevBackendSymSessionInfo *sess_info,
+    CryptoDevBackendSessionInfo *sess_info,
     uint32_t queue_index, Error **errp)
 {
     CryptoDevBackendClass *bc =

@@ -87,7 +87,7 @@ int64_t cryptodev_backend_sym_create_session(
     return -1;
 }

-int cryptodev_backend_sym_close_session(
+int cryptodev_backend_close_session(
     CryptoDevBackend *backend,
     uint64_t session_id,
     uint32_t queue_index, Error **errp)

@@ -102,16 +102,16 @@ int cryptodev_backend_sym_close_session(
     return -1;
 }

-static int cryptodev_backend_sym_operation(
+static int cryptodev_backend_operation(
     CryptoDevBackend *backend,
-    CryptoDevBackendSymOpInfo *op_info,
+    CryptoDevBackendOpInfo *op_info,
     uint32_t queue_index, Error **errp)
 {
     CryptoDevBackendClass *bc =
                       CRYPTODEV_BACKEND_GET_CLASS(backend);

-    if (bc->do_sym_op) {
-        return bc->do_sym_op(backend, op_info, queue_index, errp);
+    if (bc->do_op) {
+        return bc->do_op(backend, op_info, queue_index, errp);
     }

     return -VIRTIO_CRYPTO_ERR;

@@ -123,20 +123,18 @@ int cryptodev_backend_crypto_operation(
     uint32_t queue_index, Error **errp)
 {
     VirtIOCryptoReq *req = opaque;
+    CryptoDevBackendOpInfo *op_info = &req->op_info;
+    enum CryptoDevBackendAlgType algtype = req->flags;

-    if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
-        CryptoDevBackendSymOpInfo *op_info;
-        op_info = req->u.sym_op_info;
-
-        return cryptodev_backend_sym_operation(backend,
-                         op_info, queue_index, errp);
-    } else {
+    if ((algtype != CRYPTODEV_BACKEND_ALG_SYM)
+        && (algtype != CRYPTODEV_BACKEND_ALG_ASYM)) {
         error_setg(errp, "Unsupported cryptodev alg type: %" PRIu32 "",
-                   req->flags);
+                   algtype);
         return -VIRTIO_CRYPTO_NOTSUPP;
     }

-    return -VIRTIO_CRYPTO_ERR;
+    return cryptodev_backend_operation(backend, op_info, queue_index, errp);
 }

 static void

diff --git a/docs/system/devices/cxl.rst b/docs/system/devices/cxl.rst

@@ -118,8 +118,6 @@ and associated component register access via PCI bars.
 CXL Switch
 ~~~~~~~~~~

-Not yet implemented in QEMU.
-
 Here we consider a simple CXL switch with only a single
 virtual hierarchy. Whilst more complex devices exist, their
 visibility to a particular host is generally the same as for

@@ -137,6 +135,10 @@ BARs. The Upstream Port has the configuration interfaces for
 the HDM decoders which route incoming memory accesses to the
 appropriate downstream port.

+A CXL switch is created in a similar fashion to PCI switches
+by creating an upstream port (cxl-upstream) and a number of
+downstream ports on the internal switch bus (cxl-downstream).
+
 CXL Memory Devices - Type 3
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 CXL type 3 devices use a PCI class code and are intended to be supported

@@ -240,6 +242,62 @@ Notes:
     they will take the Host Physical Addresses of accesses and map
     them to their own local Device Physical Address Space (DPA).
+Example topology involving a switch::
+
+  |<------------------SYSTEM PHYSICAL ADDRESS MAP (1)----------------->|
+  |    __________   __________________________________   __________   |
+  |   |          | |                                  | |          |  |
+  |   | CFMW 0   | |  CXL Fixed Memory Window 1       | | CFMW 1   |  |
+  |   | HB0 only | |  Configured to interleave memory | | HB1 only |  |
+  |   |          | |  memory accesses across HB0/HB1  | |          |  |
+  |   |____x_____| |__________________________________| |__________|  |
+           |                 |              |                 |
+           |                 |              |                 |
+           |                 |              |
+  Interleave Decoder         |              |
+   Matches this HB           |              |
+           \_____________|   |              |   |_____________/
+              __________|____|_______   ____|___|____________
+             |                       | |                     |
+             | CXL HB 0              | | CXL HB 1            |
+             | HB IntLv Decoders     | | HB IntLv Decoders   |
+             | PCI/CXL Root Bus 0c   | | PCI/CXL Root Bus 0d |
+             |                       | |                     |
+             |___x___________________| |_____________________|
+                 |   |     |    |
+                 |
+      A HB 0 HDM Decoder
+      matches this Port
+       ___________|___
+      | Root Port 0   |
+      |  Appears in   |
+      |  PCI topology |
+      |  As 0c:00.0   |
+      |___________x___|
+                  |
+                  |
+                  \_____________________
+                                        |
+                                        |
+      ---------------------------------------------------
+     | Switch 0  USP as PCI 0d:00.0                      |
+     | USP has HDM decoder which direct traffic to       |
+     | appropiate downstream port                        |
+     | Switch BUS appears as 0e                          |
+     |x__________________________________________________|
+      |                 |                 |           |
+      |                 |                 |           |
+  ____|__________   ____|________   ______|_____   ___|__________
+(4)| x             | |             | |            | |              |
+  | CXL Type3 0   | | CXL Type3 1 | | CXL type3 2| | CLX Type 3 3 |
+  |               | |             | |            | |              |
+  | PMEM0(Vol LSA)| | PMEM1 (...) | | PMEM2 (...)| | PMEM3 (...)  |
+  | Decoder to go | |             | |            | |              |
+  | from host PA  | | PCI 10:00.0 | | PCI 11:00.0| | PCI 12:00.0  |
+  | to device PA  | |             | |            | |              |
+  | PCI as 0f:00.0| |             | |            | |              |
+  |_______________| |_____________| |____________| |______________|
 Example command lines
 ---------------------
 A very simple setup with just one directly attached CXL Type 3 device::

@@ -279,6 +337,32 @@ the CXL Type3 device directly attached (no switches).::
       -device cxl-type3,bus=root_port16,memdev=cxl-mem4,lsa=cxl-lsa4,id=cxl-pmem3 \
       -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.targets.1=cxl.2,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=8k

+An example of 4 devices below a switch suitable for 1, 2 or 4 way interleave::
+
+  qemu-system-aarch64 -M virt,gic-version=3,cxl=on -m 4g,maxmem=8G,slots=8 -cpu max \
+  ...
+  -object memory-backend-file,id=cxl-mem0,share=on,mem-path=/tmp/cxltest.raw,size=256M \
+  -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest1.raw,size=256M \
+  -object memory-backend-file,id=cxl-mem2,share=on,mem-path=/tmp/cxltest2.raw,size=256M \
+  -object memory-backend-file,id=cxl-mem3,share=on,mem-path=/tmp/cxltest3.raw,size=256M \
+  -object memory-backend-file,id=cxl-lsa0,share=on,mem-path=/tmp/lsa0.raw,size=256M \
+  -object memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa1.raw,size=256M \
+  -object memory-backend-file,id=cxl-lsa2,share=on,mem-path=/tmp/lsa2.raw,size=256M \
+  -object memory-backend-file,id=cxl-lsa3,share=on,mem-path=/tmp/lsa3.raw,size=256M \
+  -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1 \
+  -device cxl-rp,port=0,bus=cxl.1,id=root_port0,chassis=0,slot=0 \
+  -device cxl-rp,port=1,bus=cxl.1,id=root_port1,chassis=0,slot=1 \
+  -device cxl-upstream,bus=root_port0,id=us0 \
+  -device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \
+  -device cxl-type3,bus=swport0,memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0,size=256M \
+  -device cxl-downstream,port=1,bus=us0,id=swport1,chassis=0,slot=5 \
+  -device cxl-type3,bus=swport1,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem1,size=256M \
+  -device cxl-downstream,port=2,bus=us0,id=swport2,chassis=0,slot=6 \
+  -device cxl-type3,bus=swport2,memdev=cxl-mem2,lsa=cxl-lsa2,id=cxl-pmem2,size=256M \
+  -device cxl-downstream,port=3,bus=us0,id=swport3,chassis=0,slot=7 \
+  -device cxl-type3,bus=swport3,memdev=cxl-mem3,lsa=cxl-lsa3,id=cxl-pmem3,size=256M \
+  -M cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=4G,cxl-fmw.0.interleave-granularity=4k
+
 Kernel Configuration Options
 ----------------------------
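Distilled from the full example above, the switch itself is just three devices:
an upstream port plugged into a CXL root port, a downstream port on the
switch's internal bus, and a type 3 device below it::

  -device cxl-upstream,bus=root_port0,id=us0 \
  -device cxl-downstream,port=0,bus=us0,id=swport0,chassis=0,slot=4 \
  -device cxl-type3,bus=swport0,memdev=cxl-mem0,lsa=cxl-lsa0,id=cxl-pmem0,size=256M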

diff --git a/hw/acpi/erst.c b/hw/acpi/erst.c

@@ -440,6 +440,7 @@ static void check_erst_backend_storage(ERSTDeviceState *s, Error **errp)
         (record_size >= 4096) /* PAGE_SIZE */
         )) {
         error_setg(errp, "ERST record_size %u is invalid", record_size);
+        return;
     }

     /* Validity check header */

@@ -450,6 +451,7 @@ static void check_erst_backend_storage(ERSTDeviceState *s, Error **errp)
         (le16_to_cpu(header->reserved) == 0)
         )) {
         error_setg(errp, "ERST backend storage header is invalid");
+        return;
     }

     /* Check storage_size against record_size */

@@ -457,6 +459,7 @@ static void check_erst_backend_storage(ERSTDeviceState *s, Error **errp)
         (record_size > s->storage_size)) {
         error_setg(errp, "ACPI ERST requires storage size be multiple of "
                    "record size (%uKiB)", record_size);
+        return;
     }

     /* Compute offset of first and last record storage slot */
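The bug here is the classic validate-and-fall-through pattern: error_setg()
fills the Error ** out-parameter but does not stop execution, so the later
checks run against state already known to be bad. A standalone sketch of the
pattern and the fix (a simplified stand-in, not QEMU's Error API):

    #include <stdio.h>

    /* Simplified stand-in for QEMU's error_setg()/Error **errp idiom. */
    static void check_storage(unsigned record_size, const char **errp)
    {
        if (record_size < 4096) {
            *errp = "record_size is invalid";
            return;  /* the missing early return was the fallthrough bug */
        }
        /* later checks may assume record_size is valid, e.g. divide by it */
        printf("slots = %u\n", 65536 / record_size);
    }

    int main(void)
    {
        const char *err = NULL;
        check_storage(0, &err);  /* without the return: division by zero */
        printf("%s\n", err ? err : "ok");
        return 0;
    }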

diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c

@@ -129,8 +129,9 @@ static bool cxl_hdm_find_target(uint32_t *cache_mem, hwaddr addr,

 static PCIDevice *cxl_cfmws_find_device(CXLFixedWindow *fw, hwaddr addr)
 {
-    CXLComponentState *hb_cstate;
+    CXLComponentState *hb_cstate, *usp_cstate;
     PCIHostState *hb;
+    CXLUpstreamPort *usp;
     int rb_index;
     uint32_t *cache_mem;
     uint8_t target;

@@ -164,8 +165,46 @@ static PCIDevice *cxl_cfmws_find_device(CXLFixedWindow *fw, hwaddr addr)
     }

     d = pci_bridge_get_sec_bus(PCI_BRIDGE(rp))->devices[0];
+    if (!d) {
+        return NULL;
+    }

-    if (!d || !object_dynamic_cast(OBJECT(d), TYPE_CXL_TYPE3)) {
+    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_TYPE3)) {
+        return d;
+    }
+
+    /*
+     * Could also be a switch.  Note only one level of switching currently
+     * supported.
+     */
+    if (!object_dynamic_cast(OBJECT(d), TYPE_CXL_USP)) {
+        return NULL;
+    }
+    usp = CXL_USP(d);
+
+    usp_cstate = cxl_usp_to_cstate(usp);
+    if (!usp_cstate) {
+        return NULL;
+    }
+
+    cache_mem = usp_cstate->crb.cache_mem_registers;
+
+    target_found = cxl_hdm_find_target(cache_mem, addr, &target);
+    if (!target_found) {
+        return NULL;
+    }
+
+    d = pcie_find_port_by_pn(&PCI_BRIDGE(d)->sec_bus, target);
+    if (!d) {
+        return NULL;
+    }
+
+    d = pci_bridge_get_sec_bus(PCI_BRIDGE(d))->devices[0];
+    if (!d) {
+        return NULL;
+    }
+
+    if (!object_dynamic_cast(OBJECT(d), TYPE_CXL_TYPE3)) {
         return NULL;
     }

diff --git a/hw/pci-bridge/cxl_downstream.c b/hw/pci-bridge/cxl_downstream.c
new file mode 100644

@@ -0,0 +1,249 @@
/*
 * Emulated CXL Switch Downstream Port
 *
 * Copyright (c) 2022 Huawei Technologies.
 *
 * Based on xio3130_downstream.c
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/pci/msi.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_port.h"
#include "qapi/error.h"

typedef struct CXLDownStreamPort {
    /*< private >*/
    PCIESlot parent_obj;

    /*< public >*/
    CXLComponentState cxl_cstate;
} CXLDownstreamPort;

#define TYPE_CXL_DSP "cxl-downstream"
DECLARE_INSTANCE_CHECKER(CXLDownstreamPort, CXL_DSP, TYPE_CXL_DSP)

#define CXL_DOWNSTREAM_PORT_MSI_OFFSET 0x70
#define CXL_DOWNSTREAM_PORT_MSI_NR_VECTOR 1
#define CXL_DOWNSTREAM_PORT_EXP_OFFSET 0x90
#define CXL_DOWNSTREAM_PORT_AER_OFFSET 0x100
#define CXL_DOWNSTREAM_PORT_DVSEC_OFFSET \
    (CXL_DOWNSTREAM_PORT_AER_OFFSET + PCI_ERR_SIZEOF)

static void latch_registers(CXLDownstreamPort *dsp)
{
    uint32_t *reg_state = dsp->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = dsp->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk,
                                       CXL2_DOWNSTREAM_PORT);
}

/* TODO: Look at sharing this code acorss all CXL port types */
static void cxl_dsp_dvsec_write_config(PCIDevice *dev, uint32_t addr,
                                       uint32_t val, int len)
{
    CXLDownstreamPort *dsp = CXL_DSP(dev);
    CXLComponentState *cxl_cstate = &dsp->cxl_cstate;

    if (range_contains(&cxl_cstate->dvsecs[EXTENSIONS_PORT_DVSEC], addr)) {
        uint8_t *reg = &dev->config[addr];
        addr -= cxl_cstate->dvsecs[EXTENSIONS_PORT_DVSEC].lob;
        if (addr == PORT_CONTROL_OFFSET) {
            if (pci_get_word(reg) & PORT_CONTROL_UNMASK_SBR) {
                /* unmask SBR */
                qemu_log_mask(LOG_UNIMP, "SBR mask control is not supported\n");
            }
            if (pci_get_word(reg) & PORT_CONTROL_ALT_MEMID_EN) {
                /* Alt Memory & ID Space Enable */
                qemu_log_mask(LOG_UNIMP,
                              "Alt Memory & ID space is not supported\n");
            }
        }
    }
}

static void cxl_dsp_config_write(PCIDevice *d, uint32_t address,
                                 uint32_t val, int len)
{
    uint16_t slt_ctl, slt_sta;

    pcie_cap_slot_get(d, &slt_ctl, &slt_sta);
    pci_bridge_write_config(d, address, val, len);
    pcie_cap_flr_write_config(d, address, val, len);
    pcie_cap_slot_write_config(d, slt_ctl, slt_sta, address, val, len);
    pcie_aer_write_config(d, address, val, len);

    cxl_dsp_dvsec_write_config(d, address, val, len);
}

static void cxl_dsp_reset(DeviceState *qdev)
{
    PCIDevice *d = PCI_DEVICE(qdev);
    CXLDownstreamPort *dsp = CXL_DSP(qdev);

    pcie_cap_deverr_reset(d);
    pcie_cap_slot_reset(d);
    pcie_cap_arifwd_reset(d);
    pci_bridge_reset(qdev);

    latch_registers(dsp);
}

static void build_dvsecs(CXLComponentState *cxl)
{
    uint8_t *dvsec;

    dvsec = (uint8_t *)&(CXLDVSECPortExtensions){ 0 };
    cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT,
                               EXTENSIONS_PORT_DVSEC_LENGTH,
                               EXTENSIONS_PORT_DVSEC,
                               EXTENSIONS_PORT_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap                     = 0x27, /* Cache, IO, Mem, non-MLD */
        .ctrl                    = 0x02, /* IO always enabled */
        .status                  = 0x26, /* same */
        .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
    };
    cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortGPF){
        .rsvd        = 0,
        .phase1_ctrl = 1, /* 1μs timeout */
        .phase2_ctrl = 1, /* 1μs timeout */
    };
    cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT,
                               GPF_PORT_DVSEC_LENGTH, GPF_PORT_DVSEC,
                               GPF_PORT_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd         = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl, CXL2_DOWNSTREAM_PORT,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);
}

static void cxl_dsp_realize(PCIDevice *d, Error **errp)
{
    PCIEPort *p = PCIE_PORT(d);
    PCIESlot *s = PCIE_SLOT(d);
    CXLDownstreamPort *dsp = CXL_DSP(d);
    CXLComponentState *cxl_cstate = &dsp->cxl_cstate;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    MemoryRegion *component_bar = &cregs->component_registers;
    int rc;

    pci_bridge_initfn(d, TYPE_PCIE_BUS);
    pcie_port_init_reg(d);

    rc = msi_init(d, CXL_DOWNSTREAM_PORT_MSI_OFFSET,
                  CXL_DOWNSTREAM_PORT_MSI_NR_VECTOR,
                  true, true, errp);
    if (rc) {
        assert(rc == -ENOTSUP);
        goto err_bridge;
    }

    rc = pcie_cap_init(d, CXL_DOWNSTREAM_PORT_EXP_OFFSET,
                       PCI_EXP_TYPE_DOWNSTREAM, p->port,
                       errp);
    if (rc < 0) {
        goto err_msi;
    }
    pcie_cap_flr_init(d);
    pcie_cap_deverr_init(d);
    pcie_cap_slot_init(d, s);
    pcie_cap_arifwd_init(d);

    pcie_chassis_create(s->chassis);
    rc = pcie_chassis_add_slot(s);
    if (rc < 0) {
        error_setg(errp, "Can't add chassis slot, error %d", rc);
        goto err_pcie_cap;
    }

    rc = pcie_aer_init(d, PCI_ERR_VER, CXL_DOWNSTREAM_PORT_AER_OFFSET,
                       PCI_ERR_SIZEOF, errp);
    if (rc < 0) {
        goto err_chassis;
    }

    cxl_cstate->dvsec_offset = CXL_DOWNSTREAM_PORT_DVSEC_OFFSET;
    cxl_cstate->pdev = d;
    build_dvsecs(cxl_cstate);
    cxl_component_register_block_init(OBJECT(d), cxl_cstate, TYPE_CXL_DSP);
    pci_register_bar(d, CXL_COMPONENT_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64,
                     component_bar);

    return;

 err_chassis:
    pcie_chassis_del_slot(s);
 err_pcie_cap:
    pcie_cap_exit(d);
 err_msi:
    msi_uninit(d);
 err_bridge:
    pci_bridge_exitfn(d);
}

static void cxl_dsp_exitfn(PCIDevice *d)
{
    PCIESlot *s = PCIE_SLOT(d);

    pcie_aer_exit(d);
    pcie_chassis_del_slot(s);
    pcie_cap_exit(d);
    msi_uninit(d);
    pci_bridge_exitfn(d);
}

static void cxl_dsp_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);

    k->is_bridge = true;
    k->config_write = cxl_dsp_config_write;
    k->realize = cxl_dsp_realize;
    k->exit = cxl_dsp_exitfn;
    k->vendor_id = 0x19e5; /* Huawei */
    k->device_id = 0xa129; /* Emulated CXL Switch Downstream Port */
    k->revision = 0;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->desc = "CXL Switch Downstream Port";
    dc->reset = cxl_dsp_reset;
}

static const TypeInfo cxl_dsp_info = {
    .name = TYPE_CXL_DSP,
    .instance_size = sizeof(CXLDownstreamPort),
    .parent = TYPE_PCIE_SLOT,
    .class_init = cxl_dsp_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CXL_DEVICE },
        { }
    },
};

static void cxl_dsp_register_type(void)
{
    type_register_static(&cxl_dsp_info);
}

type_init(cxl_dsp_register_type);

diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c
new file mode 100644

@@ -0,0 +1,216 @@
/*
 * Emulated CXL Switch Upstream Port
 *
 * Copyright (c) 2022 Huawei Technologies.
 *
 * Based on xio3130_upstream.c
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/pci/msi.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_port.h"

#define CXL_UPSTREAM_PORT_MSI_NR_VECTOR 1

#define CXL_UPSTREAM_PORT_MSI_OFFSET 0x70
#define CXL_UPSTREAM_PORT_PCIE_CAP_OFFSET 0x90
#define CXL_UPSTREAM_PORT_AER_OFFSET 0x100
#define CXL_UPSTREAM_PORT_DVSEC_OFFSET \
    (CXL_UPSTREAM_PORT_AER_OFFSET + PCI_ERR_SIZEOF)

typedef struct CXLUpstreamPort {
    /*< private >*/
    PCIEPort parent_obj;

    /*< public >*/
    CXLComponentState cxl_cstate;
} CXLUpstreamPort;

CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp)
{
    return &usp->cxl_cstate;
}

static void cxl_usp_dvsec_write_config(PCIDevice *dev, uint32_t addr,
                                       uint32_t val, int len)
{
    CXLUpstreamPort *usp = CXL_USP(dev);

    if (range_contains(&usp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC], addr)) {
        uint8_t *reg = &dev->config[addr];
        addr -= usp->cxl_cstate.dvsecs[EXTENSIONS_PORT_DVSEC].lob;
        if (addr == PORT_CONTROL_OFFSET) {
            if (pci_get_word(reg) & PORT_CONTROL_UNMASK_SBR) {
                /* unmask SBR */
                qemu_log_mask(LOG_UNIMP, "SBR mask control is not supported\n");
            }
            if (pci_get_word(reg) & PORT_CONTROL_ALT_MEMID_EN) {
                /* Alt Memory & ID Space Enable */
                qemu_log_mask(LOG_UNIMP,
                              "Alt Memory & ID space is not supported\n");
            }
        }
    }
}

static void cxl_usp_write_config(PCIDevice *d, uint32_t address,
                                 uint32_t val, int len)
{
    pci_bridge_write_config(d, address, val, len);
    pcie_cap_flr_write_config(d, address, val, len);
    pcie_aer_write_config(d, address, val, len);

    cxl_usp_dvsec_write_config(d, address, val, len);
}

static void latch_registers(CXLUpstreamPort *usp)
{
    uint32_t *reg_state = usp->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = usp->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk,
                                       CXL2_UPSTREAM_PORT);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 8);
}

static void cxl_usp_reset(DeviceState *qdev)
{
    PCIDevice *d = PCI_DEVICE(qdev);
    CXLUpstreamPort *usp = CXL_USP(qdev);

    pci_bridge_reset(qdev);
    pcie_cap_deverr_reset(d);
    latch_registers(usp);
}

static void build_dvsecs(CXLComponentState *cxl)
{
    uint8_t *dvsec;

    dvsec = (uint8_t *)&(CXLDVSECPortExtensions){
        .status = 0x1, /* Port Power Management Init Complete */
    };
    cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
                               EXTENSIONS_PORT_DVSEC_LENGTH,
                               EXTENSIONS_PORT_DVSEC,
                               EXTENSIONS_PORT_DVSEC_REVID, dvsec);
    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap                     = 0x27, /* Cache, IO, Mem, non-MLD */
        .ctrl                    = 0x27, /* Cache, IO, Mem */
        .status                  = 0x26, /* same */
        .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
    };
    cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd         = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl, CXL2_UPSTREAM_PORT,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);
}

static void cxl_usp_realize(PCIDevice *d, Error **errp)
{
    PCIEPort *p = PCIE_PORT(d);
    CXLUpstreamPort *usp = CXL_USP(d);
    CXLComponentState *cxl_cstate = &usp->cxl_cstate;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    MemoryRegion *component_bar = &cregs->component_registers;
    int rc;

    pci_bridge_initfn(d, TYPE_PCIE_BUS);
    pcie_port_init_reg(d);

    rc = msi_init(d, CXL_UPSTREAM_PORT_MSI_OFFSET,
                  CXL_UPSTREAM_PORT_MSI_NR_VECTOR, true, true, errp);
    if (rc) {
        assert(rc == -ENOTSUP);
        goto err_bridge;
    }

    rc = pcie_cap_init(d, CXL_UPSTREAM_PORT_PCIE_CAP_OFFSET,
                       PCI_EXP_TYPE_UPSTREAM, p->port, errp);
    if (rc < 0) {
        goto err_msi;
    }

    pcie_cap_flr_init(d);
    pcie_cap_deverr_init(d);

    rc = pcie_aer_init(d, PCI_ERR_VER, CXL_UPSTREAM_PORT_AER_OFFSET,
                       PCI_ERR_SIZEOF, errp);
    if (rc) {
        goto err_cap;
    }

    cxl_cstate->dvsec_offset = CXL_UPSTREAM_PORT_DVSEC_OFFSET;
    cxl_cstate->pdev = d;
    build_dvsecs(cxl_cstate);
    cxl_component_register_block_init(OBJECT(d), cxl_cstate, TYPE_CXL_USP);
    pci_register_bar(d, CXL_COMPONENT_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64,
                     component_bar);

    return;

 err_cap:
    pcie_cap_exit(d);
 err_msi:
    msi_uninit(d);
 err_bridge:
    pci_bridge_exitfn(d);
}

static void cxl_usp_exitfn(PCIDevice *d)
{
    pcie_aer_exit(d);
    pcie_cap_exit(d);
    msi_uninit(d);
    pci_bridge_exitfn(d);
}

static void cxl_upstream_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(oc);

    k->is_bridge = true;
    k->config_write = cxl_usp_write_config;
    k->realize = cxl_usp_realize;
    k->exit = cxl_usp_exitfn;
    k->vendor_id = 0x19e5; /* Huawei */
    k->device_id = 0xa128; /* Emulated CXL Switch Upstream Port */
    k->revision = 0;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
    dc->desc = "CXL Switch Upstream Port";
    dc->reset = cxl_usp_reset;
}

static const TypeInfo cxl_usp_info = {
    .name = TYPE_CXL_USP,
    .parent = TYPE_PCIE_PORT,
    .instance_size = sizeof(CXLUpstreamPort),
    .class_init = cxl_upstream_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CXL_DEVICE },
        { }
    },
};

static void cxl_usp_register_type(void)
{
    type_register_static(&cxl_usp_info);
}

type_init(cxl_usp_register_type);

diff --git a/hw/pci-bridge/meson.build b/hw/pci-bridge/meson.build

@@ -6,7 +6,7 @@ pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pci
 pci_ss.add(when: 'CONFIG_PXB', if_true: files('pci_expander_bridge.c'),
                                if_false: files('pci_expander_bridge_stubs.c'))
 pci_ss.add(when: 'CONFIG_XIO3130', if_true: files('xio3130_upstream.c', 'xio3130_downstream.c'))
-pci_ss.add(when: 'CONFIG_CXL', if_true: files('cxl_root_port.c'))
+pci_ss.add(when: 'CONFIG_CXL', if_true: files('cxl_root_port.c', 'cxl_upstream.c', 'cxl_downstream.c'))

 # NewWorld PowerMac
 pci_ss.add(when: 'CONFIG_DEC_PCI', if_true: files('dec.c'))

diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events

@@ -124,6 +124,7 @@ virtio_iommu_remap(const char *name, uint64_t virt_start, uint64_t virt_end, uin
 virtio_iommu_set_page_size_mask(const char *name, uint64_t old, uint64_t new) "mr=%s old_mask=0x%"PRIx64" new_mask=0x%"PRIx64
 virtio_iommu_notify_flag_add(const char *name) "add notifier to mr %s"
 virtio_iommu_notify_flag_del(const char *name) "del notifier from mr %s"
+virtio_iommu_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)"

 # virtio-mem.c
 virtio_mem_send_response(uint16_t type) "type=%" PRIu16

diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c

@@ -1525,7 +1525,7 @@ static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
 {
     VhostUserHostNotifier *n = NULL;
     if (idx >= u->notifiers->len) {
-        g_ptr_array_set_size(u->notifiers, idx);
+        g_ptr_array_set_size(u->notifiers, idx + 1);
     }

     n = g_ptr_array_index(u->notifiers, idx);
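The one-liner fixes an off-by-one: g_ptr_array_set_size(u->notifiers, idx)
grows the array to idx elements, so the g_ptr_array_index(u->notifiers, idx)
just below still reads one slot past the end. Growing to idx + 1 makes slot
idx valid and NULL-initialized. A standalone GLib sketch of the difference:

    #include <glib.h>

    int main(void)
    {
        GPtrArray *a = g_ptr_array_new();
        guint idx = 3;

        if (idx >= a->len) {
            /* set_size(a, idx) would leave a->len == idx, so slot idx
             * stays out of bounds; idx + 1 makes it addressable. */
            g_ptr_array_set_size(a, idx + 1);
        }
        g_assert(a->len == idx + 1);
        g_assert(g_ptr_array_index(a, idx) == NULL); /* NULL-padded */

        g_ptr_array_free(a, TRUE);
        return 0;
    }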

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c

@@ -886,6 +886,10 @@ static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
 err_vq:
     for (; i >= 0; --i) {
         idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
+        addr = virtio_queue_get_desc_addr(dev->vdev, idx);
+        if (!addr) {
+            continue;
+        }
         vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                  dev->log_enabled);
     }

diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c

@@ -83,7 +83,8 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
                struct iovec *iov, unsigned int out_num)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
-    CryptoDevBackendSymSessionInfo info;
+    CryptoDevBackendSessionInfo info;
+    CryptoDevBackendSymSessionInfo *sym_info;
     int64_t session_id;
     int queue_index;
     uint32_t op_type;

@@ -92,11 +93,13 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
     memset(&info, 0, sizeof(info));
     op_type = ldl_le_p(&sess_req->op_type);
-    info.op_type = op_type;
     info.op_code = opcode;

+    sym_info = &info.u.sym_sess_info;
+    sym_info->op_type = op_type;
+
     if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
-        ret = virtio_crypto_cipher_session_helper(vdev, &info,
+        ret = virtio_crypto_cipher_session_helper(vdev, sym_info,
                            &sess_req->u.cipher.para,
                            &iov, &out_num);
         if (ret < 0) {

@@ -105,47 +108,47 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
     } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
         size_t s;

         /* cipher part */
-        ret = virtio_crypto_cipher_session_helper(vdev, &info,
+        ret = virtio_crypto_cipher_session_helper(vdev, sym_info,
                            &sess_req->u.chain.para.cipher_param,
                            &iov, &out_num);
         if (ret < 0) {
             goto err;
         }

         /* hash part */
-        info.alg_chain_order = ldl_le_p(
+        sym_info->alg_chain_order = ldl_le_p(
             &sess_req->u.chain.para.alg_chain_order);
-        info.add_len = ldl_le_p(&sess_req->u.chain.para.aad_len);
-        info.hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode);
-        if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) {
-            info.hash_alg = ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo);
-            info.auth_key_len = ldl_le_p(
+        sym_info->add_len = ldl_le_p(&sess_req->u.chain.para.aad_len);
+        sym_info->hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode);
+        if (sym_info->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) {
+            sym_info->hash_alg =
+                ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo);
+            sym_info->auth_key_len = ldl_le_p(
                 &sess_req->u.chain.para.u.mac_param.auth_key_len);
-            info.hash_result_len = ldl_le_p(
+            sym_info->hash_result_len = ldl_le_p(
                 &sess_req->u.chain.para.u.mac_param.hash_result_len);
-            if (info.auth_key_len > vcrypto->conf.max_auth_key_len) {
+            if (sym_info->auth_key_len > vcrypto->conf.max_auth_key_len) {
                 error_report("virtio-crypto length of auth key is too big: %u",
-                             info.auth_key_len);
+                             sym_info->auth_key_len);
                 ret = -VIRTIO_CRYPTO_ERR;
                 goto err;
             }
             /* get auth key */
-            if (info.auth_key_len > 0) {
-                DPRINTF("auth_keylen=%" PRIu32 "\n", info.auth_key_len);
-                info.auth_key = g_malloc(info.auth_key_len);
-                s = iov_to_buf(iov, out_num, 0, info.auth_key,
-                               info.auth_key_len);
-                if (unlikely(s != info.auth_key_len)) {
+            if (sym_info->auth_key_len > 0) {
+                sym_info->auth_key = g_malloc(sym_info->auth_key_len);
+                s = iov_to_buf(iov, out_num, 0, sym_info->auth_key,
+                               sym_info->auth_key_len);
+                if (unlikely(s != sym_info->auth_key_len)) {
                     virtio_error(vdev,
                         "virtio-crypto authenticated key incorrect");
                     ret = -EFAULT;
                     goto err;
                 }
-                iov_discard_front(&iov, &out_num, info.auth_key_len);
+                iov_discard_front(&iov, &out_num, sym_info->auth_key_len);
             }
-        } else if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) {
-            info.hash_alg = ldl_le_p(
+        } else if (sym_info->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) {
+            sym_info->hash_alg = ldl_le_p(
                 &sess_req->u.chain.para.u.hash_param.algo);
-            info.hash_result_len = ldl_le_p(
+            sym_info->hash_result_len = ldl_le_p(
                 &sess_req->u.chain.para.u.hash_param.hash_result_len);
         } else {
             /* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */

@@ -161,13 +164,10 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
     }

     queue_index = virtio_crypto_vq2q(queue_id);
-    session_id = cryptodev_backend_sym_create_session(
+    session_id = cryptodev_backend_create_session(
                                      vcrypto->cryptodev,
                                      &info, queue_index, &local_err);
     if (session_id >= 0) {
-        DPRINTF("create session_id=%" PRIu64 " successfully\n",
-                session_id);
         ret = session_id;
     } else {
         if (local_err) {

@@ -177,11 +177,78 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
     }

 err:
-    g_free(info.cipher_key);
-    g_free(info.auth_key);
+    g_free(sym_info->cipher_key);
+    g_free(sym_info->auth_key);
     return ret;
 }
+static int64_t
+virtio_crypto_create_asym_session(VirtIOCrypto *vcrypto,
+               struct virtio_crypto_akcipher_create_session_req *sess_req,
+               uint32_t queue_id, uint32_t opcode,
+               struct iovec *iov, unsigned int out_num)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+    CryptoDevBackendSessionInfo info = {0};
+    CryptoDevBackendAsymSessionInfo *asym_info;
+    int64_t session_id;
+    int queue_index;
+    uint32_t algo, keytype, keylen;
+    g_autofree uint8_t *key = NULL;
+    Error *local_err = NULL;
+
+    algo = ldl_le_p(&sess_req->para.algo);
+    keytype = ldl_le_p(&sess_req->para.keytype);
+    keylen = ldl_le_p(&sess_req->para.keylen);
+
+    if ((keytype != VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC)
+         && (keytype != VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE)) {
+        error_report("unsupported asym keytype: %d", keytype);
+        return -VIRTIO_CRYPTO_NOTSUPP;
+    }
+
+    if (keylen) {
+        key = g_malloc(keylen);
+        if (iov_to_buf(iov, out_num, 0, key, keylen) != keylen) {
+            virtio_error(vdev, "virtio-crypto asym key incorrect");
+            return -EFAULT;
+        }
+        iov_discard_front(&iov, &out_num, keylen);
+    }
+
+    info.op_code = opcode;
+    asym_info = &info.u.asym_sess_info;
+    asym_info->algo = algo;
+    asym_info->keytype = keytype;
+    asym_info->keylen = keylen;
+    asym_info->key = key;
+
+    switch (asym_info->algo) {
+    case VIRTIO_CRYPTO_AKCIPHER_RSA:
+        asym_info->u.rsa.padding_algo =
+            ldl_le_p(&sess_req->para.u.rsa.padding_algo);
+        asym_info->u.rsa.hash_algo =
+            ldl_le_p(&sess_req->para.u.rsa.hash_algo);
+        break;
+
+    /* TODO DSA&ECDSA handling */
+
+    default:
+        return -VIRTIO_CRYPTO_ERR;
+    }
+
+    queue_index = virtio_crypto_vq2q(queue_id);
+    session_id = cryptodev_backend_create_session(vcrypto->cryptodev, &info,
+                     queue_index, &local_err);
+    if (session_id < 0) {
+        if (local_err) {
+            error_report_err(local_err);
+        }
+        return -VIRTIO_CRYPTO_ERR;
+    }
+
+    return session_id;
+}
+
 static uint8_t
 virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
            struct virtio_crypto_destroy_session_req *close_sess_req,

@@ -195,7 +262,7 @@ virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
     session_id = ldq_le_p(&close_sess_req->session_id);
     DPRINTF("close session, id=%" PRIu64 "\n", session_id);

-    ret = cryptodev_backend_sym_close_session(
+    ret = cryptodev_backend_close_session(
               vcrypto->cryptodev, session_id, queue_id, &local_err);
     if (ret == 0) {
         status = VIRTIO_CRYPTO_OK;
@@ -260,13 +327,22 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
     opcode = ldl_le_p(&ctrl.header.opcode);
     queue_id = ldl_le_p(&ctrl.header.queue_id);

+    memset(&input, 0, sizeof(input));
     switch (opcode) {
     case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
-        memset(&input, 0, sizeof(input));
         session_id = virtio_crypto_create_sym_session(vcrypto,
                              &ctrl.u.sym_create_session,
                              queue_id, opcode,
                              out_iov, out_num);
+        goto check_session;
+
+    case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
+        session_id = virtio_crypto_create_asym_session(vcrypto,
+                             &ctrl.u.akcipher_create_session,
+                             queue_id, opcode,
+                             out_iov, out_num);
+
+check_session:
         /* Serious errors, need to reset virtio crypto device */
         if (session_id == -EFAULT) {
             virtqueue_detach_element(vq, elem, 0);

@@ -290,10 +366,12 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
         virtqueue_push(vq, elem, sizeof(input));
         virtio_notify(vdev, vq);
         break;
+
     case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
     case VIRTIO_CRYPTO_HASH_DESTROY_SESSION:
     case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
     case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
+    case VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION:
         status = virtio_crypto_handle_close_session(vcrypto,
                    &ctrl.u.destroy_session, queue_id);
         /* The status only occupy one byte, we can directly use it */

@@ -311,7 +389,6 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
     case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
     default:
         error_report("virtio-crypto unsupported ctrl opcode: %d", opcode);
-        memset(&input, 0, sizeof(input));
         stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
         s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
         if (unlikely(s != sizeof(input))) {

@@ -339,15 +416,18 @@ static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
     req->in_num = 0;
     req->in_len = 0;
     req->flags = CRYPTODEV_BACKEND_ALG__MAX;
-    req->u.sym_op_info = NULL;
+    memset(&req->op_info, 0x00, sizeof(req->op_info));
 }

 static void virtio_crypto_free_request(VirtIOCryptoReq *req)
 {
-    if (req) {
-        if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
-            size_t max_len;
-            CryptoDevBackendSymOpInfo *op_info = req->u.sym_op_info;
+    if (!req) {
+        return;
+    }
+
+    if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+        size_t max_len;
+        CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info;

         max_len = op_info->iv_len +
                   op_info->aad_len +

@@ -358,9 +438,17 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req)
         /* Zeroize and free request data structure */
         memset(op_info, 0, sizeof(*op_info) + max_len);
         g_free(op_info);
-        }
-        g_free(req);
+    } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) {
+        CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info;
+        if (op_info) {
+            g_free(op_info->src);
+            g_free(op_info->dst);
+            memset(op_info, 0, sizeof(*op_info));
+            g_free(op_info);
+        }
     }
+
+    g_free(req);
 }

 static void
@@ -397,6 +485,35 @@ virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
     }
 }

+static void
+virtio_crypto_akcipher_input_data_helper(VirtIODevice *vdev,
+        VirtIOCryptoReq *req, int32_t status,
+        CryptoDevBackendAsymOpInfo *asym_op_info)
+{
+    size_t s, len;
+
+    if (status != VIRTIO_CRYPTO_OK) {
+        return;
+    }
+
+    len = asym_op_info->dst_len;
+    if (!len) {
+        return;
+    }
+
+    s = iov_from_buf(req->in_iov, req->in_num, 0, asym_op_info->dst, len);
+    if (s != len) {
+        virtio_error(vdev, "virtio-crypto asym dest data incorrect");
+        return;
+    }
+
+    iov_discard_front(&req->in_iov, &req->in_num, len);
+
+    /* For akcipher, dst_len may be changed after operation */
+    req->in_len = sizeof(struct virtio_crypto_inhdr) + asym_op_info->dst_len;
+}
+
 static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
 {
     VirtIOCrypto *vcrypto = req->vcrypto;

@@ -404,7 +521,10 @@ static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)

     if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
         virtio_crypto_sym_input_data_helper(vdev, req, status,
-                                            req->u.sym_op_info);
+                                            req->op_info.u.sym_op_info);
+    } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) {
+        virtio_crypto_akcipher_input_data_helper(vdev, req, status,
+                                            req->op_info.u.asym_op_info);
     }
     stb_p(&req->in->status, status);
     virtqueue_push(req->vq, &req->elem, req->in_len);

@@ -543,41 +663,100 @@ err:
 static int
 virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
                struct virtio_crypto_sym_data_req *req,
-               CryptoDevBackendSymOpInfo **sym_op_info,
+               CryptoDevBackendOpInfo *op_info,
                struct iovec *iov, unsigned int out_num)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+    CryptoDevBackendSymOpInfo *sym_op_info;
     uint32_t op_type;
-    CryptoDevBackendSymOpInfo *op_info;

     op_type = ldl_le_p(&req->op_type);
     if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
-        op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
+        sym_op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
                                               NULL, iov, out_num);
-        if (!op_info) {
+        if (!sym_op_info) {
             return -EFAULT;
         }
-        op_info->op_type = op_type;
     } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
-        op_info = virtio_crypto_sym_op_helper(vdev, NULL,
+        sym_op_info = virtio_crypto_sym_op_helper(vdev, NULL,
                                               &req->u.chain.para,
                                               iov, out_num);
-        if (!op_info) {
+        if (!sym_op_info) {
             return -EFAULT;
         }
-        op_info->op_type = op_type;
     } else {
         /* VIRTIO_CRYPTO_SYM_OP_NONE */
        error_report("virtio-crypto unsupported cipher type");
         return -VIRTIO_CRYPTO_NOTSUPP;
     }

-    *sym_op_info = op_info;
+    sym_op_info->op_type = op_type;
+    op_info->u.sym_op_info = sym_op_info;

     return 0;
 }
+static int
+virtio_crypto_handle_asym_req(VirtIOCrypto *vcrypto,
+                              struct virtio_crypto_akcipher_data_req *req,
+                              CryptoDevBackendOpInfo *op_info,
+                              struct iovec *iov, unsigned int out_num)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+    CryptoDevBackendAsymOpInfo *asym_op_info;
+    uint32_t src_len;
+    uint32_t dst_len;
+    uint32_t len;
+    uint8_t *src = NULL;
+    uint8_t *dst = NULL;
+
+    asym_op_info = g_malloc0(sizeof(CryptoDevBackendAsymOpInfo));
+    src_len = ldl_le_p(&req->para.src_data_len);
+    dst_len = ldl_le_p(&req->para.dst_data_len);
+
+    if (src_len > 0) {
+        src = g_malloc0(src_len);
+        len = iov_to_buf(iov, out_num, 0, src, src_len);
+        if (unlikely(len != src_len)) {
+            virtio_error(vdev, "virtio-crypto asym src data incorrect, "
+                         "expected %u, actual %u", src_len, len);
+            goto err;
+        }
+
+        iov_discard_front(&iov, &out_num, src_len);
+    }
+
+    if (dst_len > 0) {
+        dst = g_malloc0(dst_len);
+
+        if (op_info->op_code == VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
+            len = iov_to_buf(iov, out_num, 0, dst, dst_len);
+            if (unlikely(len != dst_len)) {
+                virtio_error(vdev, "virtio-crypto asym dst data incorrect, "
+                             "expected %u, actual %u", dst_len, len);
+                goto err;
+            }
+
+            iov_discard_front(&iov, &out_num, dst_len);
+        }
+    }
+
+    asym_op_info->src_len = src_len;
+    asym_op_info->dst_len = dst_len;
+    asym_op_info->src = src;
+    asym_op_info->dst = dst;
+    op_info->u.asym_op_info = asym_op_info;
+
+    return 0;
+
+err:
+    g_free(asym_op_info);
+    g_free(src);
+    g_free(dst);
+    return -EFAULT;
+}
 static int
 virtio_crypto_handle_request(VirtIOCryptoReq *request)
 {
@@ -595,8 +774,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
     unsigned out_num;
     uint32_t opcode;
     uint8_t status = VIRTIO_CRYPTO_ERR;
-    uint64_t session_id;
-    CryptoDevBackendSymOpInfo *sym_op_info = NULL;
+    CryptoDevBackendOpInfo *op_info = &request->op_info;
     Error *local_err = NULL;

     if (elem->out_num < 1 || elem->in_num < 1) {
@@ -639,15 +817,28 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
     request->in_iov = in_iov;

     opcode = ldl_le_p(&req.header.opcode);
-    session_id = ldq_le_p(&req.header.session_id);
+    op_info->session_id = ldq_le_p(&req.header.session_id);
+    op_info->op_code = opcode;

     switch (opcode) {
     case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
     case VIRTIO_CRYPTO_CIPHER_DECRYPT:
+        op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_SYM;
         ret = virtio_crypto_handle_sym_req(vcrypto,
-                                           &req.u.sym_req,
-                                           &sym_op_info,
+                                           &req.u.sym_req, op_info,
                                            out_iov, out_num);
+        goto check_result;
+
+    case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
+    case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
+    case VIRTIO_CRYPTO_AKCIPHER_SIGN:
+    case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
+        op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_ASYM;
+        ret = virtio_crypto_handle_asym_req(vcrypto,
+                                            &req.u.akcipher_req, op_info,
+                                            out_iov, out_num);
+
+check_result:
         /* Serious errors, need to reset virtio crypto device */
         if (ret == -EFAULT) {
             return -1;
@@ -655,11 +846,8 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
             virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
             virtio_crypto_free_request(request);
         } else {
-            sym_op_info->session_id = session_id;
-
             /* Set request's parameter */
-            request->flags = CRYPTODEV_BACKEND_ALG_SYM;
-            request->u.sym_op_info = sym_op_info;
             ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev,
                                     request, queue_index, &local_err);
             if (ret < 0) {
@@ -674,6 +862,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
             virtio_crypto_free_request(request);
         }
         break;
+
     case VIRTIO_CRYPTO_HASH:
     case VIRTIO_CRYPTO_MAC:
     case VIRTIO_CRYPTO_AEAD_ENCRYPT:
@@ -779,6 +968,7 @@ static void virtio_crypto_init_config(VirtIODevice *vdev)
     vcrypto->conf.mac_algo_l = vcrypto->conf.cryptodev->conf.mac_algo_l;
     vcrypto->conf.mac_algo_h = vcrypto->conf.cryptodev->conf.mac_algo_h;
     vcrypto->conf.aead_algo = vcrypto->conf.cryptodev->conf.aead_algo;
+    vcrypto->conf.akcipher_algo = vcrypto->conf.cryptodev->conf.akcipher_algo;
     vcrypto->conf.max_cipher_key_len =
                   vcrypto->conf.cryptodev->conf.max_cipher_key_len;
     vcrypto->conf.max_auth_key_len =
@@ -891,6 +1081,7 @@ static void virtio_crypto_get_config(VirtIODevice *vdev, uint8_t *config)
     stl_le_p(&crypto_cfg.max_cipher_key_len, c->conf.max_cipher_key_len);
     stl_le_p(&crypto_cfg.max_auth_key_len, c->conf.max_auth_key_len);
     stq_le_p(&crypto_cfg.max_size, c->conf.max_size);
+    stl_le_p(&crypto_cfg.akcipher_algo, c->conf.akcipher_algo);

     memcpy(config, &crypto_cfg, c->config_size);
 }


@@ -69,6 +69,77 @@ static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev)
     return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
 }

+static bool virtio_iommu_device_bypassed(IOMMUDevice *sdev)
+{
+    uint32_t sid;
+    bool bypassed;
+    VirtIOIOMMU *s = sdev->viommu;
+    VirtIOIOMMUEndpoint *ep;
+
+    sid = virtio_iommu_get_bdf(sdev);
+
+    qemu_rec_mutex_lock(&s->mutex);
+    /* need to check bypass before system reset */
+    if (!s->endpoints) {
+        bypassed = s->config.bypass;
+        goto unlock;
+    }
+
+    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
+    if (!ep || !ep->domain) {
+        bypassed = s->config.bypass;
+    } else {
+        bypassed = ep->domain->bypass;
+    }
+
+unlock:
+    qemu_rec_mutex_unlock(&s->mutex);
+    return bypassed;
+}
+
+/* Return whether the device is using IOMMU translation. */
+static bool virtio_iommu_switch_address_space(IOMMUDevice *sdev)
+{
+    bool use_remapping;
+
+    assert(sdev);
+
+    use_remapping = !virtio_iommu_device_bypassed(sdev);
+
+    trace_virtio_iommu_switch_address_space(pci_bus_num(sdev->bus),
+                                            PCI_SLOT(sdev->devfn),
+                                            PCI_FUNC(sdev->devfn),
+                                            use_remapping);
+
+    /* Turn one region off before turning the other on */
+    if (use_remapping) {
+        memory_region_set_enabled(&sdev->bypass_mr, false);
+        memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), true);
+    } else {
+        memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), false);
+        memory_region_set_enabled(&sdev->bypass_mr, true);
+    }
+
+    return use_remapping;
+}
+
+static void virtio_iommu_switch_address_space_all(VirtIOIOMMU *s)
+{
+    GHashTableIter iter;
+    IOMMUPciBus *iommu_pci_bus;
+    int i;
+
+    g_hash_table_iter_init(&iter, s->as_by_busptr);
+    while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
+        for (i = 0; i < PCI_DEVFN_MAX; i++) {
+            if (!iommu_pci_bus->pbdev[i]) {
+                continue;
+            }
+            virtio_iommu_switch_address_space(iommu_pci_bus->pbdev[i]);
+        }
+    }
+}
 /**
  * The bus number is used for lookup when SID based operations occur.
  * In that case we lazily populate the IOMMUPciBus array from the bus hash
@@ -213,6 +284,7 @@ static gboolean virtio_iommu_notify_map_cb(gpointer key, gpointer value,
 static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
 {
     VirtIOIOMMUDomain *domain = ep->domain;
+    IOMMUDevice *sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr);

     if (!ep->domain) {
         return;
@@ -221,6 +293,7 @@ static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
                                    ep->iommu_mr);
     QLIST_REMOVE(ep, next);
     ep->domain = NULL;
+    virtio_iommu_switch_address_space(sdev);
 }
 static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s,
@@ -323,12 +396,39 @@ static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,

         trace_virtio_iommu_init_iommu_mr(name);

+        memory_region_init(&sdev->root, OBJECT(s), name, UINT64_MAX);
+        address_space_init(&sdev->as, &sdev->root, TYPE_VIRTIO_IOMMU);
+
+        /*
+         * Build the IOMMU-disabled container with an alias to the
+         * shared MR. Aliasing a shared memory region lets the memory
+         * API detect identical FlatViews, so devices in bypass mode
+         * (either because the virtio-iommu driver is not configured
+         * or because of "iommu=pt") can share the same FlatView. This
+         * greatly reduces the total number of FlatViews in the system
+         * and makes the VM run faster.
+         */
+        memory_region_init_alias(&sdev->bypass_mr, OBJECT(s),
+                                 "system", get_system_memory(), 0,
+                                 memory_region_size(get_system_memory()));
+
         memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr),
                                  TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                  OBJECT(s), name,
                                  UINT64_MAX);
-        address_space_init(&sdev->as,
-                           MEMORY_REGION(&sdev->iommu_mr), TYPE_VIRTIO_IOMMU);
+
+        /*
+         * Hook both containers under the root container; we switch
+         * between the iommu and bypass MRs by enabling/disabling the
+         * corresponding sub-container.
+         */
+        memory_region_add_subregion_overlap(&sdev->root, 0,
+                                            MEMORY_REGION(&sdev->iommu_mr),
+                                            0);
+        memory_region_add_subregion_overlap(&sdev->root, 0,
+                                            &sdev->bypass_mr, 0);
+
+        virtio_iommu_switch_address_space(sdev);
         g_free(name);
     }
     return &sdev->as;
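The comment above describes the whole trick: the iommu MR and the bypass alias overlap at the same priority under one root, and only one of them is enabled at a time, so flipping the enable bits re-routes the device's AddressSpace without rebuilding it. A condensed sketch of the toggle (illustrative helper name, not part of the patch):

/* Flip which of two overlapping subregions is live; the memory API then
 * recomputes the FlatView that the device's AddressSpace resolves to. */
static void set_bypass(MemoryRegion *iommu_mr, MemoryRegion *bypass_mr,
                       bool bypass)
{
    /* Disable the active region first so both are never enabled at once */
    memory_region_set_enabled(bypass ? iommu_mr : bypass_mr, false);
    memory_region_set_enabled(bypass ? bypass_mr : iommu_mr, true);
}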
@@ -342,6 +442,7 @@ static int virtio_iommu_attach(VirtIOIOMMU *s,
     uint32_t flags = le32_to_cpu(req->flags);
     VirtIOIOMMUDomain *domain;
     VirtIOIOMMUEndpoint *ep;
+    IOMMUDevice *sdev;

     trace_virtio_iommu_attach(domain_id, ep_id);

@@ -375,6 +476,8 @@ static int virtio_iommu_attach(VirtIOIOMMU *s,
     QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next);

     ep->domain = domain;
+    sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr);
+    virtio_iommu_switch_address_space(sdev);

     /* Replay domain mappings on the associated memory region */
     g_tree_foreach(domain->mappings, virtio_iommu_notify_map_cb,
@@ -642,7 +745,7 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
             tail.status = VIRTIO_IOMMU_S_DEVERR;
             goto out;
         }
-        qemu_mutex_lock(&s->mutex);
+        qemu_rec_mutex_lock(&s->mutex);
         switch (head.type) {
         case VIRTIO_IOMMU_T_ATTACH:
             tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt);
@@ -671,7 +774,7 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
         default:
             tail.status = VIRTIO_IOMMU_S_UNSUPP;
         }
-        qemu_mutex_unlock(&s->mutex);
+        qemu_rec_mutex_unlock(&s->mutex);

 out:
         sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
@@ -759,9 +862,13 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
     sid = virtio_iommu_get_bdf(sdev);

     trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag);
-    qemu_mutex_lock(&s->mutex);
+    qemu_rec_mutex_lock(&s->mutex);

     ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
+
+    if (bypass_allowed) {
+        assert(ep && ep->domain && !ep->domain->bypass);
+    }
+
     if (!ep) {
         if (!bypass_allowed) {
             error_report_once("%s sid=%d is not known!!", __func__, sid);
@@ -843,7 +950,7 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
     trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid);
 unlock:
-    qemu_mutex_unlock(&s->mutex);
+    qemu_rec_mutex_unlock(&s->mutex);
     return entry;
 }
@@ -887,6 +994,7 @@ static void virtio_iommu_set_config(VirtIODevice *vdev,
             return;
         }
         dev_config->bypass = in_config->bypass;
+        virtio_iommu_switch_address_space_all(dev);
     }

     trace_virtio_iommu_set_config(in_config->bypass);
@@ -931,7 +1039,7 @@ static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n)
     sid = virtio_iommu_get_bdf(sdev);

-    qemu_mutex_lock(&s->mutex);
+    qemu_rec_mutex_lock(&s->mutex);

     if (!s->endpoints) {
         goto unlock;
@@ -945,7 +1053,7 @@ static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n)
     g_tree_foreach(ep->domain->mappings, virtio_iommu_remap, mr);

 unlock:
-    qemu_mutex_unlock(&s->mutex);
+    qemu_rec_mutex_unlock(&s->mutex);
 }

 static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr,
@@ -1026,6 +1134,8 @@ static void virtio_iommu_system_reset(void *opaque)
      * system reset
      */
     s->config.bypass = s->boot_bypass;
+    virtio_iommu_switch_address_space_all(s);
+
 }
 static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
@@ -1041,6 +1151,11 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
                              virtio_iommu_handle_command);
     s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);

+    /*
+     * config.bypass is needed to get the initial address space early,
+     * such as in vfio realize
+     */
+    s->config.bypass = s->boot_bypass;
     s->config.page_size_mask = TARGET_PAGE_MASK;
     s->config.input_range.end = UINT64_MAX;
     s->config.domain_range.end = UINT32_MAX;
@@ -1056,7 +1171,7 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
     virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);
     virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS_CONFIG);

-    qemu_mutex_init(&s->mutex);
+    qemu_rec_mutex_init(&s->mutex);

     s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free);

@@ -1084,6 +1199,8 @@ static void virtio_iommu_device_unrealize(DeviceState *dev)
         g_tree_destroy(s->endpoints);
     }

+    qemu_rec_mutex_destroy(&s->mutex);
+
     virtio_delete_queue(s->req_vq);
     virtio_delete_queue(s->event_vq);
     virtio_cleanup(vdev);


@@ -53,4 +53,9 @@ struct CXLHost {
 #define TYPE_PXB_CXL_HOST "pxb-cxl-host"
 OBJECT_DECLARE_SIMPLE_TYPE(CXLHost, PXB_CXL_HOST)

+#define TYPE_CXL_USP "cxl-upstream"
+typedef struct CXLUpstreamPort CXLUpstreamPort;
+DECLARE_INSTANCE_CHECKER(CXLUpstreamPort, CXL_USP, TYPE_CXL_USP)
+CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp);
+
 #endif
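DECLARE_INSTANCE_CHECKER() generates the CXL_USP() checked-cast macro for the new upstream-port type. A hypothetical caller, just to show how these declarations fit together (example_usp_cstate is not part of the patch):

/* Hypothetical helper: fetch the component state from a generic device */
static CXLComponentState *example_usp_cstate(DeviceState *dev)
{
    CXLUpstreamPort *usp = CXL_USP(dev); /* aborts if dev is not cxl-upstream */
    return cxl_usp_to_cstate(usp);
}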


@@ -50,6 +50,7 @@ typedef struct VirtIOCryptoConf {
     uint32_t mac_algo_l;
     uint32_t mac_algo_h;
     uint32_t aead_algo;
+    uint32_t akcipher_algo;

     /* Maximum length of cipher key */
     uint32_t max_cipher_key_len;
@@ -71,9 +72,7 @@ typedef struct VirtIOCryptoReq {
     size_t in_len;
     VirtQueue *vq;
     struct VirtIOCrypto *vcrypto;
-    union {
-        CryptoDevBackendSymOpInfo *sym_op_info;
-    } u;
+    CryptoDevBackendOpInfo op_info;
 } VirtIOCryptoReq;

 typedef struct VirtIOCryptoQueue {


@@ -37,6 +37,8 @@ typedef struct IOMMUDevice {
     int devfn;
     IOMMUMemoryRegion iommu_mr;
     AddressSpace as;
+    MemoryRegion root;          /* The root container of the device */
+    MemoryRegion bypass_mr;     /* The alias of shared memory MR */
 } IOMMUDevice;

 typedef struct IOMMUPciBus {
@@ -56,7 +58,7 @@ struct VirtIOIOMMU {
     ReservedRegion *reserved_regions;
     uint32_t nb_reserved_regions;
     GTree *domains;
-    QemuMutex mutex;
+    QemuRecMutex mutex;
     GTree *endpoints;
     bool boot_bypass;
 };
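Switching the plain mutex to a QemuRecMutex matters because the address-space switching added above can re-enter code that also takes this lock while the command queue already holds it. A minimal sketch of the recursive-lock property, assuming QEMU's qemu/thread.h API (illustrative function and variable names):

#include "qemu/osdep.h"
#include "qemu/thread.h"

static QemuRecMutex lock; /* initialized elsewhere with qemu_rec_mutex_init() */

static void inner(void)
{
    qemu_rec_mutex_lock(&lock); /* same thread: re-acquisition is permitted */
    /* ... */
    qemu_rec_mutex_unlock(&lock);
}

static void outer(void)
{
    qemu_rec_mutex_lock(&lock);
    inner();                    /* would deadlock with a plain QemuMutex */
    qemu_rec_mutex_unlock(&lock);
}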


@@ -50,13 +50,13 @@ typedef struct CryptoDevBackendClient

 enum CryptoDevBackendAlgType {
     CRYPTODEV_BACKEND_ALG_SYM,
+    CRYPTODEV_BACKEND_ALG_ASYM,
     CRYPTODEV_BACKEND_ALG__MAX,
 };

 /**
  * CryptoDevBackendSymSessionInfo:
  *
- * @op_code: operation code (refer to virtio_crypto.h)
  * @cipher_alg: algorithm type of CIPHER
  * @key_len: byte length of cipher key
  * @hash_alg: algorithm type of HASH/MAC
@@ -74,7 +74,6 @@ enum CryptoDevBackendAlgType {
  */
 typedef struct CryptoDevBackendSymSessionInfo {
     /* corresponding with virtio crypto spec */
-    uint32_t op_code;
     uint32_t cipher_alg;
     uint32_t key_len;
     uint32_t hash_alg;
@@ -89,11 +88,36 @@ typedef struct CryptoDevBackendSymSessionInfo {
     uint8_t *auth_key;
 } CryptoDevBackendSymSessionInfo;
+/**
+ * CryptoDevBackendAsymSessionInfo:
+ *
+ * @algo: asymmetric algorithm type
+ * @keytype: type of the key (public or private)
+ * @keylen: byte length of @key
+ * @key: pointer to the key material
+ */
+typedef struct CryptoDevBackendRsaPara {
+    uint32_t padding_algo;
+    uint32_t hash_algo;
+} CryptoDevBackendRsaPara;
+
+typedef struct CryptoDevBackendAsymSessionInfo {
+    /* corresponding with virtio crypto spec */
+    uint32_t algo;
+    uint32_t keytype;
+    uint32_t keylen;
+    uint8_t *key;
+    union {
+        CryptoDevBackendRsaPara rsa;
+    } u;
+} CryptoDevBackendAsymSessionInfo;
+
+typedef struct CryptoDevBackendSessionInfo {
+    uint32_t op_code;
+    union {
+        CryptoDevBackendSymSessionInfo sym_sess_info;
+        CryptoDevBackendAsymSessionInfo asym_sess_info;
+    } u;
+} CryptoDevBackendSessionInfo;
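With op_code hoisted into the common CryptoDevBackendSessionInfo, a backend's create_session hook can dispatch once and pick the matching union arm. A minimal sketch (example_create_sym/example_create_asym are hypothetical helpers; the opcode macros are assumed to come from the virtio_crypto standard headers):

static int64_t example_create_session(CryptoDevBackend *backend,
                                      CryptoDevBackendSessionInfo *sess_info,
                                      uint32_t queue_index, Error **errp)
{
    switch (sess_info->op_code) {
    case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
        /* symmetric parameters live in u.sym_sess_info */
        return example_create_sym(&sess_info->u.sym_sess_info, errp);
    case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
        /* asymmetric key/RSA parameters live in u.asym_sess_info */
        return example_create_asym(&sess_info->u.asym_sess_info, errp);
    default:
        error_setg(errp, "unsupported opcode 0x%x", sess_info->op_code);
        return -1;
    }
}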
 /**
  * CryptoDevBackendSymOpInfo:
  *
- * @session_id: session index which was previously
- *              created by cryptodev_backend_sym_create_session()
  * @aad_len: byte length of additional authenticated data
  * @iv_len: byte length of initialization vector or counter
  * @src_len: byte length of source data
@@ -119,7 +143,6 @@ typedef struct CryptoDevBackendSymSessionInfo {
  *
  */
 typedef struct CryptoDevBackendSymOpInfo {
-    uint64_t session_id;
     uint32_t aad_len;
     uint32_t iv_len;
     uint32_t src_len;
@@ -138,6 +161,33 @@ typedef struct CryptoDevBackendSymOpInfo {
     uint8_t data[];
 } CryptoDevBackendSymOpInfo;
+/**
+ * CryptoDevBackendAsymOpInfo:
+ *
+ * @src_len: byte length of source data
+ * @dst_len: byte length of destination data
+ * @src: pointer to the source data
+ * @dst: pointer to the destination data
+ *
+ */
+typedef struct CryptoDevBackendAsymOpInfo {
+    uint32_t src_len;
+    uint32_t dst_len;
+    uint8_t *src;
+    uint8_t *dst;
+} CryptoDevBackendAsymOpInfo;
+
+typedef struct CryptoDevBackendOpInfo {
+    enum CryptoDevBackendAlgType algtype;
+    uint32_t op_code;
+    uint64_t session_id;
+    union {
+        CryptoDevBackendSymOpInfo *sym_op_info;
+        CryptoDevBackendAsymOpInfo *asym_op_info;
+    } u;
+} CryptoDevBackendOpInfo;
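CryptoDevBackendOpInfo plays the same role on the data path: a common header (algtype, op_code, session_id) plus a union of per-family payloads. A backend's do_op hook might dispatch like this (sketch with hypothetical helper names):

static int example_do_op(CryptoDevBackend *backend,
                         CryptoDevBackendOpInfo *op_info,
                         uint32_t queue_index, Error **errp)
{
    switch (op_info->algtype) {
    case CRYPTODEV_BACKEND_ALG_SYM:
        return example_do_sym(op_info->session_id,
                              op_info->u.sym_op_info, errp);
    case CRYPTODEV_BACKEND_ALG_ASYM:
        return example_do_asym(op_info->session_id,
                               op_info->u.asym_op_info, errp);
    default:
        error_setg(errp, "unsupported algorithm type %d", op_info->algtype);
        return -VIRTIO_CRYPTO_NOTSUPP;
    }
}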
 struct CryptoDevBackendClass {
     ObjectClass parent_class;

@@ -145,13 +195,13 @@ struct CryptoDevBackendClass {
     void (*cleanup)(CryptoDevBackend *backend, Error **errp);

     int64_t (*create_session)(CryptoDevBackend *backend,
-                              CryptoDevBackendSymSessionInfo *sess_info,
+                              CryptoDevBackendSessionInfo *sess_info,
                               uint32_t queue_index, Error **errp);
     int (*close_session)(CryptoDevBackend *backend,
                          uint64_t session_id,
                          uint32_t queue_index, Error **errp);
-    int (*do_sym_op)(CryptoDevBackend *backend,
-                     CryptoDevBackendSymOpInfo *op_info,
+    int (*do_op)(CryptoDevBackend *backend,
+                 CryptoDevBackendOpInfo *op_info,
                  uint32_t queue_index, Error **errp);
 };
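A concrete backend wires these hooks up in its QOM class_init; roughly (a sketch, assuming the CRYPTODEV_BACKEND_CLASS checker generated for TYPE_CRYPTODEV_BACKEND and the example hooks sketched above):

static void example_cryptodev_class_init(ObjectClass *oc, void *data)
{
    CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc);

    bc->create_session = example_create_session;
    bc->close_session  = example_close_session;
    bc->do_op          = example_do_op; /* replaces the old do_sym_op hook */
}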
@@ -190,6 +240,7 @@ struct CryptoDevBackendConf {
     uint32_t mac_algo_l;
     uint32_t mac_algo_h;
     uint32_t aead_algo;
+    uint32_t akcipher_algo;
     /* Maximum length of cipher key */
     uint32_t max_cipher_key_len;
     /* Maximum length of authenticated key */
@@ -247,34 +298,34 @@ void cryptodev_backend_cleanup(
              Error **errp);

 /**
- * cryptodev_backend_sym_create_session:
+ * cryptodev_backend_create_session:
  * @backend: the cryptodev backend object
  * @sess_info: parameters needed to create a session
  * @queue_index: queue index of cryptodev backend client
  * @errp: pointer to a NULL-initialized error object
  *
- * Create a session for symmetric algorithms
+ * Create a session for symmetric/asymmetric algorithms
  *
  * Returns: session id on success, or -1 on error
  */
-int64_t cryptodev_backend_sym_create_session(
+int64_t cryptodev_backend_create_session(
            CryptoDevBackend *backend,
-           CryptoDevBackendSymSessionInfo *sess_info,
+           CryptoDevBackendSessionInfo *sess_info,
            uint32_t queue_index, Error **errp);

 /**
- * cryptodev_backend_sym_close_session:
+ * cryptodev_backend_close_session:
  * @backend: the cryptodev backend object
  * @session_id: the session id
  * @queue_index: queue index of cryptodev backend client
  * @errp: pointer to a NULL-initialized error object
  *
- * Close a session for symmetric algorithms which was previously
- * created by cryptodev_backend_sym_create_session()
+ * Close a session which was previously
+ * created by cryptodev_backend_create_session()
  *
  * Returns: 0 on success, or negative on error
  */
-int cryptodev_backend_sym_close_session(
+int cryptodev_backend_close_session(
            CryptoDevBackend *backend,
            uint64_t session_id,
            uint32_t queue_index, Error **errp);
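Callers see the rename as a drop-in: one create/close pair now covers both algorithm families. A hedged sketch of the lifecycle from device code (hypothetical function; error handling elided, and the asymmetric session fields would be filled from the guest request):

static void example_session_lifecycle(CryptoDevBackend *backend)
{
    CryptoDevBackendSessionInfo info = {
        .op_code = VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION,
        /* .u.asym_sess_info = ... key material from the guest ... */
    };
    Error *err = NULL;
    int64_t session_id;

    session_id = cryptodev_backend_create_session(backend, &info, 0, &err);
    if (session_id < 0) {
        return; /* report err */
    }
    /* ... submit cryptodev_backend_crypto_operation() against session_id ... */
    cryptodev_backend_close_session(backend, session_id, 0, &err);
}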