Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

pci,pc,virtio: features, tests, fixes, cleanups

lots of acpi rework
first version of biosbits infrastructure
ASID support in vhost-vdpa
core_count2 support in smbios
PCIe DOE emulation
virtio vq reset
HMAT support
part of infrastructure for viommu support in vhost-vdpa
VTD PASID support
fixes, tests all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmNpXDkPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRpD0AH/2G8ZPrgrxJC9y3uD5/5J6QRzO+TsDYbg5ut
# uBf4rKSHHzcu6zdyAfsrhbAKKzyD4HrEGNXZrBjnKM1xCiB/SGBcDIWntwrca2+s
# 5Dpbi4xvd4tg6tVD4b47XNDCcn2uUbeI0e2M5QIbtCmzdi/xKbFAfl5G8DQp431X
# Kmz79G4CdKWyjVlM0HoYmdCw/4FxkdjD02tE/Uc5YMrePNaEg5Bw4hjCHbx1b6ur
# 6gjeXAtncm9s4sO0l+sIdyiqlxiTry9FSr35WaQ0qPU+Og5zaf1EiWfdl8TRo4qU
# EAATw5A4hyw11GfOGp7oOVkTGvcNB/H7aIxD7emdWZV8+BMRPKo=
# =zTCn
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 07 Nov 2022 14:27:53 EST
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (83 commits)
  checkpatch: better pattern for inline comments
  hw/virtio: introduce virtio_device_should_start
  tests/acpi: update tables for new core count test
  bios-tables-test: add test for number of cores > 255
  tests/acpi: allow changes for core_count2 test
  bios-tables-test: teach test to use smbios 3.0 tables
  hw/smbios: add core_count2 to smbios table type 4
  vhost-user: Support vhost_dev_start
  vhost: Change the sequence of device start
  intel-iommu: PASID support
  intel-iommu: convert VTD_PE_GET_FPD_ERR() to be a function
  intel-iommu: drop VTDBus
  intel-iommu: don't warn guest errors when getting rid2pasid entry
  vfio: move implement of vfio_get_xlat_addr() to memory.c
  tests: virt: Update expected *.acpihmatvirt tables
  tests: acpi: aarch64/virt: add a test for hmat nodes with no initiators
  hw/arm/virt: Enable HMAT on arm virt machine
  tests: Add HMAT AArch64/virt empty table files
  tests: acpi: q35: update expected blobs *.hmat-noinitiators expected HMAT:
  tests: acpi: q35: add test for hmat nodes without initiators
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit f21f1cfeb9 by Stefan Hajnoczi, 2022-11-07 18:43:56 -05:00
155 changed files with 7996 additions and 1020 deletions

MAINTAINERS

@@ -1835,6 +1835,13 @@ F: qapi/pci.json
F: docs/pci*
F: docs/specs/*pci*
PCIE DOE
M: Huai-Cheng Kuo <hchkuo@avery-design.com.tw>
M: Chris Browy <cbrowy@avery-design.com>
S: Supported
F: include/hw/pci/pcie_doe.h
F: hw/pci/pcie_doe.c
ACPI/SMBIOS
M: Michael S. Tsirkin <mst@redhat.com>
M: Igor Mammedov <imammedo@redhat.com>
@@ -1862,6 +1869,13 @@ S: Supported
F: hw/acpi/viot.c
F: hw/acpi/viot.h
ACPI/AVOCADO/BIOSBITS
M: Ani Sinha <ani@anisinha.ca>
S: Supported
F: tests/avocado/acpi-bits/*
F: tests/avocado/acpi-bits.py
F: docs/devel/acpi-bits.rst
ACPI/HEST/GHES
R: Dongjiu Geng <gengdongjiu1@gmail.com>
L: qemu-arm@nongnu.org
@@ -2003,6 +2017,7 @@ S: Supported
F: hw/*/virtio*
F: hw/virtio/Makefile.objs
F: hw/virtio/trace-events
F: qapi/virtio.json
F: net/vhost-user.c
F: include/hw/virtio/

backends/cryptodev-builtin.c

@@ -355,42 +355,62 @@ static int cryptodev_builtin_create_akcipher_session(
return index;
}
static int64_t cryptodev_builtin_create_session(
static int cryptodev_builtin_create_session(
CryptoDevBackend *backend,
CryptoDevBackendSessionInfo *sess_info,
uint32_t queue_index, Error **errp)
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendBuiltin *builtin =
CRYPTODEV_BACKEND_BUILTIN(backend);
CryptoDevBackendSymSessionInfo *sym_sess_info;
CryptoDevBackendAsymSessionInfo *asym_sess_info;
int ret, status;
Error *local_error = NULL;
switch (sess_info->op_code) {
case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
sym_sess_info = &sess_info->u.sym_sess_info;
return cryptodev_builtin_create_cipher_session(
builtin, sym_sess_info, errp);
ret = cryptodev_builtin_create_cipher_session(
builtin, sym_sess_info, &local_error);
break;
case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
asym_sess_info = &sess_info->u.asym_sess_info;
return cryptodev_builtin_create_akcipher_session(
builtin, asym_sess_info, errp);
ret = cryptodev_builtin_create_akcipher_session(
builtin, asym_sess_info, &local_error);
break;
case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
default:
error_setg(errp, "Unsupported opcode :%" PRIu32 "",
error_setg(&local_error, "Unsupported opcode :%" PRIu32 "",
sess_info->op_code);
return -1;
return -VIRTIO_CRYPTO_NOTSUPP;
}
return -1;
if (local_error) {
error_report_err(local_error);
}
if (ret < 0) {
status = -VIRTIO_CRYPTO_ERR;
} else {
sess_info->session_id = ret;
status = VIRTIO_CRYPTO_OK;
}
if (cb) {
cb(opaque, status);
}
return 0;
}
static int cryptodev_builtin_close_session(
CryptoDevBackend *backend,
uint64_t session_id,
uint32_t queue_index, Error **errp)
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendBuiltin *builtin =
CRYPTODEV_BACKEND_BUILTIN(backend);
@@ -407,6 +427,9 @@ static int cryptodev_builtin_close_session(
g_free(session);
builtin->sessions[session_id] = NULL;
if (cb) {
cb(opaque, VIRTIO_CRYPTO_OK);
}
return 0;
}
@@ -506,7 +529,9 @@ static int cryptodev_builtin_asym_operation(
static int cryptodev_builtin_operation(
CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info,
uint32_t queue_index, Error **errp)
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendBuiltin *builtin =
CRYPTODEV_BACKEND_BUILTIN(backend);
@@ -514,11 +539,12 @@ static int cryptodev_builtin_operation(
CryptoDevBackendSymOpInfo *sym_op_info;
CryptoDevBackendAsymOpInfo *asym_op_info;
enum CryptoDevBackendAlgType algtype = op_info->algtype;
int ret = -VIRTIO_CRYPTO_ERR;
int status = -VIRTIO_CRYPTO_ERR;
Error *local_error = NULL;
if (op_info->session_id >= MAX_NUM_SESSIONS ||
builtin->sessions[op_info->session_id] == NULL) {
error_setg(errp, "Cannot find a valid session id: %" PRIu64 "",
error_setg(&local_error, "Cannot find a valid session id: %" PRIu64 "",
op_info->session_id);
return -VIRTIO_CRYPTO_INVSESS;
}
@@ -526,14 +552,21 @@
sess = builtin->sessions[op_info->session_id];
if (algtype == CRYPTODEV_BACKEND_ALG_SYM) {
sym_op_info = op_info->u.sym_op_info;
ret = cryptodev_builtin_sym_operation(sess, sym_op_info, errp);
status = cryptodev_builtin_sym_operation(sess, sym_op_info,
&local_error);
} else if (algtype == CRYPTODEV_BACKEND_ALG_ASYM) {
asym_op_info = op_info->u.asym_op_info;
ret = cryptodev_builtin_asym_operation(sess, op_info->op_code,
asym_op_info, errp);
status = cryptodev_builtin_asym_operation(sess, op_info->op_code,
asym_op_info, &local_error);
}
return ret;
if (local_error) {
error_report_err(local_error);
}
if (cb) {
cb(opaque, status);
}
return 0;
}
static void cryptodev_builtin_cleanup(
@@ -548,7 +581,7 @@ static void cryptodev_builtin_cleanup(
for (i = 0; i < MAX_NUM_SESSIONS; i++) {
if (builtin->sessions[i] != NULL) {
cryptodev_builtin_close_session(backend, i, 0, &error_abort);
cryptodev_builtin_close_session(backend, i, 0, NULL, NULL);
}
}

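This same conversion repeats across the series: each backend hook now takes a CryptoDevCompletionFunc plus an opaque pointer instead of an Error **, returns 0 once the request is accepted (or a negated VIRTIO_CRYPTO_* code when it cannot be queued), and delivers the real status through the callback. A minimal caller-side sketch, assuming the CryptoDevCompletionFunc typedef is void (*)(void *opaque, int ret) as the calls above suggest; the request struct and its fields are illustrative only:

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "sysemu/cryptodev.h"

typedef struct MySessionRequest {
    bool done;                /* illustrative bookkeeping */
} MySessionRequest;

/* Runs when the backend finishes, possibly long after the submit call
 * returned (cryptodev-lkcf completes from a worker thread via eventfd). */
static void my_create_session_done(void *opaque, int status)
{
    MySessionRequest *req = opaque;

    req->done = true;
    if (status != VIRTIO_CRYPTO_OK) {
        /* status is a negated VIRTIO_CRYPTO_* error code */
        error_report("session creation failed: %d", status);
    }
}

/* Submission site, given a backend and a filled-in sess_info:
 *     cryptodev_backend_create_session(backend, &sess_info, 0,
 *                                      my_create_session_done, req);
 */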
backends/cryptodev-lkcf.c (new file, 645 lines)

@@ -0,0 +1,645 @@
/*
* QEMU Cryptodev backend for QEMU cipher APIs
*
* Copyright (c) 2022 Bytedance.Inc
*
* Authors:
* lei he <helei.sig11@bytedance.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
*/
#include "qemu/osdep.h"
#include "crypto/cipher.h"
#include "crypto/akcipher.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qom/object.h"
#include "sysemu/cryptodev.h"
#include "standard-headers/linux/virtio_crypto.h"
#include <keyutils.h>
#include <sys/eventfd.h>
/**
* @TYPE_CRYPTODEV_BACKEND_LKCF:
* name of backend that uses linux kernel crypto framework
*/
#define TYPE_CRYPTODEV_BACKEND_LKCF "cryptodev-backend-lkcf"
OBJECT_DECLARE_SIMPLE_TYPE(CryptoDevBackendLKCF, CRYPTODEV_BACKEND_LKCF)
#define INVALID_KEY_ID -1
#define MAX_SESSIONS 256
#define NR_WORKER_THREAD 64
#define KCTL_KEY_TYPE_PKEY "asymmetric"
/**
* Here the key is uploaded to the thread-keyring of the worker thread, at
* least until linux-6.0:
* 1. the process keyring seems to behave unexpectedly if the main thread
* does not create the keyring before creating any other thread.
* 2. at present, the guest kernel never performs multiple operations on a
* session.
* 3. it reduces the load on the main loop because the key passed by the
* guest kernel has already been checked.
*/
#define KCTL_KEY_RING KEY_SPEC_THREAD_KEYRING
typedef struct CryptoDevBackendLKCFSession {
uint8_t *key;
size_t keylen;
QCryptoAkCipherKeyType keytype;
QCryptoAkCipherOptions akcipher_opts;
} CryptoDevBackendLKCFSession;
typedef struct CryptoDevBackendLKCF CryptoDevBackendLKCF;
typedef struct CryptoDevLKCFTask CryptoDevLKCFTask;
struct CryptoDevLKCFTask {
CryptoDevBackendLKCFSession *sess;
CryptoDevBackendOpInfo *op_info;
CryptoDevCompletionFunc cb;
void *opaque;
int status;
CryptoDevBackendLKCF *lkcf;
QSIMPLEQ_ENTRY(CryptoDevLKCFTask) queue;
};
typedef struct CryptoDevBackendLKCF {
CryptoDevBackend parent_obj;
CryptoDevBackendLKCFSession *sess[MAX_SESSIONS];
QSIMPLEQ_HEAD(, CryptoDevLKCFTask) requests;
QSIMPLEQ_HEAD(, CryptoDevLKCFTask) responses;
QemuMutex mutex;
QemuCond cond;
QemuMutex rsp_mutex;
/**
* There is no async interface for asymmetric keys like there is with AF_ALG
* sockets; we don't seem to have a better way than creating lots of threads.
*/
QemuThread worker_threads[NR_WORKER_THREAD];
bool running;
int eventfd;
} CryptoDevBackendLKCF;
static void *cryptodev_lkcf_worker(void *arg);
static int cryptodev_lkcf_close_session(CryptoDevBackend *backend,
uint64_t session_id,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque);
static void cryptodev_lkcf_handle_response(void *opaque)
{
CryptoDevBackendLKCF *lkcf = (CryptoDevBackendLKCF *)opaque;
QSIMPLEQ_HEAD(, CryptoDevLKCFTask) responses;
CryptoDevLKCFTask *task, *next;
eventfd_t nevent;
QSIMPLEQ_INIT(&responses);
eventfd_read(lkcf->eventfd, &nevent);
qemu_mutex_lock(&lkcf->rsp_mutex);
QSIMPLEQ_PREPEND(&responses, &lkcf->responses);
qemu_mutex_unlock(&lkcf->rsp_mutex);
QSIMPLEQ_FOREACH_SAFE(task, &responses, queue, next) {
if (task->cb) {
task->cb(task->opaque, task->status);
}
g_free(task);
}
}
static int cryptodev_lkcf_set_op_desc(QCryptoAkCipherOptions *opts,
char *key_desc,
size_t desc_len,
Error **errp)
{
QCryptoAkCipherOptionsRSA *rsa_opt;
if (opts->alg != QCRYPTO_AKCIPHER_ALG_RSA) {
error_setg(errp, "Unsupported alg: %u", opts->alg);
return -1;
}
rsa_opt = &opts->u.rsa;
if (rsa_opt->padding_alg == QCRYPTO_RSA_PADDING_ALG_PKCS1) {
snprintf(key_desc, desc_len, "enc=%s hash=%s",
QCryptoRSAPaddingAlgorithm_str(rsa_opt->padding_alg),
QCryptoHashAlgorithm_str(rsa_opt->hash_alg));
} else {
snprintf(key_desc, desc_len, "enc=%s",
QCryptoRSAPaddingAlgorithm_str(rsa_opt->padding_alg));
}
return 0;
}
static int cryptodev_lkcf_set_rsa_opt(int virtio_padding_alg,
int virtio_hash_alg,
QCryptoAkCipherOptionsRSA *opt,
Error **errp)
{
if (virtio_padding_alg == VIRTIO_CRYPTO_RSA_PKCS1_PADDING) {
opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_PKCS1;
switch (virtio_hash_alg) {
case VIRTIO_CRYPTO_RSA_MD5:
opt->hash_alg = QCRYPTO_HASH_ALG_MD5;
break;
case VIRTIO_CRYPTO_RSA_SHA1:
opt->hash_alg = QCRYPTO_HASH_ALG_SHA1;
break;
case VIRTIO_CRYPTO_RSA_SHA256:
opt->hash_alg = QCRYPTO_HASH_ALG_SHA256;
break;
case VIRTIO_CRYPTO_RSA_SHA512:
opt->hash_alg = QCRYPTO_HASH_ALG_SHA512;
break;
default:
error_setg(errp, "Unsupported rsa hash algo: %d", virtio_hash_alg);
return -1;
}
return 0;
}
if (virtio_padding_alg == VIRTIO_CRYPTO_RSA_RAW_PADDING) {
opt->padding_alg = QCRYPTO_RSA_PADDING_ALG_RAW;
return 0;
}
error_setg(errp, "Unsupported rsa padding algo: %u", virtio_padding_alg);
return -1;
}
static int cryptodev_lkcf_get_unused_session_index(CryptoDevBackendLKCF *lkcf)
{
size_t i;
for (i = 0; i < MAX_SESSIONS; i++) {
if (lkcf->sess[i] == NULL) {
return i;
}
}
return -1;
}
static void cryptodev_lkcf_init(CryptoDevBackend *backend, Error **errp)
{
/* Only support one queue */
int queues = backend->conf.peers.queues, i;
CryptoDevBackendClient *cc;
CryptoDevBackendLKCF *lkcf =
CRYPTODEV_BACKEND_LKCF(backend);
if (queues != 1) {
error_setg(errp,
"Only support one queue in cryptodev-lkcf backend");
return;
}
lkcf->eventfd = eventfd(0, 0);
if (lkcf->eventfd < 0) {
error_setg(errp, "Failed to create eventfd: %d", errno);
return;
}
cc = cryptodev_backend_new_client("cryptodev-lkcf", NULL);
cc->info_str = g_strdup_printf("cryptodev-lkcf0");
cc->queue_index = 0;
cc->type = CRYPTODEV_BACKEND_TYPE_LKCF;
backend->conf.peers.ccs[0] = cc;
backend->conf.crypto_services =
1u << VIRTIO_CRYPTO_SERVICE_AKCIPHER;
backend->conf.akcipher_algo = 1u << VIRTIO_CRYPTO_AKCIPHER_RSA;
lkcf->running = true;
QSIMPLEQ_INIT(&lkcf->requests);
QSIMPLEQ_INIT(&lkcf->responses);
qemu_mutex_init(&lkcf->mutex);
qemu_mutex_init(&lkcf->rsp_mutex);
qemu_cond_init(&lkcf->cond);
for (i = 0; i < NR_WORKER_THREAD; i++) {
qemu_thread_create(&lkcf->worker_threads[i], "lkcf-worker",
cryptodev_lkcf_worker, lkcf, 0);
}
qemu_set_fd_handler(
lkcf->eventfd, cryptodev_lkcf_handle_response, NULL, lkcf);
cryptodev_backend_set_ready(backend, true);
}
static void cryptodev_lkcf_cleanup(CryptoDevBackend *backend, Error **errp)
{
CryptoDevBackendLKCF *lkcf = CRYPTODEV_BACKEND_LKCF(backend);
size_t i;
int queues = backend->conf.peers.queues;
CryptoDevBackendClient *cc;
CryptoDevLKCFTask *task, *next;
qemu_mutex_lock(&lkcf->mutex);
lkcf->running = false;
qemu_mutex_unlock(&lkcf->mutex);
qemu_cond_broadcast(&lkcf->cond);
close(lkcf->eventfd);
for (i = 0; i < NR_WORKER_THREAD; i++) {
qemu_thread_join(&lkcf->worker_threads[i]);
}
QSIMPLEQ_FOREACH_SAFE(task, &lkcf->requests, queue, next) {
if (task->cb) {
task->cb(task->opaque, task->status);
}
g_free(task);
}
QSIMPLEQ_FOREACH_SAFE(task, &lkcf->responses, queue, next) {
if (task->cb) {
task->cb(task->opaque, task->status);
}
g_free(task);
}
qemu_mutex_destroy(&lkcf->mutex);
qemu_cond_destroy(&lkcf->cond);
qemu_mutex_destroy(&lkcf->rsp_mutex);
for (i = 0; i < MAX_SESSIONS; i++) {
if (lkcf->sess[i] != NULL) {
cryptodev_lkcf_close_session(backend, i, 0, NULL, NULL);
}
}
for (i = 0; i < queues; i++) {
cc = backend->conf.peers.ccs[i];
if (cc) {
cryptodev_backend_free_client(cc);
backend->conf.peers.ccs[i] = NULL;
}
}
cryptodev_backend_set_ready(backend, false);
}
static void cryptodev_lkcf_execute_task(CryptoDevLKCFTask *task)
{
CryptoDevBackendLKCFSession *session = task->sess;
CryptoDevBackendAsymOpInfo *asym_op_info;
bool kick = false;
int ret, status, op_code = task->op_info->op_code;
size_t p8info_len;
g_autofree uint8_t *p8info = NULL;
Error *local_error = NULL;
key_serial_t key_id = INVALID_KEY_ID;
char op_desc[64];
g_autoptr(QCryptoAkCipher) akcipher = NULL;
/**
* We only offload private key sessions:
* 1. currently, the Linux kernel can only accept public keys wrapped
* with X.509 certificates, but unfortunately the cost of making a
* certificate from a public key is too high.
* 2. generally, public key related computation is fast; just compute it
* with the thread-pool.
*/
if (session->keytype == QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE) {
if (qcrypto_akcipher_export_p8info(&session->akcipher_opts,
session->key, session->keylen,
&p8info, &p8info_len,
&local_error) != 0 ||
cryptodev_lkcf_set_op_desc(&session->akcipher_opts, op_desc,
sizeof(op_desc), &local_error) != 0) {
error_report_err(local_error);
} else {
key_id = add_key(KCTL_KEY_TYPE_PKEY, "lkcf-backend-priv-key",
p8info, p8info_len, KCTL_KEY_RING);
}
}
if (key_id < 0) {
if (!qcrypto_akcipher_supports(&session->akcipher_opts)) {
status = -VIRTIO_CRYPTO_NOTSUPP;
goto out;
}
akcipher = qcrypto_akcipher_new(&session->akcipher_opts,
session->keytype,
session->key, session->keylen,
&local_error);
if (!akcipher) {
status = -VIRTIO_CRYPTO_ERR;
goto out;
}
}
asym_op_info = task->op_info->u.asym_op_info;
switch (op_code) {
case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
if (key_id >= 0) {
ret = keyctl_pkey_encrypt(key_id, op_desc,
asym_op_info->src, asym_op_info->src_len,
asym_op_info->dst, asym_op_info->dst_len);
} else {
ret = qcrypto_akcipher_encrypt(akcipher,
asym_op_info->src, asym_op_info->src_len,
asym_op_info->dst, asym_op_info->dst_len, &local_error);
}
break;
case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
if (key_id >= 0) {
ret = keyctl_pkey_decrypt(key_id, op_desc,
asym_op_info->src, asym_op_info->src_len,
asym_op_info->dst, asym_op_info->dst_len);
} else {
ret = qcrypto_akcipher_decrypt(akcipher,
asym_op_info->src, asym_op_info->src_len,
asym_op_info->dst, asym_op_info->dst_len, &local_error);
}
break;
case VIRTIO_CRYPTO_AKCIPHER_SIGN:
if (key_id >= 0) {
ret = keyctl_pkey_sign(key_id, op_desc,
asym_op_info->src, asym_op_info->src_len,
asym_op_info->dst, asym_op_info->dst_len);
} else {
ret = qcrypto_akcipher_sign(akcipher,
asym_op_info->src, asym_op_info->src_len,
asym_op_info->dst, asym_op_info->dst_len, &local_error);
}
break;
case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
if (key_id >= 0) {
ret = keyctl_pkey_verify(key_id, op_desc,
asym_op_info->src, asym_op_info->src_len,
asym_op_info->dst, asym_op_info->dst_len);
} else {
ret = qcrypto_akcipher_verify(akcipher,
asym_op_info->src, asym_op_info->src_len,
asym_op_info->dst, asym_op_info->dst_len, &local_error);
}
break;
default:
error_setg(&local_error, "Unknown opcode: %u", op_code);
status = -VIRTIO_CRYPTO_ERR;
goto out;
}
if (ret < 0) {
if (!local_error) {
if (errno != EKEYREJECTED) {
error_report("Failed do operation with keyctl: %d", errno);
}
} else {
error_report_err(local_error);
}
status = op_code == VIRTIO_CRYPTO_AKCIPHER_VERIFY ?
-VIRTIO_CRYPTO_KEY_REJECTED : -VIRTIO_CRYPTO_ERR;
} else {
status = VIRTIO_CRYPTO_OK;
asym_op_info->dst_len = ret;
}
out:
if (key_id >= 0) {
keyctl_unlink(key_id, KCTL_KEY_RING);
}
task->status = status;
qemu_mutex_lock(&task->lkcf->rsp_mutex);
if (QSIMPLEQ_EMPTY(&task->lkcf->responses)) {
kick = true;
}
QSIMPLEQ_INSERT_TAIL(&task->lkcf->responses, task, queue);
qemu_mutex_unlock(&task->lkcf->rsp_mutex);
if (kick) {
eventfd_write(task->lkcf->eventfd, 1);
}
}
static void *cryptodev_lkcf_worker(void *arg)
{
CryptoDevBackendLKCF *backend = (CryptoDevBackendLKCF *)arg;
CryptoDevLKCFTask *task;
for (;;) {
task = NULL;
qemu_mutex_lock(&backend->mutex);
while (backend->running && QSIMPLEQ_EMPTY(&backend->requests)) {
qemu_cond_wait(&backend->cond, &backend->mutex);
}
if (backend->running) {
task = QSIMPLEQ_FIRST(&backend->requests);
QSIMPLEQ_REMOVE_HEAD(&backend->requests, queue);
}
qemu_mutex_unlock(&backend->mutex);
/* stopped */
if (!task) {
break;
}
cryptodev_lkcf_execute_task(task);
}
return NULL;
}
static int cryptodev_lkcf_operation(
CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendLKCF *lkcf =
CRYPTODEV_BACKEND_LKCF(backend);
CryptoDevBackendLKCFSession *sess;
enum CryptoDevBackendAlgType algtype = op_info->algtype;
CryptoDevLKCFTask *task;
if (op_info->session_id >= MAX_SESSIONS ||
lkcf->sess[op_info->session_id] == NULL) {
error_report("Cannot find a valid session id: %" PRIu64 "",
op_info->session_id);
return -VIRTIO_CRYPTO_INVSESS;
}
sess = lkcf->sess[op_info->session_id];
if (algtype != CRYPTODEV_BACKEND_ALG_ASYM) {
error_report("algtype not supported: %u", algtype);
return -VIRTIO_CRYPTO_NOTSUPP;
}
task = g_new0(CryptoDevLKCFTask, 1);
task->op_info = op_info;
task->cb = cb;
task->opaque = opaque;
task->sess = sess;
task->lkcf = lkcf;
task->status = -VIRTIO_CRYPTO_ERR;
qemu_mutex_lock(&lkcf->mutex);
QSIMPLEQ_INSERT_TAIL(&lkcf->requests, task, queue);
qemu_mutex_unlock(&lkcf->mutex);
qemu_cond_signal(&lkcf->cond);
return VIRTIO_CRYPTO_OK;
}
static int cryptodev_lkcf_create_asym_session(
CryptoDevBackendLKCF *lkcf,
CryptoDevBackendAsymSessionInfo *sess_info,
uint64_t *session_id)
{
Error *local_error = NULL;
int index;
g_autofree CryptoDevBackendLKCFSession *sess =
g_new0(CryptoDevBackendLKCFSession, 1);
switch (sess_info->algo) {
case VIRTIO_CRYPTO_AKCIPHER_RSA:
sess->akcipher_opts.alg = QCRYPTO_AKCIPHER_ALG_RSA;
if (cryptodev_lkcf_set_rsa_opt(
sess_info->u.rsa.padding_algo, sess_info->u.rsa.hash_algo,
&sess->akcipher_opts.u.rsa, &local_error) != 0) {
error_report_err(local_error);
return -VIRTIO_CRYPTO_ERR;
}
break;
default:
error_report("Unsupported asym alg %u", sess_info->algo);
return -VIRTIO_CRYPTO_NOTSUPP;
}
switch (sess_info->keytype) {
case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC:
sess->keytype = QCRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
break;
case VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE:
sess->keytype = QCRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
break;
default:
error_report("Unknown akcipher keytype: %u", sess_info->keytype);
return -VIRTIO_CRYPTO_ERR;
}
index = cryptodev_lkcf_get_unused_session_index(lkcf);
if (index < 0) {
error_report("Total number of sessions created exceeds %u",
MAX_SESSIONS);
return -VIRTIO_CRYPTO_ERR;
}
sess->keylen = sess_info->keylen;
sess->key = g_malloc(sess_info->keylen);
memcpy(sess->key, sess_info->key, sess_info->keylen);
lkcf->sess[index] = g_steal_pointer(&sess);
*session_id = index;
return VIRTIO_CRYPTO_OK;
}
static int cryptodev_lkcf_create_session(
CryptoDevBackend *backend,
CryptoDevBackendSessionInfo *sess_info,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendAsymSessionInfo *asym_sess_info;
CryptoDevBackendLKCF *lkcf =
CRYPTODEV_BACKEND_LKCF(backend);
int ret;
switch (sess_info->op_code) {
case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
asym_sess_info = &sess_info->u.asym_sess_info;
ret = cryptodev_lkcf_create_asym_session(
lkcf, asym_sess_info, &sess_info->session_id);
break;
default:
ret = -VIRTIO_CRYPTO_NOTSUPP;
error_report("Unsupported opcode: %" PRIu32 "",
sess_info->op_code);
break;
}
if (cb) {
cb(opaque, ret);
}
return 0;
}
static int cryptodev_lkcf_close_session(CryptoDevBackend *backend,
uint64_t session_id,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendLKCF *lkcf = CRYPTODEV_BACKEND_LKCF(backend);
CryptoDevBackendLKCFSession *session;
assert(session_id < MAX_SESSIONS && lkcf->sess[session_id]);
session = lkcf->sess[session_id];
lkcf->sess[session_id] = NULL;
g_free(session->key);
g_free(session);
if (cb) {
cb(opaque, VIRTIO_CRYPTO_OK);
}
return 0;
}
static void cryptodev_lkcf_class_init(ObjectClass *oc, void *data)
{
CryptoDevBackendClass *bc = CRYPTODEV_BACKEND_CLASS(oc);
bc->init = cryptodev_lkcf_init;
bc->cleanup = cryptodev_lkcf_cleanup;
bc->create_session = cryptodev_lkcf_create_session;
bc->close_session = cryptodev_lkcf_close_session;
bc->do_op = cryptodev_lkcf_operation;
}
static const TypeInfo cryptodev_builtin_info = {
.name = TYPE_CRYPTODEV_BACKEND_LKCF,
.parent = TYPE_CRYPTODEV_BACKEND,
.class_init = cryptodev_lkcf_class_init,
.instance_size = sizeof(CryptoDevBackendLKCF),
};
static void cryptodev_lkcf_register_types(void)
{
type_register_static(&cryptodev_builtin_info);
}
type_init(cryptodev_lkcf_register_types);
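Since the type is registered under TYPE_CRYPTODEV_BACKEND, the new backend should be instantiable like the existing builtin one, presumably along the lines of "-object cryptodev-backend-lkcf,id=cryptodev0 -device virtio-crypto-pci,cryptodev=cryptodev0" (a command line inferred from the type name above rather than taken from this series).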

backends/cryptodev-vhost-user.c

@@ -259,13 +259,18 @@ static int64_t cryptodev_vhost_user_sym_create_session(
return -1;
}
static int64_t cryptodev_vhost_user_create_session(
static int cryptodev_vhost_user_create_session(
CryptoDevBackend *backend,
CryptoDevBackendSessionInfo *sess_info,
uint32_t queue_index, Error **errp)
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
uint32_t op_code = sess_info->op_code;
CryptoDevBackendSymSessionInfo *sym_sess_info;
int64_t ret;
Error *local_error = NULL;
int status;
switch (op_code) {
case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
@@ -273,27 +278,42 @@ static int64_t cryptodev_vhost_user_create_session(
case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
sym_sess_info = &sess_info->u.sym_sess_info;
return cryptodev_vhost_user_sym_create_session(backend, sym_sess_info,
queue_index, errp);
default:
error_setg(errp, "Unsupported opcode :%" PRIu32 "",
sess_info->op_code);
return -1;
ret = cryptodev_vhost_user_sym_create_session(backend, sym_sess_info,
queue_index, &local_error);
break;
default:
error_setg(&local_error, "Unsupported opcode :%" PRIu32 "",
sess_info->op_code);
return -VIRTIO_CRYPTO_NOTSUPP;
}
return -1;
if (local_error) {
error_report_err(local_error);
}
if (ret < 0) {
status = -VIRTIO_CRYPTO_ERR;
} else {
sess_info->session_id = ret;
status = VIRTIO_CRYPTO_OK;
}
if (cb) {
cb(opaque, status);
}
return 0;
}
static int cryptodev_vhost_user_close_session(
CryptoDevBackend *backend,
uint64_t session_id,
uint32_t queue_index, Error **errp)
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendClient *cc =
backend->conf.peers.ccs[queue_index];
CryptoDevBackendVhost *vhost_crypto;
int ret;
int ret = -1, status;
vhost_crypto = cryptodev_vhost_user_get_vhost(cc, backend, queue_index);
if (vhost_crypto) {
@@ -301,12 +321,17 @@ static int cryptodev_vhost_user_close_session(
ret = dev->vhost_ops->vhost_crypto_close_session(dev,
session_id);
if (ret < 0) {
return -1;
status = -VIRTIO_CRYPTO_ERR;
} else {
return 0;
status = VIRTIO_CRYPTO_OK;
}
} else {
status = -VIRTIO_CRYPTO_NOTSUPP;
}
return -1;
if (cb) {
cb(opaque, status);
}
return 0;
}
static void cryptodev_vhost_user_cleanup(

backends/cryptodev.c

@@ -26,6 +26,7 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qom/object_interfaces.h"
#include "hw/virtio/virtio-crypto.h"
@@ -72,69 +73,72 @@ void cryptodev_backend_cleanup(
}
}
int64_t cryptodev_backend_create_session(
int cryptodev_backend_create_session(
CryptoDevBackend *backend,
CryptoDevBackendSessionInfo *sess_info,
uint32_t queue_index, Error **errp)
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendClass *bc =
CRYPTODEV_BACKEND_GET_CLASS(backend);
if (bc->create_session) {
return bc->create_session(backend, sess_info, queue_index, errp);
return bc->create_session(backend, sess_info, queue_index, cb, opaque);
}
return -1;
return -VIRTIO_CRYPTO_NOTSUPP;
}
int cryptodev_backend_close_session(
CryptoDevBackend *backend,
uint64_t session_id,
uint32_t queue_index, Error **errp)
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendClass *bc =
CRYPTODEV_BACKEND_GET_CLASS(backend);
if (bc->close_session) {
return bc->close_session(backend, session_id, queue_index, errp);
return bc->close_session(backend, session_id, queue_index, cb, opaque);
}
return -1;
return -VIRTIO_CRYPTO_NOTSUPP;
}
static int cryptodev_backend_operation(
CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info,
uint32_t queue_index, Error **errp)
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque)
{
CryptoDevBackendClass *bc =
CRYPTODEV_BACKEND_GET_CLASS(backend);
if (bc->do_op) {
return bc->do_op(backend, op_info, queue_index, errp);
return bc->do_op(backend, op_info, queue_index, cb, opaque);
}
return -VIRTIO_CRYPTO_ERR;
return -VIRTIO_CRYPTO_NOTSUPP;
}
int cryptodev_backend_crypto_operation(
CryptoDevBackend *backend,
void *opaque,
uint32_t queue_index, Error **errp)
void *opaque1,
uint32_t queue_index,
CryptoDevCompletionFunc cb, void *opaque2)
{
VirtIOCryptoReq *req = opaque;
VirtIOCryptoReq *req = opaque1;
CryptoDevBackendOpInfo *op_info = &req->op_info;
enum CryptoDevBackendAlgType algtype = req->flags;
if ((algtype != CRYPTODEV_BACKEND_ALG_SYM)
&& (algtype != CRYPTODEV_BACKEND_ALG_ASYM)) {
error_setg(errp, "Unsupported cryptodev alg type: %" PRIu32 "",
algtype);
error_report("Unsupported cryptodev alg type: %" PRIu32 "", algtype);
return -VIRTIO_CRYPTO_NOTSUPP;
}
return cryptodev_backend_operation(backend, op_info, queue_index, errp);
return cryptodev_backend_operation(backend, op_info, queue_index,
cb, opaque2);
}
static void

backends/meson.build

@@ -12,6 +12,9 @@ softmmu_ss.add([files(
softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files('rng-random.c'))
softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files('hostmem-file.c'))
softmmu_ss.add(when: 'CONFIG_LINUX', if_true: files('hostmem-memfd.c'))
if keyutils.found()
softmmu_ss.add(keyutils, files('cryptodev-lkcf.c'))
endif
if have_vhost_user
softmmu_ss.add(when: 'CONFIG_VIRTIO', if_true: files('vhost-user.c'))
endif

crypto/akcipher.c

@@ -22,6 +22,8 @@
#include "qemu/osdep.h"
#include "crypto/akcipher.h"
#include "akcipherpriv.h"
#include "der.h"
#include "rsakey.h"
#if defined(CONFIG_GCRYPT)
#include "akcipher-gcrypt.c.inc"
@@ -106,3 +108,19 @@ void qcrypto_akcipher_free(QCryptoAkCipher *akcipher)
drv->free(akcipher);
}
int qcrypto_akcipher_export_p8info(const QCryptoAkCipherOptions *opts,
uint8_t *key, size_t keylen,
uint8_t **dst, size_t *dst_len,
Error **errp)
{
switch (opts->alg) {
case QCRYPTO_AKCIPHER_ALG_RSA:
qcrypto_akcipher_rsakey_export_p8info(key, keylen, dst, dst_len);
return 0;
default:
error_setg(errp, "Unsupported algorithm: %u", opts->alg);
return -1;
}
}

crypto/der.c

@@ -22,20 +22,93 @@
#include "qemu/osdep.h"
#include "crypto/der.h"
typedef struct QCryptoDerEncodeNode {
uint8_t tag;
struct QCryptoDerEncodeNode *parent;
struct QCryptoDerEncodeNode *next;
/* for constructed type, data is null */
const uint8_t *data;
size_t dlen;
} QCryptoDerEncodeNode;
typedef struct QCryptoEncodeContext {
QCryptoDerEncodeNode root;
QCryptoDerEncodeNode *current_parent;
QCryptoDerEncodeNode *tail;
} QCryptoEncodeContext;
enum QCryptoDERTypeTag {
QCRYPTO_DER_TYPE_TAG_BOOL = 0x1,
QCRYPTO_DER_TYPE_TAG_INT = 0x2,
QCRYPTO_DER_TYPE_TAG_BIT_STR = 0x3,
QCRYPTO_DER_TYPE_TAG_OCT_STR = 0x4,
QCRYPTO_DER_TYPE_TAG_OCT_NULL = 0x5,
QCRYPTO_DER_TYPE_TAG_OCT_OID = 0x6,
QCRYPTO_DER_TYPE_TAG_NULL = 0x5,
QCRYPTO_DER_TYPE_TAG_OID = 0x6,
QCRYPTO_DER_TYPE_TAG_SEQ = 0x10,
QCRYPTO_DER_TYPE_TAG_SET = 0x11,
};
#define QCRYPTO_DER_CONSTRUCTED_MASK 0x20
enum QCryptoDERTagClass {
QCRYPTO_DER_TAG_CLASS_UNIV = 0x0,
QCRYPTO_DER_TAG_CLASS_APPL = 0x1,
QCRYPTO_DER_TAG_CLASS_CONT = 0x2,
QCRYPTO_DER_TAG_CLASS_PRIV = 0x3,
};
enum QCryptoDERTagEnc {
QCRYPTO_DER_TAG_ENC_PRIM = 0x0,
QCRYPTO_DER_TAG_ENC_CONS = 0x1,
};
#define QCRYPTO_DER_TAG_ENC_MASK 0x20
#define QCRYPTO_DER_TAG_ENC_SHIFT 5
#define QCRYPTO_DER_TAG_CLASS_MASK 0xc0
#define QCRYPTO_DER_TAG_CLASS_SHIFT 6
#define QCRYPTO_DER_TAG_VAL_MASK 0x1f
#define QCRYPTO_DER_SHORT_LEN_MASK 0x80
#define QCRYPTO_DER_TAG(class, enc, val) \
(((class) << QCRYPTO_DER_TAG_CLASS_SHIFT) | \
((enc) << QCRYPTO_DER_TAG_ENC_SHIFT) | (val))
/**
* qcrypto_der_encode_length:
* @src_len: the length of source data
* @dst: destination to save the encoded 'length'; if dst is NULL, only compute
* the expected buffer size in bytes.
* @dst_len: output parameter, indicates how many bytes were written.
*
* Encode the 'length' part of TLV tuple.
*/
static void qcrypto_der_encode_length(size_t src_len,
uint8_t *dst, size_t *dst_len)
{
size_t max_length = 0xFF;
uint8_t length_bytes = 0, header_byte;
if (src_len < QCRYPTO_DER_SHORT_LEN_MASK) {
header_byte = src_len;
*dst_len = 1;
} else {
for (length_bytes = 1; max_length < src_len; length_bytes++) {
max_length = (max_length << 8) + max_length;
}
header_byte = length_bytes;
header_byte |= QCRYPTO_DER_SHORT_LEN_MASK;
*dst_len = length_bytes + 1;
}
if (!dst) {
return;
}
*dst++ = header_byte;
/* Bigendian length bytes */
for (; length_bytes > 0; length_bytes--) {
*dst++ = ((src_len >> (length_bytes - 1) * 8) & 0xFF);
}
}
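/*
 * Worked examples of the length encoding above:
 *   src_len = 0x45   -> 45         (short form, one byte)
 *   src_len = 0x1234 -> 82 12 34   (long form: 0x80 | nbytes, then the
 *                                   length itself, big-endian)
 */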
static uint8_t qcrypto_der_peek_byte(const uint8_t **data, size_t *dlen)
{
return **data;
@@ -150,40 +223,230 @@ static int qcrypto_der_extract_data(const uint8_t **data, size_t *dlen,
return qcrypto_der_extract_definite_data(data, dlen, cb, ctx, errp);
}
int qcrypto_der_decode_int(const uint8_t **data, size_t *dlen,
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
static int qcrypto_der_decode_tlv(const uint8_t expected_tag,
const uint8_t **data, size_t *dlen,
QCryptoDERDecodeCb cb,
void *ctx, Error **errp)
{
const uint8_t *saved_data = *data;
size_t saved_dlen = *dlen;
uint8_t tag;
int data_length;
if (*dlen < 1) {
error_setg(errp, "Need more data");
return -1;
}
tag = qcrypto_der_cut_byte(data, dlen);
/* INTEGER must encoded in primitive-form */
if (tag != QCRYPTO_DER_TYPE_TAG_INT) {
error_setg(errp, "Invalid integer type tag: %u", tag);
return -1;
if (tag != expected_tag) {
error_setg(errp, "Unexpected tag: expected: %u, actual: %u",
expected_tag, tag);
goto error;
}
return qcrypto_der_extract_data(data, dlen, cb, ctx, errp);
data_length = qcrypto_der_extract_data(data, dlen, cb, ctx, errp);
if (data_length < 0) {
goto error;
}
return data_length;
error:
*data = saved_data;
*dlen = saved_dlen;
return -1;
}
int qcrypto_der_decode_int(const uint8_t **data, size_t *dlen,
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
{
const uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
QCRYPTO_DER_TAG_ENC_PRIM,
QCRYPTO_DER_TYPE_TAG_INT);
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
}
int qcrypto_der_decode_seq(const uint8_t **data, size_t *dlen,
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
{
uint8_t tag;
if (*dlen < 1) {
error_setg(errp, "Need more data");
return -1;
}
tag = qcrypto_der_cut_byte(data, dlen);
/* SEQUENCE must use constructed form */
if (tag != (QCRYPTO_DER_TYPE_TAG_SEQ | QCRYPTO_DER_CONSTRUCTED_MASK)) {
error_setg(errp, "Invalid type sequence tag: %u", tag);
return -1;
}
return qcrypto_der_extract_data(data, dlen, cb, ctx, errp);
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
QCRYPTO_DER_TAG_ENC_CONS,
QCRYPTO_DER_TYPE_TAG_SEQ);
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
}
int qcrypto_der_decode_octet_str(const uint8_t **data, size_t *dlen,
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
{
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
QCRYPTO_DER_TAG_ENC_PRIM,
QCRYPTO_DER_TYPE_TAG_OCT_STR);
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
}
int qcrypto_der_decode_bit_str(const uint8_t **data, size_t *dlen,
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
{
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
QCRYPTO_DER_TAG_ENC_PRIM,
QCRYPTO_DER_TYPE_TAG_BIT_STR);
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
}
int qcrypto_der_decode_oid(const uint8_t **data, size_t *dlen,
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
{
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
QCRYPTO_DER_TAG_ENC_PRIM,
QCRYPTO_DER_TYPE_TAG_OID);
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
}
int qcrypto_der_decode_ctx_tag(const uint8_t **data, size_t *dlen, int tag_id,
QCryptoDERDecodeCb cb, void *ctx, Error **errp)
{
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_CONT,
QCRYPTO_DER_TAG_ENC_CONS,
tag_id);
return qcrypto_der_decode_tlv(tag, data, dlen, cb, ctx, errp);
}
static void qcrypto_der_encode_prim(QCryptoEncodeContext *ctx, uint8_t tag,
const uint8_t *data, size_t dlen)
{
QCryptoDerEncodeNode *node = g_new0(QCryptoDerEncodeNode, 1);
size_t nbytes_len;
node->tag = tag;
node->data = data;
node->dlen = dlen;
node->parent = ctx->current_parent;
qcrypto_der_encode_length(dlen, NULL, &nbytes_len);
/* 1 byte for Tag, nbyte_len for Length, and dlen for Value */
node->parent->dlen += 1 + nbytes_len + dlen;
ctx->tail->next = node;
ctx->tail = node;
}
QCryptoEncodeContext *qcrypto_der_encode_ctx_new(void)
{
QCryptoEncodeContext *ctx = g_new0(QCryptoEncodeContext, 1);
ctx->current_parent = &ctx->root;
ctx->tail = &ctx->root;
return ctx;
}
static void qcrypto_der_encode_cons_begin(QCryptoEncodeContext *ctx,
uint8_t tag)
{
QCryptoDerEncodeNode *node = g_new0(QCryptoDerEncodeNode, 1);
node->tag = tag;
node->parent = ctx->current_parent;
ctx->current_parent = node;
ctx->tail->next = node;
ctx->tail = node;
}
static void qcrypto_der_encode_cons_end(QCryptoEncodeContext *ctx)
{
QCryptoDerEncodeNode *cons_node = ctx->current_parent;
size_t nbytes_len;
qcrypto_der_encode_length(cons_node->dlen, NULL, &nbytes_len);
/* 1 byte for Tag, nbyte_len for Length, and dlen for Value */
cons_node->parent->dlen += 1 + nbytes_len + cons_node->dlen;
ctx->current_parent = cons_node->parent;
}
void qcrypto_der_encode_seq_begin(QCryptoEncodeContext *ctx)
{
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
QCRYPTO_DER_TAG_ENC_CONS,
QCRYPTO_DER_TYPE_TAG_SEQ);
qcrypto_der_encode_cons_begin(ctx, tag);
}
void qcrypto_der_encode_seq_end(QCryptoEncodeContext *ctx)
{
qcrypto_der_encode_cons_end(ctx);
}
void qcrypto_der_encode_oid(QCryptoEncodeContext *ctx,
const uint8_t *src, size_t src_len)
{
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
QCRYPTO_DER_TAG_ENC_PRIM,
QCRYPTO_DER_TYPE_TAG_OID);
qcrypto_der_encode_prim(ctx, tag, src, src_len);
}
void qcrypto_der_encode_int(QCryptoEncodeContext *ctx,
const uint8_t *src, size_t src_len)
{
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
QCRYPTO_DER_TAG_ENC_PRIM,
QCRYPTO_DER_TYPE_TAG_INT);
qcrypto_der_encode_prim(ctx, tag, src, src_len);
}
void qcrypto_der_encode_null(QCryptoEncodeContext *ctx)
{
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
QCRYPTO_DER_TAG_ENC_PRIM,
QCRYPTO_DER_TYPE_TAG_NULL);
qcrypto_der_encode_prim(ctx, tag, NULL, 0);
}
void qcrypto_der_encode_octet_str(QCryptoEncodeContext *ctx,
const uint8_t *src, size_t src_len)
{
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
QCRYPTO_DER_TAG_ENC_PRIM,
QCRYPTO_DER_TYPE_TAG_OCT_STR);
qcrypto_der_encode_prim(ctx, tag, src, src_len);
}
void qcrypto_der_encode_octet_str_begin(QCryptoEncodeContext *ctx)
{
uint8_t tag = QCRYPTO_DER_TAG(QCRYPTO_DER_TAG_CLASS_UNIV,
QCRYPTO_DER_TAG_ENC_PRIM,
QCRYPTO_DER_TYPE_TAG_OCT_STR);
qcrypto_der_encode_cons_begin(ctx, tag);
}
void qcrypto_der_encode_octet_str_end(QCryptoEncodeContext *ctx)
{
qcrypto_der_encode_cons_end(ctx);
}
size_t qcrypto_der_encode_ctx_buffer_len(QCryptoEncodeContext *ctx)
{
return ctx->root.dlen;
}
void qcrypto_der_encode_ctx_flush_and_free(QCryptoEncodeContext *ctx,
uint8_t *dst)
{
QCryptoDerEncodeNode *node, *prev;
size_t len;
for (prev = &ctx->root;
(node = prev->next) && (prev->next = node->next, 1);) {
/* Tag */
*dst++ = node->tag;
/* Length */
qcrypto_der_encode_length(node->dlen, dst, &len);
dst += len;
/* Value */
if (node->data) {
memcpy(dst, node->data, node->dlen);
dst += node->dlen;
}
g_free(node);
}
g_free(ctx);
}

crypto/der.h

@@ -22,6 +22,11 @@
#include "qapi/error.h"
typedef struct QCryptoEncodeContext QCryptoEncodeContext;
/* rsaEncryption: 1.2.840.113549.1.1.1 */
#define QCRYPTO_OID_rsaEncryption "\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01"
/* Simple decoder used to parse DER encoded rsa keys. */
/**
@@ -47,14 +52,13 @@ typedef int (*QCryptoDERDecodeCb) (void *opaque, const uint8_t *value,
* will be set to the rest length of data, if cb is not NULL, must
* return 0 to make decode success, at last, the length of the data
* part of the decoded INTEGER will be returned. Otherwise, -1 is
* returned.
* returned and the values of *data and *dlen remain unchanged.
*/
int qcrypto_der_decode_int(const uint8_t **data,
size_t *dlen,
QCryptoDERDecodeCb cb,
void *opaque,
Error **errp);
/**
* qcrypto_der_decode_seq:
*
@@ -70,7 +74,7 @@ int qcrypto_der_decode_int(const uint8_t **data,
* will be set to the rest length of data, if cb is not NULL, must
* return 0 to make decode success, at last, the length of the data
* part of the decoded SEQUENCE will be returned. Otherwise, -1 is
* returned.
* returned and the values of *data and *dlen remain unchanged.
*/
int qcrypto_der_decode_seq(const uint8_t **data,
size_t *dlen,
@@ -78,4 +82,205 @@ int qcrypto_der_decode_seq(const uint8_t **data,
void *opaque,
Error **errp);
/**
* qcrypto_der_decode_oid:
*
* Decode OID from DER-encoded data, similar to der_decode_int.
*
* @data: pointer to address of input data
* @dlen: pointer to length of input data
* @cb: callback invoked when decode succeed, if cb equals NULL, no
* callback will be invoked
* @opaque: parameter passed to cb
*
* Returns: On success, *data points to rest data, and *dlen
* will be set to the rest length of data, if cb is not NULL, must
* return 0 to make decode success, at last, the length of the data
* part of the decoded OID will be returned. Otherwise, -1 is
* returned and the values of *data and *dlen remain unchanged.
*/
int qcrypto_der_decode_oid(const uint8_t **data,
size_t *dlen,
QCryptoDERDecodeCb cb,
void *opaque,
Error **errp);
/**
* qcrypto_der_decode_octet_str:
*
* Decode OCTET STRING from DER-encoded data, similar to der_decode_int.
*
* @data: pointer to address of input data
* @dlen: pointer to length of input data
* @cb: callback invoked when decode succeed, if cb equals NULL, no
* callback will be invoked
* @opaque: parameter passed to cb
*
* Returns: On success, *data points to rest data, and *dlen
* will be set to the rest length of data, if cb is not NULL, must
* return 0 to make decode success, at last, the length of the data
* part of the decoded OCTET STRING will be returned. Otherwise, -1 is
* returned and the values of *data and *dlen remain unchanged.
*/
int qcrypto_der_decode_octet_str(const uint8_t **data,
size_t *dlen,
QCryptoDERDecodeCb cb,
void *opaque,
Error **errp);
/**
* qcrypto_der_decode_bit_str:
*
* Decode BIT STRING from DER-encoded data, similar to der_decode_int.
*
* @data: pointer to address of input data
* @dlen: pointer to length of input data
* @cb: callback invoked when decode succeed, if cb equals NULL, no
* callback will be invoked
* @opaque: parameter passed to cb
*
* Returns: On success, *data points to rest data, and *dlen
* will be set to the rest length of data, if cb is not NULL, must
* return 0 to make decode success, at last, the length of the data
* part of the decoded BIT STRING will be returned. Otherwise, -1 is
* returned and the values of *data and *dlen remain unchanged.
*/
int qcrypto_der_decode_bit_str(const uint8_t **data,
size_t *dlen,
QCryptoDERDecodeCb cb,
void *opaque,
Error **errp);
/**
* qcrypto_der_decode_ctx_tag:
*
* Decode a context-specific tag
*
* @data: pointer to address of input data
* @dlen: pointer to length of input data
* @tag: expected value of context specific tag
* @cb: callback invoked when decode succeed, if cb equals NULL, no
* callback will be invoked
* @opaque: parameter passed to cb
*
* Returns: On success, *data points to rest data, and *dlen
* will be set to the rest length of data, if cb is not NULL, must
* return 0 to make decode success, at last, the length of the data
* part of the decoded context-specific tag will be returned. Otherwise, -1 is
* returned and the values of *data and *dlen remain unchanged.
*/
int qcrypto_der_decode_ctx_tag(const uint8_t **data,
size_t *dlen, int tag_id,
QCryptoDERDecodeCb cb,
void *opaque,
Error **errp);
/**
* qcrypto_der_encode_ctx_new:
*
* Allocate a context used for der encoding.
*/
QCryptoEncodeContext *qcrypto_der_encode_ctx_new(void);
/**
* qcrypto_der_encode_seq_begin:
* @ctx: the encode context.
*
* Start encoding a SEQUENCE for ctx.
*
*/
void qcrypto_der_encode_seq_begin(QCryptoEncodeContext *ctx);
/**
* qcrypto_der_encode_seq_end:
* @ctx: the encode context.
*
* Finish encoding a SEQUENCE for ctx.
*
*/
void qcrypto_der_encode_seq_end(QCryptoEncodeContext *ctx);
/**
* qcrypto_der_encode_oid:
* @ctx: the encode context.
* @src: the source data of the oid; note it should be already encoded, this
* function only adds the tag and length parts for it.
*
* Encode an oid into ctx.
*/
void qcrypto_der_encode_oid(QCryptoEncodeContext *ctx,
const uint8_t *src, size_t src_len);
/**
* qcrypto_der_encode_int:
* @ctx: the encode context.
* @src: the source data of the integer; note it should be already encoded,
* this function only adds the tag and length parts for it.
*
* Encode an integer into ctx.
*/
void qcrypto_der_encode_int(QCryptoEncodeContext *ctx,
const uint8_t *src, size_t src_len);
/**
* qcrypto_der_encode_null:
* @ctx: the encode context.
*
* Encode a null into ctx.
*/
void qcrypto_der_encode_null(QCryptoEncodeContext *ctx);
/**
* qcrypto_der_encode_octet_str:
* @ctx: the encode context.
* @src: the source data of the octet string.
*
* Encode an octet string into ctx.
*/
void qcrypto_der_encode_octet_str(QCryptoEncodeContext *ctx,
const uint8_t *src, size_t src_len);
/**
* qcrypto_der_encode_octet_str_begin:
* @ctx: the encode context.
*
* Start encoding an octet string. All fields between
* qcrypto_der_encode_octet_str_begin and qcrypto_der_encode_octet_str_end
* are encoded as an octet string. This is useful when we need to encode an
* already encoded SEQUENCE as an OCTET STRING.
*/
void qcrypto_der_encode_octet_str_begin(QCryptoEncodeContext *ctx);
/**
* qcrypto_der_encode_octet_str_end:
* @ctx: the encode context.
*
* Finish encoding an octet string. All fields between
* qcrypto_der_encode_octet_str_begin and qcrypto_der_encode_octet_str_end
* are encoded as an octet string. This is useful when we need to encode an
* already encoded SEQUENCE as an OCTET STRING.
*/
void qcrypto_der_encode_octet_str_end(QCryptoEncodeContext *ctx);
/**
* qcrypto_der_encode_ctx_buffer_len:
* @ctx: the encode context.
*
* Compute the expected buffer size to save all encoded things.
*/
size_t qcrypto_der_encode_ctx_buffer_len(QCryptoEncodeContext *ctx);
/**
* qcrypto_der_encode_ctx_flush_and_free:
* @ctx: the encode context.
* @dst: the destination to save the encoded data; the length of dst should
* not be less than qcrypto_der_encode_ctx_buffer_len()
*
* Flush all encoded data into dst, then free ctx.
*/
void qcrypto_der_encode_ctx_flush_and_free(QCryptoEncodeContext *ctx,
uint8_t *dst);
#endif /* QCRYPTO_ASN1_DECODER_H */
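Tying the encoder half of this API together, here is a minimal usage sketch (illustrative only; it assumes the declarations above plus QEMU's usual glib helpers). DER-encoding SEQUENCE { INTEGER 0 } should yield the five bytes 30 03 02 01 00:

#include "qemu/osdep.h"
#include "crypto/der.h"

static uint8_t *encode_seq_with_int0(size_t *dlen)
{
    QCryptoEncodeContext *ctx = qcrypto_der_encode_ctx_new();
    uint8_t zero = 0;   /* INTEGER content, already DER-encoded */
    uint8_t *buf;

    qcrypto_der_encode_seq_begin(ctx);
    qcrypto_der_encode_int(ctx, &zero, sizeof(zero));
    qcrypto_der_encode_seq_end(ctx);

    *dlen = qcrypto_der_encode_ctx_buffer_len(ctx);    /* 5 */
    buf = g_malloc(*dlen);
    /* Writes every queued TLV into buf and frees ctx. */
    qcrypto_der_encode_ctx_flush_and_free(ctx, buf);
    return buf;                       /* 30 03 02 01 00 */
}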

crypto/rsakey.c

@@ -19,6 +19,8 @@
*
*/
#include "qemu/osdep.h"
#include "der.h"
#include "rsakey.h"
void qcrypto_akcipher_rsakey_free(QCryptoAkCipherRSAKey *rsa_key)
@@ -37,6 +39,46 @@ void qcrypto_akcipher_rsakey_free(QCryptoAkCipherRSAKey *rsa_key)
g_free(rsa_key);
}
/**
* PKCS#8 private key info for RSA
*
* PrivateKeyInfo ::= SEQUENCE {
* version INTEGER,
* privateKeyAlgorithm PrivateKeyAlgorithmIdentifier,
* privateKey OCTET STRING,
* attributes [0] IMPLICIT Attributes OPTIONAL
* }
*/
void qcrypto_akcipher_rsakey_export_p8info(const uint8_t *key,
size_t keylen,
uint8_t **dst,
size_t *dlen)
{
QCryptoEncodeContext *ctx = qcrypto_der_encode_ctx_new();
uint8_t version = 0;
qcrypto_der_encode_seq_begin(ctx);
/* version */
qcrypto_der_encode_int(ctx, &version, sizeof(version));
/* algorithm identifier */
qcrypto_der_encode_seq_begin(ctx);
qcrypto_der_encode_oid(ctx, (uint8_t *)QCRYPTO_OID_rsaEncryption,
sizeof(QCRYPTO_OID_rsaEncryption) - 1);
qcrypto_der_encode_null(ctx);
qcrypto_der_encode_seq_end(ctx);
/* RSA private key */
qcrypto_der_encode_octet_str(ctx, key, keylen);
qcrypto_der_encode_seq_end(ctx);
*dlen = qcrypto_der_encode_ctx_buffer_len(ctx);
*dst = g_malloc(*dlen);
qcrypto_der_encode_ctx_flush_and_free(ctx, *dst);
}
#if defined(CONFIG_NETTLE) && defined(CONFIG_HOGWEED)
#include "rsakey-nettle.c.inc"
#else

crypto/rsakey.h

@@ -22,7 +22,6 @@
#ifndef QCRYPTO_RSAKEY_H
#define QCRYPTO_RSAKEY_H
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "crypto/akcipher.h"
@@ -84,6 +83,16 @@ QCryptoAkCipherRSAKey *qcrypto_akcipher_rsakey_parse(
QCryptoAkCipherKeyType type,
const uint8_t *key, size_t keylen, Error **errp);
/**
* qcrypto_akcipher_rsakey_export_p8info:
*
* Export RSA private key to PKCS#8 private key info.
*/
void qcrypto_akcipher_rsakey_export_p8info(const uint8_t *key,
size_t keylen,
uint8_t **dst,
size_t *dlen);
void qcrypto_akcipher_rsakey_free(QCryptoAkCipherRSAKey *key);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoAkCipherRSAKey,

docs/devel/acpi-bits.rst (new file, 145 lines)

@@ -0,0 +1,145 @@
=============================================================================
ACPI/SMBIOS avocado tests using biosbits
=============================================================================
Biosbits is software written by Josh Triplett that can be downloaded
from https://biosbits.org/. The github codebase can be found
`here <https://github.com/biosbits/bits/tree/master>`__. It is software that
executes the bios components such as acpi and smbios tables directly through
the acpica bios interpreter (a freely available C based library written by
Intel, downloadable from https://acpica.org/ and included with biosbits)
without an operating system getting involved in between.
There are several advantages to directly testing the bios in a real physical
machine or VM as opposed to indirectly discovering bios issues through the
operating system. For one thing, OSes tend to hide bios problems from the
end user. For another, we have more control over what we want to test and
how, by directly using the acpica interpreter on top of the bios on a running
system. More details on the inspiration for developing biosbits and its real
life uses can be found in [#a]_ and [#b]_.
This directory contains tests written in python using the avocado framework
that exercise the QEMU bios components using biosbits and report test failures.
For QEMU, we maintain a fork of bios bits in gitlab along with all the
dependent submodules:
https://gitlab.com/qemu-project/biosbits-bits
This fork contains numerous fixes, a newer acpica and changes specific to
running these avocado QEMU tests using bits. The author of this document
is the sole maintainer of the QEMU fork of the bios bits repo.
Under the directory ``tests/avocado/``, ``acpi-bits.py`` is a QEMU avocado
test that drives all this.
A brief description of the various test files follows.
Under ``tests/avocado/`` as the root we have:
::
├── acpi-bits
│ ├── bits-config
│ │ └── bits-cfg.txt
│ ├── bits-tests
│ │ ├── smbios.py2
│ │ ├── testacpi.py2
│ │ └── testcpuid.py2
│ └── README
├── acpi-bits.py
* ``tests/avocado``:
``acpi-bits.py``:
This is the main python avocado test script that generates a
biosbits iso. It then spawns a QEMU VM with it, collects the log and reports
test failures. This is the script to look at if you want to add or change
some component of the log parsing, add a new command line to alter how QEMU
is spawned, etc. Test writers typically would not need to modify this script
unless they wanted to enhance or change the log parsing for their tests. To
enable debugging, you can set the **V=1** environment variable. This enables
verbose mode for the test and also dumps the entire log from bios bits, plus
more information in case a failure happens.
In order to run this test, please perform the following steps from the QEMU
build directory:
::
$ make check-venv (needed only the first time to create the venv)
$ ./tests/venv/bin/avocado run -t acpi tests/avocado
The above will run all acpi avocado tests including this one.
In order to run the individual tests, perform the following:
::
$ ./tests/venv/bin/avocado run tests/avocado/acpi-bits.py --tap -
The above will produce output in tap format. You can omit "--tap -" at the
end and it will produce output like the following:
::
$ ./tests/venv/bin/avocado run tests/avocado/acpi-bits.py
Fetching asset from tests/avocado/acpi-bits.py:AcpiBitsTest.test_acpi_smbios_bits
JOB ID : eab225724da7b64c012c65705dc2fa14ab1defef
JOB LOG : /home/anisinha/avocado/job-results/job-2022-10-10T17.58-eab2257/job.log
(1/1) tests/avocado/acpi-bits.py:AcpiBitsTest.test_acpi_smbios_bits: PASS (33.09 s)
RESULTS : PASS 1 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0
JOB TIME : 39.22 s
You can inspect the log file for more information about the run or in order
to diagnose issues. If you pass V=1 in the environment, more diagnostic logs
will be found in the test log.
* ``tests/avocado/acpi-bits/bits-config``:
This location contains biosbits configuration files that determine how the
software runs the tests.
``bits-cfg.txt``:
This is the biosbits config file that determines what tests
or actions are performed by bits. The description of the config options is
provided in the file itself.
* ``tests/avocado/acpi-bits/bits-tests``:
This directory contains biosbits python based tests that are run from within
the biosbits environment in the spawned VM. New additions of test cases can
be made in the appropriate test file. For example, new acpi tests can go
into testacpi.py2 and one would call testsuite.add_test() to register the new
test so that it gets executed as a part of the ACPI tests.
It might occasionally be necessary to disable some subtests or add a new
test that belongs to a test suite not already present in this directory. To
do this, please clone the bits source from
https://gitlab.com/qemu-project/biosbits-bits/-/tree/qemu-bits.
Note that this is the "qemu-bits" branch and not the "bits" branch of the
repository. "qemu-bits" is the branch where we have made all the QEMU
specific enhancements and we must use the source from this branch only.
Copy the test suite/script that needs modification (addition of new tests
or disabling them) from the python directory into this directory. For
example, in order to change cpuid related tests, copy the following
file into this directory and rename it with .py2 extension:
https://gitlab.com/qemu-project/biosbits-bits/-/blob/qemu-bits/python/testcpuid.py
Then make your additions and changes here. In summary, the steps are:
(a) Copy unmodified test script to this directory from bits source.
(b) Add a SPDX license header.
(c) Perform modifications to the test.
(a), (b) and (c) above should go into separate commits so that the original
test script and the changes we have made stay separate and clear.
The test framework will then use your modified test script to run the test.
No further changes would be needed. Please check the logs to make sure that
appropriate changes have taken effect.
The tests have an extension .py2 in order to indicate that:
(a) They are python2.7 based scripts and not python 3 scripts.
(b) They are run from within the bios bits VM and are not subject to QEMU
build/test python script maintenance and dependency resolution.
(c) They need not be loaded by the avocado framework when running tests.
Author: Ani Sinha <ani@anisinha.ca>
References:
-----------
.. [#a] https://blog.linuxplumbersconf.org/2011/ocw/system/presentations/867/original/bits.pdf
.. [#b] https://www.youtube.com/watch?v=36QIepyUuhg

docs/devel/index.rst

@@ -11,6 +11,7 @@ the basics if you are adding new files and targets to the build.
build-system
kconfig
testing
acpi-bits
qtest
ci
qapi-code-gen

hw/acpi/aml-build-stub.c

@@ -26,6 +26,16 @@ void aml_append(Aml *parent_ctx, Aml *child)
{
}
Aml *aml_return(Aml *val)
{
return NULL;
}
Aml *aml_method(const char *name, int arg_count, AmlSerializeFlag sflag)
{
return NULL;
}
Aml *aml_resource_template(void)
{
return NULL;

hw/acpi/aml-build.c

@@ -2070,7 +2070,7 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
acpi_table_end(linker, &table);
}
/* build rev1/rev3/rev5.1 FADT */
/* build rev1/rev3/rev5.1/rev6.0 FADT */
void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
const char *oem_id, const char *oem_table_id)
{
@@ -2193,8 +2193,15 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
/* SLEEP_STATUS_REG */
build_append_gas_from_struct(tbl, &f->sleep_sts);
/* TODO: extra fields need to be added to support revisions above rev5 */
assert(f->rev == 5);
if (f->rev == 5) {
goto done;
}
/* Hypervisor Vendor Identity */
build_append_padded_str(tbl, "QEMU", 8, '\0');
/* TODO: extra fields need to be added to support revisions above rev6 */
assert(f->rev == 6);
done:
acpi_table_end(linker, &table);


@ -635,7 +635,7 @@ static unsigned read_erst_record(ERSTDeviceState *s)
if (record_length < UEFI_CPER_RECORD_MIN_SIZE) {
rc = STATUS_FAILED;
}
if ((s->record_offset + record_length) > exchange_length) {
if (record_length > exchange_length - s->record_offset) {
rc = STATUS_FAILED;
}
/* If all is ok, copy the record to the exchange buffer */
@ -684,7 +684,7 @@ static unsigned write_erst_record(ERSTDeviceState *s)
if (record_length < UEFI_CPER_RECORD_MIN_SIZE) {
return STATUS_FAILED;
}
if ((s->record_offset + record_length) > exchange_length) {
if (record_length > exchange_length - s->record_offset) {
return STATUS_FAILED;
}
@ -716,7 +716,7 @@ static unsigned write_erst_record(ERSTDeviceState *s)
if (nvram) {
/* Write the record into the slot */
memcpy(nvram, exchange, record_length);
memset(nvram + record_length, exchange_length - record_length, 0xFF);
memset(nvram + record_length, 0xFF, exchange_length - record_length);
/* If a new record, increment the record_count */
if (!record_found) {
uint32_t record_count;


@ -922,6 +922,7 @@ void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io,
#define NVDIMM_DSM_RFIT_STATUS "RSTA"
#define NVDIMM_QEMU_RSVD_UUID "648B9CF2-CDA1-4312-8AD9-49C4AF32BD62"
#define NVDIMM_DEVICE_DSM_UUID "4309AC30-0D11-11E4-9191-0800200C9A66"
static void nvdimm_build_common_dsm(Aml *dev,
NVDIMMState *nvdimm_state)
@ -1029,15 +1030,14 @@ static void nvdimm_build_common_dsm(Aml *dev,
/* UUID for QEMU internal use */), expected_uuid));
aml_append(elsectx, ifctx);
elsectx2 = aml_else();
aml_append(elsectx2, aml_store(
aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66")
aml_append(elsectx2, aml_store(aml_touuid(NVDIMM_DEVICE_DSM_UUID)
/* UUID for NVDIMM Devices */, expected_uuid));
aml_append(elsectx, elsectx2);
aml_append(method, elsectx);
uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid));
unsupport = aml_if(aml_or(unpatched, uuid_invalid, NULL));
unsupport = aml_if(aml_lor(unpatched, uuid_invalid));
/*
* function 0 is called to inquire what functions are supported by
@ -1069,10 +1069,9 @@ static void nvdimm_build_common_dsm(Aml *dev,
* in the DSM Spec.
*/
pckg = aml_arg(3);
ifctx = aml_if(aml_and(aml_equal(aml_object_type(pckg),
ifctx = aml_if(aml_land(aml_equal(aml_object_type(pckg),
aml_int(4 /* Package */)) /* It is a Package? */,
aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? */,
NULL));
aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? */));
pckg_index = aml_local(2);
pckg_buf = aml_local(3);
@ -1244,6 +1243,7 @@ static void nvdimm_build_fit(Aml *dev)
static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots)
{
uint32_t slot;
Aml *method, *pkg, *field, *com_call;
for (slot = 0; slot < ram_slots; slot++) {
uint32_t handle = nvdimm_slot_to_handle(slot);
@ -1261,6 +1261,100 @@ static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots)
*/
aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));
/*
* ACPI v6.4: Section 6.5.10 NVDIMM Label Methods
*/
/* _LSI */
method = aml_method("_LSI", 0, AML_SERIALIZED);
com_call = aml_call5(NVDIMM_COMMON_DSM,
aml_touuid(NVDIMM_DEVICE_DSM_UUID),
aml_int(1), aml_int(4), aml_int(0),
aml_int(handle));
aml_append(method, aml_store(com_call, aml_local(0)));
aml_append(method, aml_create_dword_field(aml_local(0),
aml_int(0), "STTS"));
aml_append(method, aml_create_dword_field(aml_local(0), aml_int(4),
"SLSA"));
aml_append(method, aml_create_dword_field(aml_local(0), aml_int(8),
"MAXT"));
pkg = aml_package(3);
aml_append(pkg, aml_name("STTS"));
aml_append(pkg, aml_name("SLSA"));
aml_append(pkg, aml_name("MAXT"));
aml_append(method, aml_store(pkg, aml_local(1)));
aml_append(method, aml_return(aml_local(1)));
aml_append(nvdimm_dev, method);
/* _LSR */
method = aml_method("_LSR", 2, AML_SERIALIZED);
aml_append(method, aml_name_decl("INPT", aml_buffer(8, NULL)));
aml_append(method, aml_create_dword_field(aml_name("INPT"),
aml_int(0), "OFST"));
aml_append(method, aml_create_dword_field(aml_name("INPT"),
aml_int(4), "LEN"));
aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
aml_append(method, aml_store(aml_arg(1), aml_name("LEN")));
pkg = aml_package(1);
aml_append(pkg, aml_name("INPT"));
aml_append(method, aml_store(pkg, aml_local(0)));
com_call = aml_call5(NVDIMM_COMMON_DSM,
aml_touuid(NVDIMM_DEVICE_DSM_UUID),
aml_int(1), aml_int(5), aml_local(0),
aml_int(handle));
aml_append(method, aml_store(com_call, aml_local(3)));
field = aml_create_dword_field(aml_local(3), aml_int(0), "STTS");
aml_append(method, field);
field = aml_create_field(aml_local(3), aml_int(32),
aml_shiftleft(aml_name("LEN"), aml_int(3)),
"LDAT");
aml_append(method, field);
aml_append(method, aml_name_decl("LSA", aml_buffer(0, NULL)));
aml_append(method, aml_to_buffer(aml_name("LDAT"), aml_name("LSA")));
pkg = aml_package(2);
aml_append(pkg, aml_name("STTS"));
aml_append(pkg, aml_name("LSA"));
aml_append(method, aml_store(pkg, aml_local(1)));
aml_append(method, aml_return(aml_local(1)));
aml_append(nvdimm_dev, method);
/* _LSW */
method = aml_method("_LSW", 3, AML_SERIALIZED);
aml_append(method, aml_store(aml_arg(2), aml_local(2)));
aml_append(method, aml_name_decl("INPT", aml_buffer(8, NULL)));
field = aml_create_dword_field(aml_name("INPT"),
aml_int(0), "OFST");
aml_append(method, field);
field = aml_create_dword_field(aml_name("INPT"),
aml_int(4), "TLEN");
aml_append(method, field);
aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
aml_append(method, aml_store(aml_arg(1), aml_name("TLEN")));
aml_append(method, aml_concatenate(aml_name("INPT"), aml_local(2),
aml_name("INPT")));
pkg = aml_package(1);
aml_append(pkg, aml_name("INPT"));
aml_append(method, aml_store(pkg, aml_local(0)));
com_call = aml_call5(NVDIMM_COMMON_DSM,
aml_touuid(NVDIMM_DEVICE_DSM_UUID),
aml_int(1), aml_int(6), aml_local(0),
aml_int(handle));
aml_append(method, aml_store(com_call, aml_local(3)));
field = aml_create_dword_field(aml_local(3), aml_int(0), "STTS");
aml_append(method, field);
aml_append(method, aml_return(aml_name("STTS")));
aml_append(nvdimm_dev, method);
nvdimm_build_device_dsm(nvdimm_dev, handle);
aml_append(root_dev, nvdimm_dev);
}


@ -30,6 +30,7 @@ config ARM_VIRT
select ACPI_VIOT
select VIRTIO_MEM_SUPPORTED
select ACPI_CXL
select ACPI_HMAT
config CHEETAH
bool


@ -42,6 +42,7 @@
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/generic_event_device.h"
#include "hw/acpi/tpm.h"
#include "hw/acpi/hmat.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
@ -685,7 +686,7 @@ build_dbg2(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
};
/*
* ACPI spec, Revision 5.1 Errata A
* ACPI spec, Revision 6.0 Errata A
* 5.2.12 Multiple APIC Description Table (MADT)
*/
static void build_append_gicr(GArray *table_data, uint64_t base, uint32_t size)
@ -704,7 +705,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
int i;
VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
const MemMapEntry *memmap = vms->memmap;
AcpiTable table = { .sig = "APIC", .rev = 3, .oem_id = vms->oem_id,
AcpiTable table = { .sig = "APIC", .rev = 4, .oem_id = vms->oem_id,
.oem_table_id = vms->oem_table_id };
acpi_table_begin(&table, table_data);
@ -739,7 +740,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
/* 5.2.12.14 GIC Structure */
build_append_int_noprefix(table_data, 0xB, 1); /* Type */
build_append_int_noprefix(table_data, 76, 1); /* Length */
build_append_int_noprefix(table_data, 80, 1); /* Length */
build_append_int_noprefix(table_data, 0, 2); /* Reserved */
build_append_int_noprefix(table_data, i, 4); /* GIC ID */
build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */
@ -759,6 +760,10 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
build_append_int_noprefix(table_data, 0, 8); /* GICR Base Address*/
/* MPIDR */
build_append_int_noprefix(table_data, armcpu->mp_affinity, 8);
/* Processor Power Efficiency Class */
build_append_int_noprefix(table_data, 0, 1);
/* Reserved */
build_append_int_noprefix(table_data, 0, 3);
}
if (vms->gic_version != VIRT_GIC_VERSION_2) {
@ -770,12 +775,6 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
}
if (its_class_name() && !vmc->no_its) {
/*
* FIXME: Structure is from Revision 6.0 where 'GIC Structure'
* has additional fields on top of implemented 5.1 Errata A,
* to make it consistent with v6.0 we need to bump everything
* to v6.0
*/
/*
* ACPI spec, Revision 6.0 Errata A
* (original 6.0 definition has invalid Length)
@ -808,13 +807,13 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
}
/* FADT */
static void build_fadt_rev5(GArray *table_data, BIOSLinker *linker,
static void build_fadt_rev6(GArray *table_data, BIOSLinker *linker,
VirtMachineState *vms, unsigned dsdt_tbl_offset)
{
/* ACPI v5.1 */
/* ACPI v6.0 */
AcpiFadtData fadt = {
.rev = 5,
.minor_ver = 1,
.rev = 6,
.minor_ver = 0,
.flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI,
.xdsdt_tbl_offset = &dsdt_tbl_offset,
};
@ -944,7 +943,7 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
/* FADT MADT PPTT GTDT MCFG SPCR DBG2 pointed to by RSDT */
acpi_add_table(table_offsets, tables_blob);
build_fadt_rev5(tables_blob, tables->linker, vms, dsdt);
build_fadt_rev6(tables_blob, tables->linker, vms, dsdt);
acpi_add_table(table_offsets, tables_blob);
build_madt(tables_blob, tables->linker, vms);
@ -989,6 +988,12 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
build_slit(tables_blob, tables->linker, ms, vms->oem_id,
vms->oem_table_id);
}
if (ms->numa_state->hmat_enabled) {
acpi_add_table(table_offsets, tables_blob);
build_hmat(tables_blob, tables->linker, ms->numa_state,
vms->oem_id, vms->oem_table_id);
}
}
if (ms->nvdimms_state->is_enabled) {


@ -168,13 +168,6 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
goto err_guest_notifiers;
}
ret = vhost_dev_start(&s->dev, vdev);
if (ret < 0) {
error_setg_errno(errp, -ret, "Error starting vhost");
goto err_guest_notifiers;
}
s->started_vu = true;
/* guest_notifier_mask/pending not used yet, so just unmask
* everything here. virtio-pci will do the right thing by
* enabling/disabling irqfd.
@ -183,9 +176,20 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
vhost_virtqueue_mask(&s->dev, vdev, i, false);
}
s->dev.vq_index_end = s->dev.nvqs;
ret = vhost_dev_start(&s->dev, vdev);
if (ret < 0) {
error_setg_errno(errp, -ret, "Error starting vhost");
goto err_guest_notifiers;
}
s->started_vu = true;
return ret;
err_guest_notifiers:
for (i = 0; i < s->dev.nvqs; i++) {
vhost_virtqueue_mask(&s->dev, vdev, i, true);
}
k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
err_host_notifiers:
vhost_dev_disable_notifiers(&s->dev, vdev);
@ -222,14 +226,10 @@ static void vhost_user_blk_stop(VirtIODevice *vdev)
static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserBlk *s = VHOST_USER_BLK(vdev);
bool should_start = virtio_device_started(vdev, status);
bool should_start = virtio_device_should_start(vdev, status);
Error *local_err = NULL;
int ret;
if (!vdev->vm_running) {
should_start = false;
}
if (!s->connected) {
return;
}


@ -40,7 +40,9 @@
#include "hw/virtio/virtio-pci.h"
#include "qom/object_interfaces.h"
GlobalProperty hw_compat_7_1[] = {};
GlobalProperty hw_compat_7_1[] = {
{ "virtio-device", "queue_reset", "false" },
};
const size_t hw_compat_7_1_len = G_N_ELEMENTS(hw_compat_7_1);
GlobalProperty hw_compat_7_0[] = {
@ -1176,9 +1178,7 @@ static void numa_validate_initiator(NumaState *numa_state)
for (i = 0; i < numa_state->num_nodes; i++) {
if (numa_info[i].initiator == MAX_NODES) {
error_report("The initiator of NUMA node %d is missing, use "
"'-numa node,initiator' option to declare it", i);
exit(1);
continue;
}
if (!numa_info[numa_info[i].initiator].present) {

hw/cxl/cxl-cdat.c (new file, 224 lines)

@ -0,0 +1,224 @@
/*
* CXL CDAT Structure
*
* Copyright (C) 2021 Avery Design Systems, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
static void cdat_len_check(CDATSubHeader *hdr, Error **errp)
{
assert(hdr->length);
assert(hdr->reserved == 0);
switch (hdr->type) {
case CDAT_TYPE_DSMAS:
assert(hdr->length == sizeof(CDATDsmas));
break;
case CDAT_TYPE_DSLBIS:
assert(hdr->length == sizeof(CDATDslbis));
break;
case CDAT_TYPE_DSMSCIS:
assert(hdr->length == sizeof(CDATDsmscis));
break;
case CDAT_TYPE_DSIS:
assert(hdr->length == sizeof(CDATDsis));
break;
case CDAT_TYPE_DSEMTS:
assert(hdr->length == sizeof(CDATDsemts));
break;
case CDAT_TYPE_SSLBIS:
assert(hdr->length >= sizeof(CDATSslbisHeader));
assert((hdr->length - sizeof(CDATSslbisHeader)) %
sizeof(CDATSslbe) == 0);
break;
default:
error_setg(errp, "Type %d is reserved", hdr->type);
}
}
static void ct3_build_cdat(CDATObject *cdat, Error **errp)
{
g_autofree CDATTableHeader *cdat_header = NULL;
g_autofree CDATEntry *cdat_st = NULL;
uint8_t sum = 0;
int ent, i;
/* Use default table if fopen == NULL */
assert(cdat->build_cdat_table);
cdat_header = g_malloc0(sizeof(*cdat_header));
if (!cdat_header) {
error_setg(errp, "Failed to allocate CDAT header");
return;
}
cdat->built_buf_len = cdat->build_cdat_table(&cdat->built_buf, cdat->private);
if (!cdat->built_buf_len) {
/* Build later as not all data available yet */
cdat->to_update = true;
return;
}
cdat->to_update = false;
cdat_st = g_malloc0(sizeof(*cdat_st) * (cdat->built_buf_len + 1));
if (!cdat_st) {
error_setg(errp, "Failed to allocate CDAT entry array");
return;
}
/* Entry 0 for CDAT header, starts with Entry 1 */
for (ent = 1; ent < cdat->built_buf_len + 1; ent++) {
CDATSubHeader *hdr = cdat->built_buf[ent - 1];
uint8_t *buf = (uint8_t *)cdat->built_buf[ent - 1];
cdat_st[ent].base = hdr;
cdat_st[ent].length = hdr->length;
cdat_header->length += hdr->length;
for (i = 0; i < hdr->length; i++) {
sum += buf[i];
}
}
/* CDAT header */
cdat_header->revision = CXL_CDAT_REV;
/* For now, no runtime updates */
cdat_header->sequence = 0;
cdat_header->length += sizeof(CDATTableHeader);
sum += cdat_header->revision + cdat_header->sequence +
cdat_header->length;
/* Sum of all bytes including checksum must be 0 */
cdat_header->checksum = ~sum + 1;
cdat_st[0].base = g_steal_pointer(&cdat_header);
cdat_st[0].length = sizeof(*cdat_header);
cdat->entry_len = 1 + cdat->built_buf_len;
cdat->entry = g_steal_pointer(&cdat_st);
}
static void ct3_load_cdat(CDATObject *cdat, Error **errp)
{
g_autofree CDATEntry *cdat_st = NULL;
uint8_t sum = 0;
int num_ent;
int i = 0, ent = 1, file_size = 0;
CDATSubHeader *hdr;
FILE *fp = NULL;
/* Read CDAT file and create its cache */
fp = fopen(cdat->filename, "r");
if (!fp) {
error_setg(errp, "CDAT: Unable to open file");
return;
}
fseek(fp, 0, SEEK_END);
file_size = ftell(fp);
fseek(fp, 0, SEEK_SET);
cdat->buf = g_malloc0(file_size);
if (fread(cdat->buf, file_size, 1, fp) == 0) {
error_setg(errp, "CDAT: File read failed");
return;
}
fclose(fp);
if (file_size < sizeof(CDATTableHeader)) {
error_setg(errp, "CDAT: File too short");
return;
}
i = sizeof(CDATTableHeader);
num_ent = 1;
while (i < file_size) {
hdr = (CDATSubHeader *)(cdat->buf + i);
cdat_len_check(hdr, errp);
i += hdr->length;
num_ent++;
}
if (i != file_size) {
error_setg(errp, "CDAT: File length missmatch");
return;
}
cdat_st = g_malloc0(sizeof(*cdat_st) * num_ent);
if (!cdat_st) {
error_setg(errp, "CDAT: Failed to allocate entry array");
return;
}
/* Set CDAT header, Entry = 0 */
cdat_st[0].base = cdat->buf;
cdat_st[0].length = sizeof(CDATTableHeader);
i = 0;
while (i < cdat_st[0].length) {
sum += cdat->buf[i++];
}
/* Read CDAT structures */
while (i < file_size) {
hdr = (CDATSubHeader *)(cdat->buf + i);
cdat_len_check(hdr, errp);
cdat_st[ent].base = hdr;
cdat_st[ent].length = hdr->length;
while (cdat->buf + i <
(uint8_t *)cdat_st[ent].base + cdat_st[ent].length) {
assert(i < file_size);
sum += cdat->buf[i++];
}
ent++;
}
if (sum != 0) {
warn_report("CDAT: Found checksum mismatch in %s", cdat->filename);
}
cdat->entry_len = num_ent;
cdat->entry = g_steal_pointer(&cdat_st);
}
void cxl_doe_cdat_init(CXLComponentState *cxl_cstate, Error **errp)
{
CDATObject *cdat = &cxl_cstate->cdat;
if (cdat->filename) {
ct3_load_cdat(cdat, errp);
} else {
ct3_build_cdat(cdat, errp);
}
}
void cxl_doe_cdat_update(CXLComponentState *cxl_cstate, Error **errp)
{
CDATObject *cdat = &cxl_cstate->cdat;
if (cdat->to_update) {
ct3_build_cdat(cdat, errp);
}
}
void cxl_doe_cdat_release(CXLComponentState *cxl_cstate)
{
CDATObject *cdat = &cxl_cstate->cdat;
free(cdat->entry);
if (cdat->built_buf) {
cdat->free_cdat_table(cdat->built_buf, cdat->built_buf_len,
cdat->private);
}
if (cdat->buf) {
free(cdat->buf);
}
}


@ -4,6 +4,7 @@ softmmu_ss.add(when: 'CONFIG_CXL',
'cxl-device-utils.c',
'cxl-mailbox-utils.c',
'cxl-host.c',
'cxl-cdat.c',
),
if_false: files(
'cxl-host-stubs.c',


@ -0,0 +1,7 @@
#include "qemu/osdep.h"
#include "hw/acpi/acpi_aml_interface.h"
#include "vga_int.h"
void build_vga_aml(AcpiDevAmlIf *adev, Aml *scope)
{
}

hw/display/acpi-vga.c (new file, 26 lines)

@ -0,0 +1,26 @@
#include "qemu/osdep.h"
#include "hw/acpi/acpi_aml_interface.h"
#include "hw/pci/pci.h"
#include "vga_int.h"
void build_vga_aml(AcpiDevAmlIf *adev, Aml *scope)
{
int s3d = 0;
Aml *method;
if (object_dynamic_cast(OBJECT(adev), "qxl-vga")) {
s3d = 3;
}
method = aml_method("_S1D", 0, AML_NOTSERIALIZED);
aml_append(method, aml_return(aml_int(0)));
aml_append(scope, method);
method = aml_method("_S2D", 0, AML_NOTSERIALIZED);
aml_append(method, aml_return(aml_int(0)));
aml_append(scope, method);
method = aml_method("_S3D", 0, AML_NOTSERIALIZED);
aml_append(method, aml_return(aml_int(s3d)));
aml_append(scope, method);
}


@ -38,10 +38,21 @@ softmmu_ss.add(when: 'CONFIG_NEXTCUBE', if_true: files('next-fb.c'))
specific_ss.add(when: 'CONFIG_VGA', if_true: files('vga.c'))
if (config_all_devices.has_key('CONFIG_VGA_CIRRUS') or
config_all_devices.has_key('CONFIG_VGA_PCI') or
config_all_devices.has_key('CONFIG_VMWARE_VGA') or
config_all_devices.has_key('CONFIG_ATI_VGA')
)
softmmu_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-vga.c'),
if_false: files('acpi-vga-stub.c'))
endif
if config_all_devices.has_key('CONFIG_QXL')
qxl_ss = ss.source_set()
qxl_ss.add(when: 'CONFIG_QXL', if_true: [files('qxl.c', 'qxl-logger.c', 'qxl-render.c'),
pixman, spice])
qxl_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-vga.c'),
if_false: files('acpi-vga-stub.c'))
hw_display_modules += {'qxl': qxl_ss}
endif
@ -52,6 +63,7 @@ softmmu_ss.add(when: 'CONFIG_ARTIST', if_true: files('artist.c'))
softmmu_ss.add(when: [pixman, 'CONFIG_ATI_VGA'], if_true: files('ati.c', 'ati_2d.c', 'ati_dbg.c'))
if config_all_devices.has_key('CONFIG_VIRTIO_GPU')
virtio_gpu_ss = ss.source_set()
virtio_gpu_ss.add(when: 'CONFIG_VIRTIO_GPU',
@ -87,14 +99,19 @@ if config_all_devices.has_key('CONFIG_VIRTIO_VGA')
if_true: [files('virtio-vga.c'), pixman])
virtio_vga_ss.add(when: 'CONFIG_VHOST_USER_VGA',
if_true: files('vhost-user-vga.c'))
virtio_vga_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-vga.c'),
if_false: files('acpi-vga-stub.c'))
hw_display_modules += {'virtio-vga': virtio_vga_ss}
virtio_vga_gl_ss = ss.source_set()
virtio_vga_gl_ss.add(when: ['CONFIG_VIRTIO_VGA', virgl, opengl],
if_true: [files('virtio-vga-gl.c'), pixman])
virtio_vga_gl_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-vga.c'),
if_false: files('acpi-vga-stub.c'))
hw_display_modules += {'virtio-vga-gl': virtio_vga_gl_ss}
endif
specific_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_lcdc.c'))
softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-vga-stub.c'))
modules += { 'hw-display': hw_display_modules }


@ -35,6 +35,7 @@
#include "hw/loader.h"
#include "hw/display/edid.h"
#include "qom/object.h"
#include "hw/acpi/acpi_aml_interface.h"
enum vga_pci_flags {
PCI_VGA_FLAG_ENABLE_MMIO = 1,
@ -354,11 +355,13 @@ static void vga_pci_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);
k->vendor_id = PCI_VENDOR_ID_QEMU;
k->device_id = PCI_DEVICE_ID_QEMU_VGA;
dc->vmsd = &vmstate_vga_pci;
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
adevc->build_dev_aml = build_vga_aml;
}
static const TypeInfo vga_pci_type_info = {
@ -369,6 +372,7 @@ static const TypeInfo vga_pci_type_info = {
.class_init = vga_pci_class_init,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ TYPE_ACPI_DEV_AML_IF },
{ },
},
};


@ -30,6 +30,7 @@
#include "ui/console.h"
#include "hw/display/bochs-vbe.h"
#include "hw/acpi/acpi_aml_interface.h"
#define ST01_V_RETRACE 0x08
#define ST01_DISP_ENABLE 0x01
@ -195,4 +196,5 @@ void pci_std_vga_mmio_region_init(VGACommonState *s,
MemoryRegion *subs,
bool qext, bool edid);
void build_vga_aml(AcpiDevAmlIf *adev, Aml *scope);
#endif


@ -60,6 +60,7 @@
#include "hw/i386/fw_cfg.h"
#include "hw/i386/ich9.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci-host/i440fx.h"
#include "hw/pci-host/q35.h"
#include "hw/i386/x86-iommu.h"
@ -112,7 +113,6 @@ typedef struct AcpiPmInfo {
} AcpiPmInfo;
typedef struct AcpiMiscInfo {
bool is_piix4;
bool has_hpet;
#ifdef CONFIG_TPM
TPMVersion tpm_version;
@ -121,13 +121,6 @@ typedef struct AcpiMiscInfo {
unsigned dsdt_size;
} AcpiMiscInfo;
typedef struct AcpiBuildPciBusHotplugState {
GArray *device_table;
GArray *notify_table;
struct AcpiBuildPciBusHotplugState *parent;
bool pcihp_bridge_en;
} AcpiBuildPciBusHotplugState;
typedef struct FwCfgTPMConfig {
uint32_t tpmppi_address;
uint8_t tpm_version;
@ -288,17 +281,6 @@ static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm)
static void acpi_get_misc_info(AcpiMiscInfo *info)
{
Object *piix = object_resolve_type_unambiguous(TYPE_PIIX4_PM);
Object *lpc = object_resolve_type_unambiguous(TYPE_ICH9_LPC_DEVICE);
assert(!!piix != !!lpc);
if (piix) {
info->is_piix4 = true;
}
if (lpc) {
info->is_piix4 = false;
}
info->has_hpet = hpet_find();
#ifdef CONFIG_TPM
info->tpm_version = tpm_get_version(tpm_find());
@ -430,18 +412,11 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus,
bool hotpluggbale_slot = false;
bool bridge_in_acpi = false;
bool cold_plugged_bridge = false;
bool is_vga = false;
if (pdev) {
pc = PCI_DEVICE_GET_CLASS(pdev);
dc = DEVICE_GET_CLASS(pdev);
if (pc->class_id == PCI_CLASS_BRIDGE_ISA) {
continue;
}
is_vga = pc->class_id == PCI_CLASS_DISPLAY_VGA;
/*
* Cold plugged bridges aren't themselves hot-pluggable.
* Hotplugged bridges *are* hot-pluggable.
@ -455,9 +430,10 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus,
/*
* allow describing coldplugged bridges in ACPI even if they are not
* on function 0, as they are not unpluggable, for all other devices
* generate description only for function 0 per slot
* generate description only for function 0 per slot, and for other
* functions if device on function provides its own AML
*/
if (func && !bridge_in_acpi) {
if (func && !bridge_in_acpi && !get_dev_aml_func(DEVICE(pdev))) {
continue;
}
} else {
@ -489,28 +465,7 @@ static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus,
aml_append(dev, aml_pci_device_dsm());
}
if (is_vga) {
/* add VGA specific AML methods */
int s3d;
if (object_dynamic_cast(OBJECT(pdev), "qxl-vga")) {
s3d = 3;
} else {
s3d = 0;
}
method = aml_method("_S1D", 0, AML_NOTSERIALIZED);
aml_append(method, aml_return(aml_int(0)));
aml_append(dev, method);
method = aml_method("_S2D", 0, AML_NOTSERIALIZED);
aml_append(method, aml_return(aml_int(0)));
aml_append(dev, method);
method = aml_method("_S3D", 0, AML_NOTSERIALIZED);
aml_append(method, aml_return(aml_int(s3d)));
aml_append(dev, method);
}
call_dev_aml_func(DEVICE(pdev), dev);
bridge_in_acpi = cold_plugged_bridge && pcihp_bridge_en;
if (bridge_in_acpi) {
@ -1030,7 +985,6 @@ static void build_piix4_pci0_int(Aml *table)
{
Aml *dev;
Aml *crs;
Aml *field;
Aml *method;
uint32_t irqs;
Aml *sb_scope = aml_scope("_SB");
@ -1039,13 +993,6 @@ static void build_piix4_pci0_int(Aml *table)
aml_append(pci0_scope, build_prt(true));
aml_append(sb_scope, pci0_scope);
field = aml_field("PCI0.ISA.P40C", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("PRQ0", 8));
aml_append(field, aml_named_field("PRQ1", 8));
aml_append(field, aml_named_field("PRQ2", 8));
aml_append(field, aml_named_field("PRQ3", 8));
aml_append(sb_scope, field);
aml_append(sb_scope, build_irq_status_method());
aml_append(sb_scope, build_iqcr_method(true));
@ -1149,7 +1096,6 @@ static Aml *build_q35_routing_table(const char *str)
static void build_q35_pci0_int(Aml *table)
{
Aml *field;
Aml *method;
Aml *sb_scope = aml_scope("_SB");
Aml *pci0_scope = aml_scope("PCI0");
@ -1186,18 +1132,6 @@ static void build_q35_pci0_int(Aml *table)
aml_append(pci0_scope, method);
aml_append(sb_scope, pci0_scope);
field = aml_field("PCI0.ISA.PIRQ", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("PRQA", 8));
aml_append(field, aml_named_field("PRQB", 8));
aml_append(field, aml_named_field("PRQC", 8));
aml_append(field, aml_named_field("PRQD", 8));
aml_append(field, aml_reserved_field(0x20));
aml_append(field, aml_named_field("PRQE", 8));
aml_append(field, aml_named_field("PRQF", 8));
aml_append(field, aml_named_field("PRQG", 8));
aml_append(field, aml_named_field("PRQH", 8));
aml_append(sb_scope, field);
aml_append(sb_scope, build_irq_status_method());
aml_append(sb_scope, build_iqcr_method(false));
@ -1262,54 +1196,6 @@ static Aml *build_q35_dram_controller(const AcpiMcfgInfo *mcfg)
return dev;
}
static void build_q35_isa_bridge(Aml *table)
{
Aml *dev;
Aml *scope;
Object *obj;
bool ambiguous;
/*
* temporarily fish out isa bridge, build_q35_isa_bridge() will be dropped
* once PCI is converted to AcpiDevAmlIf and will be able to generate
* AML for the bridge itself
*/
obj = object_resolve_path_type("", TYPE_ICH9_LPC_DEVICE, &ambiguous);
assert(obj && !ambiguous);
scope = aml_scope("_SB.PCI0");
dev = aml_device("ISA");
aml_append(dev, aml_name_decl("_ADR", aml_int(0x001F0000)));
call_dev_aml_func(DEVICE(obj), dev);
aml_append(scope, dev);
aml_append(table, scope);
}
static void build_piix4_isa_bridge(Aml *table)
{
Aml *dev;
Aml *scope;
Object *obj;
bool ambiguous;
/*
* temporarily fish out isa bridge, build_piix4_isa_bridge() will be dropped
* once PCI is converted to AcpiDevAmlIf and will be able to generate
* AML for the bridge itself
*/
obj = object_resolve_path_type("", TYPE_PIIX3_PCI_DEVICE, &ambiguous);
assert(obj && !ambiguous);
scope = aml_scope("_SB.PCI0");
dev = aml_device("ISA");
aml_append(dev, aml_name_decl("_ADR", aml_int(0x00010000)));
call_dev_aml_func(DEVICE(obj), dev);
aml_append(scope, dev);
aml_append(table, scope);
}
static void build_x86_acpi_pci_hotplug(Aml *table, uint64_t pcihp_addr)
{
Aml *scope;
@ -1416,25 +1302,6 @@ static Aml *build_q35_osc_method(bool enable_native_pcie_hotplug)
return method;
}
static void build_smb0(Aml *table, int devnr, int func)
{
Aml *scope = aml_scope("_SB.PCI0");
Aml *dev = aml_device("SMB0");
bool ambiguous;
Object *obj;
/*
* temporarily fish out device hosting SMBUS, build_smb0 will be gone once
* PCI enumeration is switched to call_dev_aml_func()
*/
obj = object_resolve_path_type("", TYPE_ICH9_SMB_DEVICE, &ambiguous);
assert(obj && !ambiguous);
aml_append(dev, aml_name_decl("_ADR", aml_int(devnr << 16 | func)));
call_dev_aml_func(DEVICE(obj), dev);
aml_append(scope, dev);
aml_append(table, scope);
}
static void build_acpi0017(Aml *table)
{
Aml *dev, *scope, *method;
@ -1456,6 +1323,8 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
AcpiPmInfo *pm, AcpiMiscInfo *misc,
Range *pci_hole, Range *pci_hole64, MachineState *machine)
{
Object *i440fx = object_resolve_type_unambiguous(TYPE_I440FX_PCI_HOST_BRIDGE);
Object *q35 = object_resolve_type_unambiguous(TYPE_Q35_HOST_DEVICE);
CrsRangeEntry *entry;
Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs;
CrsRangeSet crs_range_set;
@ -1476,11 +1345,13 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
AcpiTable table = { .sig = "DSDT", .rev = 1, .oem_id = x86ms->oem_id,
.oem_table_id = x86ms->oem_table_id };
assert(!!i440fx != !!q35);
acpi_table_begin(&table, table_data);
dsdt = init_aml_allocator();
build_dbg_aml(dsdt);
if (misc->is_piix4) {
if (i440fx) {
sb_scope = aml_scope("_SB");
dev = aml_device("PCI0");
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03")));
@ -1489,12 +1360,11 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
aml_append(sb_scope, dev);
aml_append(dsdt, sb_scope);
build_piix4_isa_bridge(dsdt);
if (pm->pcihp_bridge_en || pm->pcihp_root_en) {
build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base);
}
build_piix4_pci0_int(dsdt);
} else {
} else if (q35) {
sb_scope = aml_scope("_SB");
dev = aml_device("PCI0");
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));
@ -1534,14 +1404,10 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
aml_append(dsdt, sb_scope);
build_q35_isa_bridge(dsdt);
if (pm->pcihp_bridge_en) {
build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base);
}
build_q35_pci0_int(dsdt);
if (pcms->smbus) {
build_smb0(dsdt, ICH9_SMB_DEV, ICH9_SMB_FUNC);
}
}
if (misc->has_hpet) {
@ -1554,6 +1420,18 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
aml_append(dsdt, sb_scope);
}
scope = aml_scope("_GPE");
{
aml_append(scope, aml_name_decl("_HID", aml_string("ACPI0006")));
if (machine->nvdimms_state->is_enabled) {
method = aml_method("_E04", 0, AML_NOTSERIALIZED);
aml_append(method, aml_notify(aml_name("\\_SB.NVDR"),
aml_int(0x80)));
aml_append(scope, method);
}
}
aml_append(dsdt, scope);
if (pcmc->legacy_cpu_hotplug) {
build_legacy_cpu_hotplug_aml(dsdt, machine, pm->cpu_hp_io_base);
} else {
@ -1572,28 +1450,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
pcms->memhp_io_base);
}
scope = aml_scope("_GPE");
{
aml_append(scope, aml_name_decl("_HID", aml_string("ACPI0006")));
if (pm->pcihp_bridge_en || pm->pcihp_root_en) {
method = aml_method("_E01", 0, AML_NOTSERIALIZED);
aml_append(method,
aml_acquire(aml_name("\\_SB.PCI0.BLCK"), 0xFFFF));
aml_append(method, aml_call0("\\_SB.PCI0.PCNT"));
aml_append(method, aml_release(aml_name("\\_SB.PCI0.BLCK")));
aml_append(scope, method);
}
if (machine->nvdimms_state->is_enabled) {
method = aml_method("_E04", 0, AML_NOTSERIALIZED);
aml_append(method, aml_notify(aml_name("\\_SB.NVDR"),
aml_int(0x80)));
aml_append(scope, method);
}
}
aml_append(dsdt, scope);
crs_range_set_init(&crs_range_set);
bus = PC_MACHINE(machine)->bus;
if (bus) {
@ -1872,6 +1728,19 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
}
aml_append(dsdt, sb_scope);
if (pm->pcihp_bridge_en || pm->pcihp_root_en) {
scope = aml_scope("_GPE");
{
method = aml_method("_E01", 0, AML_NOTSERIALIZED);
aml_append(method,
aml_acquire(aml_name("\\_SB.PCI0.BLCK"), 0xFFFF));
aml_append(method, aml_call0("\\_SB.PCI0.PCNT"));
aml_append(method, aml_release(aml_name("\\_SB.PCI0.BLCK")));
aml_append(scope, method);
}
aml_append(dsdt, scope);
}
/* copy AML table into ACPI tables blob and patch header there */
g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
acpi_table_end(linker, &table);


@ -11,29 +11,11 @@
#include "e820_memory_layout.h"
static size_t e820_entries;
struct e820_table e820_reserve;
struct e820_entry *e820_table;
int e820_add_entry(uint64_t address, uint64_t length, uint32_t type)
{
int index = le32_to_cpu(e820_reserve.count);
struct e820_entry *entry;
if (type != E820_RAM) {
/* old FW_CFG_E820_TABLE entry -- reservations only */
if (index >= E820_NR_ENTRIES) {
return -EBUSY;
}
entry = &e820_reserve.entry[index++];
entry->address = cpu_to_le64(address);
entry->length = cpu_to_le64(length);
entry->type = cpu_to_le32(type);
e820_reserve.count = cpu_to_le32(index);
}
/* new "etc/e820" file -- include ram too */
/* new "etc/e820" file -- include ram and reserved entries */
e820_table = g_renew(struct e820_entry, e820_table, e820_entries + 1);
e820_table[e820_entries].address = cpu_to_le64(address);
e820_table[e820_entries].length = cpu_to_le64(length);


@ -16,20 +16,12 @@
#define E820_NVS 4
#define E820_UNUSABLE 5
#define E820_NR_ENTRIES 16
struct e820_entry {
uint64_t address;
uint64_t length;
uint32_t type;
} QEMU_PACKED __attribute((__aligned__(4)));
struct e820_table {
uint32_t count;
struct e820_entry entry[E820_NR_ENTRIES];
} QEMU_PACKED __attribute((__aligned__(4)));
extern struct e820_table e820_reserve;
extern struct e820_entry *e820_table;
int e820_add_entry(uint64_t address, uint64_t length, uint32_t type);


@ -36,7 +36,6 @@ const char *fw_cfg_arch_key_name(uint16_t key)
{FW_CFG_ACPI_TABLES, "acpi_tables"},
{FW_CFG_SMBIOS_ENTRIES, "smbios_entries"},
{FW_CFG_IRQ0_OVERRIDE, "irq0_override"},
{FW_CFG_E820_TABLE, "e820_table"},
{FW_CFG_HPET, "hpet"},
};
@ -127,8 +126,6 @@ FWCfgState *fw_cfg_arch_create(MachineState *ms,
#endif
fw_cfg_add_i32(fw_cfg, FW_CFG_IRQ0_OVERRIDE, 1);
fw_cfg_add_bytes(fw_cfg, FW_CFG_E820_TABLE,
&e820_reserve, sizeof(e820_reserve));
fw_cfg_add_file(fw_cfg, "etc/e820", e820_table,
sizeof(struct e820_entry) * e820_get_num_entries());


@ -17,7 +17,6 @@
#define FW_CFG_ACPI_TABLES (FW_CFG_ARCH_LOCAL + 0)
#define FW_CFG_SMBIOS_ENTRIES (FW_CFG_ARCH_LOCAL + 1)
#define FW_CFG_IRQ0_OVERRIDE (FW_CFG_ARCH_LOCAL + 2)
#define FW_CFG_E820_TABLE (FW_CFG_ARCH_LOCAL + 3)
#define FW_CFG_HPET (FW_CFG_ARCH_LOCAL + 4)
FWCfgState *fw_cfg_arch_create(MachineState *ms,

File diff suppressed because it is too large.


@ -114,8 +114,9 @@
VTD_INTERRUPT_ADDR_FIRST + 1)
/* The shift of source_id in the key of IOTLB hash table */
#define VTD_IOTLB_SID_SHIFT 36
#define VTD_IOTLB_LVL_SHIFT 52
#define VTD_IOTLB_SID_SHIFT 20
#define VTD_IOTLB_LVL_SHIFT 28
#define VTD_IOTLB_PASID_SHIFT 30
#define VTD_IOTLB_MAX_SIZE 1024 /* Max size of the hash table */
/* IOTLB_REG */
@ -191,6 +192,7 @@
#define VTD_ECAP_SC (1ULL << 7)
#define VTD_ECAP_MHMV (15ULL << 20)
#define VTD_ECAP_SRS (1ULL << 31)
#define VTD_ECAP_PASID (1ULL << 40)
#define VTD_ECAP_SMTS (1ULL << 43)
#define VTD_ECAP_SLTS (1ULL << 46)
@ -211,6 +213,8 @@
#define VTD_CAP_DRAIN_READ (1ULL << 55)
#define VTD_CAP_DRAIN (VTD_CAP_DRAIN_READ | VTD_CAP_DRAIN_WRITE)
#define VTD_CAP_CM (1ULL << 7)
#define VTD_PASID_ID_SHIFT 20
#define VTD_PASID_ID_MASK ((1ULL << VTD_PASID_ID_SHIFT) - 1)
/* Supported Adjusted Guest Address Widths */
#define VTD_CAP_SAGAW_SHIFT 8
@ -262,6 +266,8 @@
#define VTD_FRCD_SID(val) ((val) & VTD_FRCD_SID_MASK)
/* For the low 64-bit of 128-bit */
#define VTD_FRCD_FI(val) ((val) & ~0xfffULL)
#define VTD_FRCD_PV(val) (((val) & 0xffffULL) << 40)
#define VTD_FRCD_PP(val) (((val) & 0x1) << 31)
/* DMA Remapping Fault Conditions */
typedef enum VTDFaultReason {
@ -379,6 +385,11 @@ typedef union VTDInvDesc VTDInvDesc;
#define VTD_INV_DESC_IOTLB_AM(val) ((val) & 0x3fULL)
#define VTD_INV_DESC_IOTLB_RSVD_LO 0xffffffff0000ff00ULL
#define VTD_INV_DESC_IOTLB_RSVD_HI 0xf80ULL
#define VTD_INV_DESC_IOTLB_PASID_PASID (2ULL << 4)
#define VTD_INV_DESC_IOTLB_PASID_PAGE (3ULL << 4)
#define VTD_INV_DESC_IOTLB_PASID(val) (((val) >> 32) & VTD_PASID_ID_MASK)
#define VTD_INV_DESC_IOTLB_PASID_RSVD_LO 0xfff00000000001c0ULL
#define VTD_INV_DESC_IOTLB_PASID_RSVD_HI 0xf80ULL
/* Mask for Device IOTLB Invalidate Descriptor */
#define VTD_INV_DESC_DEVICE_IOTLB_ADDR(val) ((val) & 0xfffffffffffff000ULL)
@ -413,6 +424,7 @@ typedef union VTDInvDesc VTDInvDesc;
/* Information about page-selective IOTLB invalidate */
struct VTDIOTLBPageInvInfo {
uint16_t domain_id;
uint32_t pasid;
uint64_t addr;
uint8_t mask;
};


@ -324,8 +324,6 @@ static void microvm_memory_init(MicrovmMachineState *mms)
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, machine->smp.max_cpus);
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)machine->ram_size);
fw_cfg_add_i32(fw_cfg, FW_CFG_IRQ0_OVERRIDE, 1);
fw_cfg_add_bytes(fw_cfg, FW_CFG_E820_TABLE,
&e820_reserve, sizeof(e820_reserve));
fw_cfg_add_file(fw_cfg, "etc/e820", e820_table,
sizeof(struct e820_entry) * e820_get_num_entries());


@ -1061,7 +1061,6 @@ void pc_memory_init(PCMachineState *pcms,
hwaddr cxl_size = MiB;
cxl_base = pc_get_cxl_range_start(pcms);
e820_add_entry(cxl_base, cxl_size, E820_RESERVED);
memory_region_init(mr, OBJECT(machine), "cxl_host_reg", cxl_size);
memory_region_add_subregion(system_memory, cxl_base, mr);
cxl_resv_end = cxl_base + cxl_size;
@ -1077,7 +1076,6 @@ void pc_memory_init(PCMachineState *pcms,
memory_region_init_io(&fw->mr, OBJECT(machine), &cfmws_ops, fw,
"cxl-fixed-memory-region", fw->size);
memory_region_add_subregion(system_memory, fw->base, &fw->mr);
e820_add_entry(fw->base, fw->size, E820_RESERVED);
cxl_fmw_base += fw->size;
cxl_resv_end = cxl_fmw_base;
}


@ -12,6 +12,8 @@ vtd_inv_desc_cc_devices(uint16_t sid, uint16_t fmask) "context invalidate device
vtd_inv_desc_iotlb_global(void) "iotlb invalidate global"
vtd_inv_desc_iotlb_domain(uint16_t domain) "iotlb invalidate whole domain 0x%"PRIx16
vtd_inv_desc_iotlb_pages(uint16_t domain, uint64_t addr, uint8_t mask) "iotlb invalidate domain 0x%"PRIx16" addr 0x%"PRIx64" mask 0x%"PRIx8
vtd_inv_desc_iotlb_pasid_pages(uint16_t domain, uint64_t addr, uint8_t mask, uint32_t pasid) "iotlb invalidate domain 0x%"PRIx16" addr 0x%"PRIx64" mask 0x%"PRIx8" pasid 0x%"PRIx32
vtd_inv_desc_iotlb_pasid(uint16_t domain, uint32_t pasid) "iotlb invalidate domain 0x%"PRIx16" pasid 0x%"PRIx32
vtd_inv_desc_wait_sw(uint64_t addr, uint32_t data) "wait invalidate status write addr 0x%"PRIx64" data 0x%"PRIx32
vtd_inv_desc_wait_irq(const char *msg) "%s"
vtd_inv_desc_wait_write_fail(uint64_t hi, uint64_t lo) "write fail for wait desc hi 0x%"PRIx64" lo 0x%"PRIx64


@ -809,6 +809,7 @@ static void ich9_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
static void build_ich9_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
{
Aml *field;
BusChild *kid;
ICH9LPCState *s = ICH9_LPC_DEVICE(adev);
BusState *bus = BUS(s->isa_bus);
@ -816,6 +817,28 @@ static void build_ich9_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
/* ICH9 PCI to ISA irq remapping */
aml_append(scope, aml_operation_region("PIRQ", AML_PCI_CONFIG,
aml_int(0x60), 0x0C));
/* Field declarations have to happen *after* the operation region */
field = aml_field("PIRQ", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("PRQA", 8));
aml_append(field, aml_named_field("PRQB", 8));
aml_append(field, aml_named_field("PRQC", 8));
aml_append(field, aml_named_field("PRQD", 8));
aml_append(field, aml_reserved_field(0x20));
aml_append(field, aml_named_field("PRQE", 8));
aml_append(field, aml_named_field("PRQF", 8));
aml_append(field, aml_named_field("PRQG", 8));
aml_append(field, aml_named_field("PRQH", 8));
aml_append(scope, field);
/* hack: put fields into _SB scope for LNKx to find them */
aml_append(scope, aml_alias("PRQA", "\\_SB.PRQA"));
aml_append(scope, aml_alias("PRQB", "\\_SB.PRQB"));
aml_append(scope, aml_alias("PRQC", "\\_SB.PRQC"));
aml_append(scope, aml_alias("PRQD", "\\_SB.PRQD"));
aml_append(scope, aml_alias("PRQE", "\\_SB.PRQE"));
aml_append(scope, aml_alias("PRQF", "\\_SB.PRQF"));
aml_append(scope, aml_alias("PRQG", "\\_SB.PRQG"));
aml_append(scope, aml_alias("PRQH", "\\_SB.PRQH"));
QTAILQ_FOREACH(kid, &bus->children, sibling) {
call_dev_aml_func(DEVICE(kid->child), scope);


@ -316,12 +316,27 @@ static void pci_piix3_realize(PCIDevice *dev, Error **errp)
static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
{
Aml *field;
BusChild *kid;
BusState *bus = qdev_get_child_bus(DEVICE(adev), "isa.0");
/* PIIX PCI to ISA irq remapping */
aml_append(scope, aml_operation_region("P40C", AML_PCI_CONFIG,
aml_int(0x60), 0x04));
aml_int(0x60), 0x04));
/* Field declarations have to happen *after* the operation region */
field = aml_field("P40C", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
aml_append(field, aml_named_field("PRQ0", 8));
aml_append(field, aml_named_field("PRQ1", 8));
aml_append(field, aml_named_field("PRQ2", 8));
aml_append(field, aml_named_field("PRQ3", 8));
aml_append(scope, field);
/* hack: put fields into _SB scope for LNKx to find them */
aml_append(scope, aml_alias("PRQ0", "\\_SB.PRQ0"));
aml_append(scope, aml_alias("PRQ1", "\\_SB.PRQ1"));
aml_append(scope, aml_alias("PRQ2", "\\_SB.PRQ2"));
aml_append(scope, aml_alias("PRQ3", "\\_SB.PRQ3"));
QTAILQ_FOREACH(kid, &bus->children, sibling) {
call_dev_aml_func(DEVICE(kid->child), scope);
}


@ -12,7 +12,245 @@
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"
#define DWORD_BYTE 4
/* Default CDAT entries for a memory region */
enum {
CT3_CDAT_DSMAS,
CT3_CDAT_DSLBIS0,
CT3_CDAT_DSLBIS1,
CT3_CDAT_DSLBIS2,
CT3_CDAT_DSLBIS3,
CT3_CDAT_DSEMTS,
CT3_CDAT_NUM_ENTRIES
};
static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
int dsmad_handle, MemoryRegion *mr)
{
g_autofree CDATDsmas *dsmas = NULL;
g_autofree CDATDslbis *dslbis0 = NULL;
g_autofree CDATDslbis *dslbis1 = NULL;
g_autofree CDATDslbis *dslbis2 = NULL;
g_autofree CDATDslbis *dslbis3 = NULL;
g_autofree CDATDsemts *dsemts = NULL;
dsmas = g_malloc(sizeof(*dsmas));
if (!dsmas) {
return -ENOMEM;
}
*dsmas = (CDATDsmas) {
.header = {
.type = CDAT_TYPE_DSMAS,
.length = sizeof(*dsmas),
},
.DSMADhandle = dsmad_handle,
.flags = CDAT_DSMAS_FLAG_NV,
.DPA_base = 0,
.DPA_length = int128_get64(mr->size),
};
/* For now, no memory side cache, plausiblish numbers */
dslbis0 = g_malloc(sizeof(*dslbis0));
if (!dslbis0) {
return -ENOMEM;
}
*dslbis0 = (CDATDslbis) {
.header = {
.type = CDAT_TYPE_DSLBIS,
.length = sizeof(*dslbis0),
},
.handle = dsmad_handle,
.flags = HMAT_LB_MEM_MEMORY,
.data_type = HMAT_LB_DATA_READ_LATENCY,
.entry_base_unit = 10000, /* 10ns base */
.entry[0] = 15, /* 150ns */
};
dslbis1 = g_malloc(sizeof(*dslbis1));
if (!dslbis1) {
return -ENOMEM;
}
*dslbis1 = (CDATDslbis) {
.header = {
.type = CDAT_TYPE_DSLBIS,
.length = sizeof(*dslbis1),
},
.handle = dsmad_handle,
.flags = HMAT_LB_MEM_MEMORY,
.data_type = HMAT_LB_DATA_WRITE_LATENCY,
.entry_base_unit = 10000,
.entry[0] = 25, /* 250ns */
};
dslbis2 = g_malloc(sizeof(*dslbis2));
if (!dslbis2) {
return -ENOMEM;
}
*dslbis2 = (CDATDslbis) {
.header = {
.type = CDAT_TYPE_DSLBIS,
.length = sizeof(*dslbis2),
},
.handle = dsmad_handle,
.flags = HMAT_LB_MEM_MEMORY,
.data_type = HMAT_LB_DATA_READ_BANDWIDTH,
.entry_base_unit = 1000, /* GB/s */
.entry[0] = 16,
};
dslbis3 = g_malloc(sizeof(*dslbis3));
if (!dslbis3) {
return -ENOMEM;
}
*dslbis3 = (CDATDslbis) {
.header = {
.type = CDAT_TYPE_DSLBIS,
.length = sizeof(*dslbis3),
},
.handle = dsmad_handle,
.flags = HMAT_LB_MEM_MEMORY,
.data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
.entry_base_unit = 1000, /* GB/s */
.entry[0] = 16,
};
dsemts = g_malloc(sizeof(*dsemts));
if (!dsemts) {
return -ENOMEM;
}
*dsemts = (CDATDsemts) {
.header = {
.type = CDAT_TYPE_DSEMTS,
.length = sizeof(*dsemts),
},
.DSMAS_handle = dsmad_handle,
/* Reserved - the non-volatile flag from DSMAS matters */
.EFI_memory_type_attr = 2,
.DPA_offset = 0,
.DPA_length = int128_get64(mr->size),
};
/* Header always at start of structure */
cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);
return 0;
}
static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
g_autofree CDATSubHeader **table = NULL;
MemoryRegion *nonvolatile_mr;
CXLType3Dev *ct3d = priv;
int dsmad_handle = 0;
int rc;
if (!ct3d->hostmem) {
return 0;
}
nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostmem);
if (!nonvolatile_mr) {
return -EINVAL;
}
table = g_malloc0(CT3_CDAT_NUM_ENTRIES * sizeof(*table));
if (!table) {
return -ENOMEM;
}
rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, nonvolatile_mr);
if (rc < 0) {
return rc;
}
*cdat_table = g_steal_pointer(&table);
return CT3_CDAT_NUM_ENTRIES;
}
static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
int i;
for (i = 0; i < num; i++) {
g_free(cdat_table[i]);
}
g_free(cdat_table);
}
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
uint16_t ent;
void *base;
uint32_t len;
CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
CDATRsp rsp;
assert(cdat->entry_len);
/* Discard if request length mismatched */
if (pcie_doe_get_obj_len(req) <
DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
return false;
}
ent = req->entry_handle;
base = cdat->entry[ent].base;
len = cdat->entry[ent].length;
rsp = (CDATRsp) {
.header = {
.vendor_id = CXL_VENDOR_ID,
.data_obj_type = CXL_DOE_TABLE_ACCESS,
.reserved = 0x0,
.length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
},
.rsp_code = CXL_DOE_TAB_RSP,
.table_type = CXL_DOE_TAB_TYPE_CDAT,
.entry_handle = (ent < cdat->entry_len - 1) ?
ent + 1 : CXL_DOE_TAB_ENT_MAX,
};
memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
base, len);
doe_cap->read_mbox_len += rsp.header.length;
return true;
}
static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
uint32_t val;
if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
return val;
}
return pci_default_read_config(pci_dev, addr, size);
}
static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
int size)
{
CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
pci_default_write_config(pci_dev, addr, val, size);
}
/*
* Null value of all Fs suggested by IEEE RA guidelines for use of
@ -139,6 +377,11 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
return true;
}
static DOEProtocol doe_cdat_prot[] = {
{ CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
{ }
};
static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
@ -146,6 +389,8 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
ComponentRegisters *regs = &cxl_cstate->crb;
MemoryRegion *mr = &regs->component_registers;
uint8_t *pci_conf = pci_dev->config;
unsigned short msix_num = 1;
int i;
if (!cxl_setup_memory(ct3d, errp)) {
return;
@ -180,6 +425,20 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64,
&ct3d->cxl_dstate.device_registers);
/* MSI(-X) Initialization */
msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
for (i = 0; i < msix_num; i++) {
msix_vector_use(pci_dev, i);
}
/* DOE Initialization */
pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);
cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
cxl_cstate->cdat.private = ct3d;
cxl_doe_cdat_init(cxl_cstate, errp);
}
static void ct3_exit(PCIDevice *pci_dev)
@ -188,6 +447,7 @@ static void ct3_exit(PCIDevice *pci_dev)
CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
ComponentRegisters *regs = &cxl_cstate->crb;
cxl_doe_cdat_release(cxl_cstate);
g_free(regs->special_ops);
address_space_destroy(&ct3d->hostmem_as);
}
@ -287,6 +547,7 @@ static Property ct3_props[] = {
DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
HostMemoryBackend *),
DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
DEFINE_PROP_END_OF_LIST(),
};
@ -352,6 +613,9 @@ static void ct3_class_init(ObjectClass *oc, void *data)
pc->device_id = 0xd93; /* LVF for now */
pc->revision = 1;
pc->config_write = ct3d_config_write;
pc->config_read = ct3d_config_read;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->desc = "CXL PMEM Device (Type 3)";
dc->reset = ct3d_reset;


@ -276,25 +276,18 @@ e1000e_unuse_msix_vectors(E1000EState *s, int num_vectors)
}
}
static bool
static void
e1000e_use_msix_vectors(E1000EState *s, int num_vectors)
{
int i;
for (i = 0; i < num_vectors; i++) {
int res = msix_vector_use(PCI_DEVICE(s), i);
if (res < 0) {
trace_e1000e_msix_use_vector_fail(i, res);
e1000e_unuse_msix_vectors(s, i);
return false;
}
msix_vector_use(PCI_DEVICE(s), i);
}
return true;
}
static void
e1000e_init_msix(E1000EState *s)
{
PCIDevice *d = PCI_DEVICE(s);
int res = msix_init(PCI_DEVICE(s), E1000E_MSIX_VEC_NUM,
&s->msix,
E1000E_MSIX_IDX, E1000E_MSIX_TABLE,
@ -305,9 +298,7 @@ e1000e_init_msix(E1000EState *s)
if (res < 0) {
trace_e1000e_msix_init_fail(res);
} else {
if (!e1000e_use_msix_vectors(s, E1000E_MSIX_VEC_NUM)) {
msix_uninit(d, &s->msix, &s->msix);
}
e1000e_use_msix_vectors(s, E1000E_MSIX_VEC_NUM);
}
}


@ -1212,24 +1212,14 @@ static void rocker_msix_vectors_unuse(Rocker *r,
}
}
static int rocker_msix_vectors_use(Rocker *r,
unsigned int num_vectors)
static void rocker_msix_vectors_use(Rocker *r, unsigned int num_vectors)
{
PCIDevice *dev = PCI_DEVICE(r);
int err;
int i;
for (i = 0; i < num_vectors; i++) {
err = msix_vector_use(dev, i);
if (err) {
goto rollback;
}
msix_vector_use(dev, i);
}
return 0;
rollback:
rocker_msix_vectors_unuse(r, i);
return err;
}
static int rocker_msix_init(Rocker *r, Error **errp)
@ -1247,16 +1237,9 @@ static int rocker_msix_init(Rocker *r, Error **errp)
return err;
}
err = rocker_msix_vectors_use(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports));
if (err) {
goto err_msix_vectors_use;
}
rocker_msix_vectors_use(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports));
return 0;
err_msix_vectors_use:
msix_uninit(dev, &r->msix_bar, &r->msix_bar);
return err;
}
static void rocker_msix_uninit(Rocker *r)


@ -101,3 +101,15 @@ int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
{
return 0;
}
void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc,
int vq_index)
{
}
int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc,
int vq_index)
{
return 0;
}


@ -34,6 +34,7 @@
#include "standard-headers/linux/virtio_ring.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio-bus.h"
#include "linux-headers/linux/vhost.h"
/* Features supported by host kernel. */
@ -46,6 +47,7 @@ static const int kernel_feature_bits[] = {
VIRTIO_NET_F_MTU,
VIRTIO_F_IOMMU_PLATFORM,
VIRTIO_F_RING_PACKED,
VIRTIO_F_RING_RESET,
VIRTIO_NET_F_HASH_REPORT,
VHOST_INVALID_FEATURE_BIT
};
@ -387,21 +389,20 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
} else {
peer = qemu_get_peer(ncs, n->max_queue_pairs);
}
r = vhost_net_start_one(get_vhost_net(peer), dev);
if (r < 0) {
goto err_start;
}
if (peer->vring_enable) {
/* restore vring enable state */
r = vhost_set_vring_enable(peer, peer->vring_enable);
if (r < 0) {
vhost_net_stop_one(get_vhost_net(peer), dev);
goto err_start;
}
}
r = vhost_net_start_one(get_vhost_net(peer), dev);
if (r < 0) {
goto err_start;
}
}
return 0;
@ -531,3 +532,80 @@ int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
return vhost_ops->vhost_net_set_mtu(&net->dev, mtu);
}
void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc,
int vq_index)
{
VHostNetState *net = get_vhost_net(nc->peer);
const VhostOps *vhost_ops = net->dev.vhost_ops;
struct vhost_vring_file file = { .fd = -1 };
int idx;
/* should only be called after backend is connected */
assert(vhost_ops);
idx = vhost_ops->vhost_get_vq_index(&net->dev, vq_index);
if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
file.index = idx;
int r = vhost_net_set_backend(&net->dev, &file);
assert(r >= 0);
}
vhost_virtqueue_stop(&net->dev,
vdev,
net->dev.vqs + idx,
net->dev.vq_index + idx);
}
int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc,
int vq_index)
{
VHostNetState *net = get_vhost_net(nc->peer);
const VhostOps *vhost_ops = net->dev.vhost_ops;
struct vhost_vring_file file = { };
int idx, r;
if (!net->dev.started) {
return -EBUSY;
}
/* should only be called after backend is connected */
assert(vhost_ops);
idx = vhost_ops->vhost_get_vq_index(&net->dev, vq_index);
r = vhost_virtqueue_start(&net->dev,
vdev,
net->dev.vqs + idx,
net->dev.vq_index + idx);
if (r < 0) {
goto err_start;
}
if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
file.index = idx;
file.fd = net->backend;
r = vhost_net_set_backend(&net->dev, &file);
if (r < 0) {
r = -errno;
goto err_start;
}
}
return 0;
err_start:
error_report("Error when restarting the queue.");
if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
file.fd = VHOST_FILE_UNBIND;
file.index = idx;
int r = vhost_net_set_backend(&net->dev, &file);
assert(r >= 0);
}
vhost_dev_stop(&net->dev, vdev);
return r;
}


@ -124,6 +124,16 @@ static int vq2q(int queue_index)
return queue_index / 2;
}
static void flush_or_purge_queued_packets(NetClientState *nc)
{
if (!nc->peer) {
return;
}
qemu_flush_or_purge_queued_packets(nc->peer, true);
assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
}
/* TODO
* - we could suppress RX interrupt if we were so inclined.
*/
@ -536,6 +546,43 @@ static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
return info;
}
static void virtio_net_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
{
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(queue_index));
if (!nc->peer) {
return;
}
if (get_vhost_net(nc->peer) &&
nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
vhost_net_virtqueue_reset(vdev, nc, queue_index);
}
flush_or_purge_queued_packets(nc);
}
static void virtio_net_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
{
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(queue_index));
int r;
if (!nc->peer || !vdev->vhost_started) {
return;
}
if (get_vhost_net(nc->peer) &&
nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
r = vhost_net_virtqueue_restart(vdev, nc, queue_index);
if (r < 0) {
error_report("unable to restart vhost net virtqueue: %d, "
"when resetting the queue", queue_index);
}
}
}
static void virtio_net_reset(VirtIODevice *vdev)
{
VirtIONet *n = VIRTIO_NET(vdev);
@ -566,12 +613,7 @@ static void virtio_net_reset(VirtIODevice *vdev)
/* Flush any async TX */
for (i = 0; i < n->max_queue_pairs; i++) {
NetClientState *nc = qemu_get_subqueue(n->nic, i);
if (nc->peer) {
qemu_flush_or_purge_queued_packets(nc->peer, true);
assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
}
flush_or_purge_queued_packets(qemu_get_subqueue(n->nic, i));
}
}
@ -746,6 +788,7 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
}
if (!get_vhost_net(nc->peer)) {
virtio_add_feature(&features, VIRTIO_F_RING_RESET);
return features;
}
@ -3822,6 +3865,8 @@ static void virtio_net_class_init(ObjectClass *klass, void *data)
vdc->set_features = virtio_net_set_features;
vdc->bad_features = virtio_net_bad_features;
vdc->reset = virtio_net_reset;
vdc->queue_reset = virtio_net_queue_reset;
vdc->queue_enable = virtio_net_queue_enable;
vdc->set_status = virtio_net_set_status;
vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;


@ -2110,20 +2110,14 @@ vmxnet3_unuse_msix_vectors(VMXNET3State *s, int num_vectors)
}
}
static bool
static void
vmxnet3_use_msix_vectors(VMXNET3State *s, int num_vectors)
{
PCIDevice *d = PCI_DEVICE(s);
int i;
for (i = 0; i < num_vectors; i++) {
int res = msix_vector_use(d, i);
if (0 > res) {
VMW_WRPRN("Failed to use MSI-X vector %d, error %d", i, res);
vmxnet3_unuse_msix_vectors(s, i);
return false;
}
msix_vector_use(d, i);
}
return true;
}
static bool
@ -2141,13 +2135,8 @@ vmxnet3_init_msix(VMXNET3State *s)
VMW_WRPRN("Failed to initialize MSI-X, error %d", res);
s->msix_used = false;
} else {
if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) {
VMW_WRPRN("Failed to use MSI-X vectors, error %d", res);
msix_uninit(d, &s->msix_bar, &s->msix_bar);
s->msix_used = false;
} else {
s->msix_used = true;
}
vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS);
s->msix_used = true;
}
return s->msix_used;
}
@ -2412,19 +2401,13 @@ static const VMStateDescription vmstate_vmxnet3_rxq_descr = {
static int vmxnet3_post_load(void *opaque, int version_id)
{
VMXNET3State *s = opaque;
PCIDevice *d = PCI_DEVICE(s);
net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s),
s->max_tx_frags, s->peer_has_vhdr);
net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
if (s->msix_used) {
if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) {
VMW_WRPRN("Failed to re-use MSI-X vectors");
msix_uninit(d, &s->msix_bar, &s->msix_bar);
s->msix_used = false;
return -1;
}
vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS);
}
if (!vmxnet3_validate_queues(s)) {


@ -4740,11 +4740,8 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
uint16_t cqid, uint16_t vector, uint16_t size,
uint16_t irq_enabled)
{
int ret;
if (msix_enabled(&n->parent_obj)) {
ret = msix_vector_use(&n->parent_obj, vector);
assert(ret == 0);
msix_vector_use(&n->parent_obj, vector);
}
cq->ctrl = n;
cq->cqid = cqid;


@ -10,11 +10,12 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/qdev-properties.h"
#include "hw/pci/msi.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_port.h"
#define CXL_UPSTREAM_PORT_MSI_NR_VECTOR 1
#define CXL_UPSTREAM_PORT_MSI_NR_VECTOR 2
#define CXL_UPSTREAM_PORT_MSI_OFFSET 0x70
#define CXL_UPSTREAM_PORT_PCIE_CAP_OFFSET 0x90
@ -28,6 +29,7 @@ typedef struct CXLUpstreamPort {
/*< public >*/
CXLComponentState cxl_cstate;
DOECap doe_cdat;
} CXLUpstreamPort;
CXLComponentState *cxl_usp_to_cstate(CXLUpstreamPort *usp)
@ -60,6 +62,9 @@ static void cxl_usp_dvsec_write_config(PCIDevice *dev, uint32_t addr,
static void cxl_usp_write_config(PCIDevice *d, uint32_t address,
uint32_t val, int len)
{
CXLUpstreamPort *usp = CXL_USP(d);
pcie_doe_write_config(&usp->doe_cdat, address, val, len);
pci_bridge_write_config(d, address, val, len);
pcie_cap_flr_write_config(d, address, val, len);
pcie_aer_write_config(d, address, val, len);
@ -67,6 +72,18 @@ static void cxl_usp_write_config(PCIDevice *d, uint32_t address,
cxl_usp_dvsec_write_config(d, address, val, len);
}
static uint32_t cxl_usp_read_config(PCIDevice *d, uint32_t address, int len)
{
CXLUpstreamPort *usp = CXL_USP(d);
uint32_t val;
if (pcie_doe_read_config(&usp->doe_cdat, address, len, &val)) {
return val;
}
return pci_default_read_config(d, address, len);
}
static void latch_registers(CXLUpstreamPort *usp)
{
uint32_t *reg_state = usp->cxl_cstate.crb.cache_mem_registers;
@ -119,6 +136,167 @@ static void build_dvsecs(CXLComponentState *cxl)
REG_LOC_DVSEC_REVID, dvsec);
}
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
CDATObject *cdat = &CXL_USP(doe_cap->pdev)->cxl_cstate.cdat;
uint16_t ent;
void *base;
uint32_t len;
CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
CDATRsp rsp;
cxl_doe_cdat_update(&CXL_USP(doe_cap->pdev)->cxl_cstate, &error_fatal);
assert(cdat->entry_len);
/* Discard the request if its length is mismatched */
if (pcie_doe_get_obj_len(req) <
DIV_ROUND_UP(sizeof(CDATReq), sizeof(uint32_t))) {
return false;
}
ent = req->entry_handle;
base = cdat->entry[ent].base;
len = cdat->entry[ent].length;
rsp = (CDATRsp) {
.header = {
.vendor_id = CXL_VENDOR_ID,
.data_obj_type = CXL_DOE_TABLE_ACCESS,
.reserved = 0x0,
.length = DIV_ROUND_UP((sizeof(rsp) + len), sizeof(uint32_t)),
},
.rsp_code = CXL_DOE_TAB_RSP,
.table_type = CXL_DOE_TAB_TYPE_CDAT,
.entry_handle = (ent < cdat->entry_len - 1) ?
ent + 1 : CXL_DOE_TAB_ENT_MAX,
};
memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), sizeof(uint32_t)),
base, len);
doe_cap->read_mbox_len += rsp.header.length;
return true;
}
static DOEProtocol doe_cdat_prot[] = {
{ CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
{ }
};
enum {
CXL_USP_CDAT_SSLBIS_LAT,
CXL_USP_CDAT_SSLBIS_BW,
CXL_USP_CDAT_NUM_ENTRIES
};
static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
g_autofree CDATSslbis *sslbis_latency = NULL;
g_autofree CDATSslbis *sslbis_bandwidth = NULL;
CXLUpstreamPort *us = CXL_USP(priv);
PCIBus *bus = &PCI_BRIDGE(us)->sec_bus;
int devfn, sslbis_size, i;
int count = 0;
uint16_t port_ids[256];
for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
PCIDevice *d = bus->devices[devfn];
PCIEPort *port;
if (!d || !pci_is_express(d) || !d->exp.exp_cap) {
continue;
}
/*
* Whilst the PCI express spec doesn't allow anything other than
* downstream ports on this bus, let us be a little paranoid
*/
if (!object_dynamic_cast(OBJECT(d), TYPE_PCIE_PORT)) {
continue;
}
port = PCIE_PORT(d);
port_ids[count] = port->port;
count++;
}
/* May not yet have any ports - try again later */
if (count == 0) {
return 0;
}
sslbis_size = sizeof(CDATSslbis) + sizeof(*sslbis_latency->sslbe) * count;
sslbis_latency = g_malloc(sslbis_size);
if (!sslbis_latency) {
return -ENOMEM;
}
*sslbis_latency = (CDATSslbis) {
.sslbis_header = {
.header = {
.type = CDAT_TYPE_SSLBIS,
.length = sslbis_size,
},
.data_type = HMATLB_DATA_TYPE_ACCESS_LATENCY,
.entry_base_unit = 10000,
},
};
for (i = 0; i < count; i++) {
sslbis_latency->sslbe[i] = (CDATSslbe) {
.port_x_id = CDAT_PORT_ID_USP,
.port_y_id = port_ids[i],
.latency_bandwidth = 15, /* 150ns */
};
}
sslbis_bandwidth = g_malloc(sslbis_size);
if (!sslbis_bandwidth) {
return -ENOMEM;
}
*sslbis_bandwidth = (CDATSslbis) {
.sslbis_header = {
.header = {
.type = CDAT_TYPE_SSLBIS,
.length = sslbis_size,
},
.data_type = HMATLB_DATA_TYPE_ACCESS_BANDWIDTH,
.entry_base_unit = 1000,
},
};
for (i = 0; i < count; i++) {
sslbis_bandwidth->sslbe[i] = (CDATSslbe) {
.port_x_id = CDAT_PORT_ID_USP,
.port_y_id = port_ids[i],
.latency_bandwidth = 16, /* 16 GB/s */
};
}
*cdat_table = g_malloc0(sizeof(*cdat_table) * CXL_USP_CDAT_NUM_ENTRIES);
if (!*cdat_table) {
return -ENOMEM;
}
/* Header always at start of structure */
(*cdat_table)[CXL_USP_CDAT_SSLBIS_LAT] = g_steal_pointer(&sslbis_latency);
(*cdat_table)[CXL_USP_CDAT_SSLBIS_BW] = g_steal_pointer(&sslbis_bandwidth);
return CXL_USP_CDAT_NUM_ENTRIES;
}
static void free_default_cdat_table(CDATSubHeader **cdat_table, int num,
void *priv)
{
int i;
for (i = 0; i < num; i++) {
g_free(cdat_table[i]);
}
g_free(cdat_table);
}
static void cxl_usp_realize(PCIDevice *d, Error **errp)
{
PCIEPort *p = PCIE_PORT(d);
@ -161,6 +339,14 @@ static void cxl_usp_realize(PCIDevice *d, Error **errp)
PCI_BASE_ADDRESS_MEM_TYPE_64,
component_bar);
pcie_doe_init(d, &usp->doe_cdat, cxl_cstate->dvsec_offset, doe_cdat_prot,
true, 1);
cxl_cstate->cdat.build_cdat_table = build_cdat_table;
cxl_cstate->cdat.free_cdat_table = free_default_cdat_table;
cxl_cstate->cdat.private = d;
cxl_doe_cdat_init(cxl_cstate, errp);
return;
err_cap:
@ -179,6 +365,11 @@ static void cxl_usp_exitfn(PCIDevice *d)
pci_bridge_exitfn(d);
}
static Property cxl_upstream_props[] = {
DEFINE_PROP_STRING("cdat", CXLUpstreamPort, cxl_cstate.cdat.filename),
DEFINE_PROP_END_OF_LIST()
};
static void cxl_upstream_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@ -186,6 +377,7 @@ static void cxl_upstream_class_init(ObjectClass *oc, void *data)
k->is_bridge = true;
k->config_write = cxl_usp_write_config;
k->config_read = cxl_usp_read_config;
k->realize = cxl_usp_realize;
k->exit = cxl_usp_exitfn;
k->vendor_id = 0x19e5; /* Huawei */
@ -194,6 +386,7 @@ static void cxl_upstream_class_init(ObjectClass *oc, void *data)
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->desc = "CXL Switch Upstream Port";
dc->reset = cxl_usp_reset;
device_class_set_props(dc, cxl_upstream_props);
}
static const TypeInfo cxl_usp_info = {

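The Table Access handler above chains entries: each response carries the handle of the next entry, and CXL_DOE_TAB_ENT_MAX marks the end of the table. A requester therefore walks the whole CDAT roughly like this (a sketch; doe_exchange() stands in for a real DOE transport and is hypothetical, and the request header length is elided):

    uint16_t handle = 0;                      /* entry 0 starts the walk */
    do {
        CDATReq req = {
            .header = { .vendor_id = CXL_VENDOR_ID,
                        .data_obj_type = CXL_DOE_TABLE_ACCESS },
            .req_code = CXL_DOE_TAB_REQ,
            .table_type = CXL_DOE_TAB_TYPE_CDAT,
            .entry_handle = handle,
        };
        CDATRsp rsp;

        doe_exchange(&req, &rsp);             /* hypothetical round trip */
        handle = rsp.entry_handle;            /* next entry or terminator */
    } while (handle != CXL_DOE_TAB_ENT_MAX);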

@ -13,6 +13,7 @@ pci_ss.add(files(
# allow plugging PCIe devices into PCI buses, include them even if
# CONFIG_PCI_EXPRESS=n.
pci_ss.add(files('pcie.c', 'pcie_aer.c'))
pci_ss.add(files('pcie_doe.c'))
softmmu_ss.add(when: 'CONFIG_PCI_EXPRESS', if_true: files('pcie_port.c', 'pcie_host.c'))
softmmu_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss)


@ -136,17 +136,12 @@ static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
}
}
void msix_set_mask(PCIDevice *dev, int vector, bool mask, Error **errp)
void msix_set_mask(PCIDevice *dev, int vector, bool mask)
{
ERRP_GUARD();
unsigned offset;
bool was_masked;
if (vector > dev->msix_entries_nr) {
error_setg(errp, "msix: vector %d not allocated. max vector is %d",
vector, dev->msix_entries_nr);
return;
}
assert(vector < dev->msix_entries_nr);
offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
@ -522,7 +517,9 @@ void msix_notify(PCIDevice *dev, unsigned vector)
{
MSIMessage msg;
if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
assert(vector < dev->msix_entries_nr);
if (!dev->msix_entry_used[vector]) {
return;
}
@ -558,20 +555,17 @@ void msix_reset(PCIDevice *dev)
* don't want to follow the spec suggestion can declare all vectors as used. */
/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
void msix_vector_use(PCIDevice *dev, unsigned vector)
{
if (vector >= dev->msix_entries_nr) {
return -EINVAL;
}
assert(vector < dev->msix_entries_nr);
dev->msix_entry_used[vector]++;
return 0;
}
/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
assert(vector < dev->msix_entries_nr);
if (!dev->msix_entry_used[vector]) {
return;
}
if (--dev->msix_entry_used[vector]) {

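After this change msix_vector_use() and msix_set_mask() can no longer fail: an out-of-range vector is a programming error caught by the assertion, so range validation moves to the caller. The calling convention the rest of this series converges on, sketched (VIRTIO_NO_VECTOR is the virtio transport's "no vector" sentinel; other devices use their own fallback):

    if (vector < dev->msix_entries_nr) {
        msix_vector_use(dev, vector);     /* void now; cannot fail */
    } else {
        vector = VIRTIO_NO_VECTOR;        /* reject, let the guest see it */
    }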
hw/pci/pcie_doe.c (new file, 367 lines)

@ -0,0 +1,367 @@
/*
* PCIe Data Object Exchange
*
* Copyright (C) 2021 Avery Design Systems, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/range.h"
#include "hw/pci/pci.h"
#include "hw/pci/pcie.h"
#include "hw/pci/pcie_doe.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#define DWORD_BYTE 4
typedef struct DoeDiscoveryReq {
DOEHeader header;
uint8_t index;
uint8_t reserved[3];
} QEMU_PACKED DoeDiscoveryReq;
typedef struct DoeDiscoveryRsp {
DOEHeader header;
uint16_t vendor_id;
uint8_t data_obj_type;
uint8_t next_index;
} QEMU_PACKED DoeDiscoveryRsp;
static bool pcie_doe_discovery(DOECap *doe_cap)
{
DoeDiscoveryReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
DoeDiscoveryRsp rsp;
uint8_t index = req->index;
DOEProtocol *prot;
/* Discard request if length does not match DoeDiscoveryReq */
if (pcie_doe_get_obj_len(req) <
DIV_ROUND_UP(sizeof(DoeDiscoveryReq), DWORD_BYTE)) {
return false;
}
rsp.header = (DOEHeader) {
.vendor_id = PCI_VENDOR_ID_PCI_SIG,
.data_obj_type = PCI_SIG_DOE_DISCOVERY,
.length = DIV_ROUND_UP(sizeof(DoeDiscoveryRsp), DWORD_BYTE),
};
/* Point to the requested protocol, index 0 must be Discovery */
if (index == 0) {
rsp.vendor_id = PCI_VENDOR_ID_PCI_SIG;
rsp.data_obj_type = PCI_SIG_DOE_DISCOVERY;
} else {
if (index < doe_cap->protocol_num) {
prot = &doe_cap->protocols[index - 1];
rsp.vendor_id = prot->vendor_id;
rsp.data_obj_type = prot->data_obj_type;
} else {
rsp.vendor_id = 0xFFFF;
rsp.data_obj_type = 0xFF;
}
}
if (index + 1 == doe_cap->protocol_num) {
rsp.next_index = 0;
} else {
rsp.next_index = index + 1;
}
pcie_doe_set_rsp(doe_cap, &rsp);
return true;
}
static void pcie_doe_reset_mbox(DOECap *st)
{
st->read_mbox_idx = 0;
st->read_mbox_len = 0;
st->write_mbox_len = 0;
memset(st->read_mbox, 0, PCI_DOE_DW_SIZE_MAX * DWORD_BYTE);
memset(st->write_mbox, 0, PCI_DOE_DW_SIZE_MAX * DWORD_BYTE);
}
void pcie_doe_init(PCIDevice *dev, DOECap *doe_cap, uint16_t offset,
DOEProtocol *protocols, bool intr, uint16_t vec)
{
pcie_add_capability(dev, PCI_EXT_CAP_ID_DOE, 0x1, offset,
PCI_DOE_SIZEOF);
doe_cap->pdev = dev;
doe_cap->offset = offset;
if (intr && (msi_present(dev) || msix_present(dev))) {
doe_cap->cap.intr = intr;
doe_cap->cap.vec = vec;
}
doe_cap->write_mbox = g_malloc0(PCI_DOE_DW_SIZE_MAX * DWORD_BYTE);
doe_cap->read_mbox = g_malloc0(PCI_DOE_DW_SIZE_MAX * DWORD_BYTE);
pcie_doe_reset_mbox(doe_cap);
doe_cap->protocols = protocols;
for (; protocols->vendor_id; protocols++) {
doe_cap->protocol_num++;
}
assert(doe_cap->protocol_num < PCI_DOE_PROTOCOL_NUM_MAX);
/* Increment to allow for the discovery protocol */
doe_cap->protocol_num++;
}
void pcie_doe_fini(DOECap *doe_cap)
{
g_free(doe_cap->read_mbox);
g_free(doe_cap->write_mbox);
g_free(doe_cap);
}
uint32_t pcie_doe_build_protocol(DOEProtocol *p)
{
return DATA_OBJ_BUILD_HEADER1(p->vendor_id, p->data_obj_type);
}
void *pcie_doe_get_write_mbox_ptr(DOECap *doe_cap)
{
return doe_cap->write_mbox;
}
/*
* Copy the response into the read mailbox buffer.
* A protocol's own handle_request() implementation may call this when the
* protocol requires a DOE response.
*/
void pcie_doe_set_rsp(DOECap *doe_cap, void *rsp)
{
uint32_t len = pcie_doe_get_obj_len(rsp);
memcpy(doe_cap->read_mbox + doe_cap->read_mbox_len, rsp, len * DWORD_BYTE);
doe_cap->read_mbox_len += len;
}
uint32_t pcie_doe_get_obj_len(void *obj)
{
uint32_t len;
if (!obj) {
return 0;
}
/* Only lower 18 bits are valid */
len = DATA_OBJ_LEN_MASK(((DOEHeader *)obj)->length);
/* PCIe r6.0 Table 6.29: a value of 00000h indicates 2^18 DW */
return (len) ? len : PCI_DOE_DW_SIZE_MAX;
}
static void pcie_doe_irq_assert(DOECap *doe_cap)
{
PCIDevice *dev = doe_cap->pdev;
if (doe_cap->cap.intr && doe_cap->ctrl.intr) {
if (doe_cap->status.intr) {
return;
}
doe_cap->status.intr = 1;
if (msix_enabled(dev)) {
msix_notify(dev, doe_cap->cap.vec);
} else if (msi_enabled(dev)) {
msi_notify(dev, doe_cap->cap.vec);
}
}
}
static void pcie_doe_set_ready(DOECap *doe_cap, bool rdy)
{
doe_cap->status.ready = rdy;
if (rdy) {
pcie_doe_irq_assert(doe_cap);
}
}
static void pcie_doe_set_error(DOECap *doe_cap, bool err)
{
doe_cap->status.error = err;
if (err) {
pcie_doe_irq_assert(doe_cap);
}
}
/*
* Check incoming request in write_mbox for protocol format
*/
static void pcie_doe_prepare_rsp(DOECap *doe_cap)
{
bool success = false;
int p;
bool (*handle_request)(DOECap *) = NULL;
if (doe_cap->status.error) {
return;
}
if (doe_cap->write_mbox[0] ==
DATA_OBJ_BUILD_HEADER1(PCI_VENDOR_ID_PCI_SIG, PCI_SIG_DOE_DISCOVERY)) {
handle_request = pcie_doe_discovery;
} else {
for (p = 0; p < doe_cap->protocol_num - 1; p++) {
if (doe_cap->write_mbox[0] ==
pcie_doe_build_protocol(&doe_cap->protocols[p])) {
handle_request = doe_cap->protocols[p].handle_request;
break;
}
}
}
/*
* PCIe r6 DOE 6.30.1:
* If the number of DW transferred does not match the
* indicated Length for a data object, then the
* data object must be silently discarded.
*/
if (handle_request && (doe_cap->write_mbox_len ==
pcie_doe_get_obj_len(pcie_doe_get_write_mbox_ptr(doe_cap)))) {
success = handle_request(doe_cap);
}
if (success) {
pcie_doe_set_ready(doe_cap, 1);
} else {
pcie_doe_reset_mbox(doe_cap);
}
}
/*
* Read from DOE config space.
* Return false if the address is not within the DOE_CAP range.
*/
bool pcie_doe_read_config(DOECap *doe_cap, uint32_t addr, int size,
uint32_t *buf)
{
uint32_t shift;
uint16_t doe_offset = doe_cap->offset;
if (!range_covers_byte(doe_offset + PCI_EXP_DOE_CAP,
PCI_DOE_SIZEOF - 4, addr)) {
return false;
}
addr -= doe_offset;
*buf = 0;
if (range_covers_byte(PCI_EXP_DOE_CAP, DWORD_BYTE, addr)) {
*buf = FIELD_DP32(*buf, PCI_DOE_CAP_REG, INTR_SUPP,
doe_cap->cap.intr);
*buf = FIELD_DP32(*buf, PCI_DOE_CAP_REG, DOE_INTR_MSG_NUM,
doe_cap->cap.vec);
} else if (range_covers_byte(PCI_EXP_DOE_CTRL, DWORD_BYTE, addr)) {
/* Must return ABORT=0 and GO=0 */
*buf = FIELD_DP32(*buf, PCI_DOE_CAP_CONTROL, DOE_INTR_EN,
doe_cap->ctrl.intr);
} else if (range_covers_byte(PCI_EXP_DOE_STATUS, DWORD_BYTE, addr)) {
*buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DOE_BUSY,
doe_cap->status.busy);
*buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DOE_INTR_STATUS,
doe_cap->status.intr);
*buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DOE_ERROR,
doe_cap->status.error);
*buf = FIELD_DP32(*buf, PCI_DOE_CAP_STATUS, DATA_OBJ_RDY,
doe_cap->status.ready);
/* Mailbox should be DW accessed */
} else if (addr == PCI_EXP_DOE_RD_DATA_MBOX && size == DWORD_BYTE) {
if (doe_cap->status.ready && !doe_cap->status.error) {
*buf = doe_cap->read_mbox[doe_cap->read_mbox_idx];
}
}
/* Process Alignment */
shift = addr % DWORD_BYTE;
*buf = extract32(*buf, shift * 8, size * 8);
return true;
}
/*
* Write to DOE config space.
* Return if the address is not within the DOE_CAP range or an abort is received.
*/
void pcie_doe_write_config(DOECap *doe_cap,
uint32_t addr, uint32_t val, int size)
{
uint16_t doe_offset = doe_cap->offset;
uint32_t shift;
if (!range_covers_byte(doe_offset + PCI_EXP_DOE_CAP,
PCI_DOE_SIZEOF - 4, addr)) {
return;
}
/* Process Alignment */
shift = addr % DWORD_BYTE;
addr -= (doe_offset + shift);
val = deposit32(val, shift * 8, size * 8, val);
switch (addr) {
case PCI_EXP_DOE_CTRL:
if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_ABORT)) {
pcie_doe_set_ready(doe_cap, 0);
pcie_doe_set_error(doe_cap, 0);
pcie_doe_reset_mbox(doe_cap);
return;
}
if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_GO)) {
pcie_doe_prepare_rsp(doe_cap);
}
if (FIELD_EX32(val, PCI_DOE_CAP_CONTROL, DOE_INTR_EN)) {
doe_cap->ctrl.intr = 1;
/* Clear interrupt bit located within the first byte */
} else if (shift == 0) {
doe_cap->ctrl.intr = 0;
}
break;
case PCI_EXP_DOE_STATUS:
if (FIELD_EX32(val, PCI_DOE_CAP_STATUS, DOE_INTR_STATUS)) {
doe_cap->status.intr = 0;
}
break;
case PCI_EXP_DOE_RD_DATA_MBOX:
/* Mailbox should be DW accessed */
if (size != DWORD_BYTE) {
return;
}
doe_cap->read_mbox_idx++;
if (doe_cap->read_mbox_idx == doe_cap->read_mbox_len) {
pcie_doe_reset_mbox(doe_cap);
pcie_doe_set_ready(doe_cap, 0);
} else if (doe_cap->read_mbox_idx > doe_cap->read_mbox_len) {
/* Underflow */
pcie_doe_set_error(doe_cap, 1);
}
break;
case PCI_EXP_DOE_WR_DATA_MBOX:
/* Mailbox should be DW accessed */
if (size != DWORD_BYTE) {
return;
}
doe_cap->write_mbox[doe_cap->write_mbox_len] = val;
doe_cap->write_mbox_len++;
break;
case PCI_EXP_DOE_CAP:
/* fallthrough */
default:
break;
}
}
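pcie_doe_read_config()/pcie_doe_write_config() above implement the mailbox state machine. From the requester's side one exchange is: write the request DWs, set GO, poll Data Object Ready, then read and acknowledge the response DW by DW (a write to the read-data register is what advances read_mbox_idx). A driver-side sketch, where cfg_read32()/cfg_write32() are hypothetical config-space accessors, off is the DOE capability offset, and DOE_GO_BIT/DATA_OBJ_RDY_BIT name the Control GO and Status Data Object Ready bits (placeholders, not QEMU identifiers):

    for (i = 0; i < req_dw; i++) {
        cfg_write32(off + PCI_EXP_DOE_WR_DATA_MBOX, req[i]);
    }
    cfg_write32(off + PCI_EXP_DOE_CTRL, DOE_GO_BIT);      /* kick the device */

    while (!(cfg_read32(off + PCI_EXP_DOE_STATUS) & DATA_OBJ_RDY_BIT)) {
        /* poll; a real driver bounds this and also checks DOE_ERROR */
    }
    for (i = 0; i < rsp_dw; i++) {
        rsp[i] = cfg_read32(off + PCI_EXP_DOE_RD_DATA_MBOX);
        cfg_write32(off + PCI_EXP_DOE_RD_DATA_MBOX, 0);   /* ack this DW */
    }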


@ -307,12 +307,7 @@ static int init_msix(PCIDevice *pdev)
}
for (i = 0; i < RDMA_MAX_INTRS; i++) {
rc = msix_vector_use(PCI_DEVICE(dev), i);
if (rc < 0) {
rdma_error_report("Fail mark MSI-X vector %d", i);
uninit_msix(pdev, i);
return rc;
}
msix_vector_use(PCI_DEVICE(dev), i);
}
return 0;


@ -602,17 +602,10 @@ static void vfu_msix_irq_state(vfu_ctx_t *vfu_ctx, uint32_t start,
uint32_t count, bool mask)
{
VfuObject *o = vfu_get_private(vfu_ctx);
Error *err = NULL;
uint32_t vector;
for (vector = start; vector < count; vector++) {
msix_set_mask(o->pci_dev, vector, mask, &err);
if (err) {
VFU_OBJECT_ERROR(o, "vfu: %s: %s", o->device,
error_get_pretty(err));
error_free(err);
err = NULL;
}
msix_set_mask(o->pci_dev, vector, mask);
}
}


@ -711,8 +711,14 @@ static void smbios_build_type_3_table(void)
static void smbios_build_type_4_table(MachineState *ms, unsigned instance)
{
char sock_str[128];
size_t tbl_len = SMBIOS_TYPE_4_LEN_V28;
SMBIOS_BUILD_TABLE_PRE(4, T4_BASE + instance, true); /* required */
if (smbios_ep_type == SMBIOS_ENTRY_POINT_TYPE_64) {
tbl_len = SMBIOS_TYPE_4_LEN_V30;
}
SMBIOS_BUILD_TABLE_PRE_SIZE(4, T4_BASE + instance,
true, tbl_len); /* required */
snprintf(sock_str, sizeof(sock_str), "%s%2x", type4.sock_pfx, instance);
SMBIOS_TABLE_SET_STR(4, socket_designation_str, sock_str);
@ -739,8 +745,15 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance)
SMBIOS_TABLE_SET_STR(4, serial_number_str, type4.serial);
SMBIOS_TABLE_SET_STR(4, asset_tag_number_str, type4.asset);
SMBIOS_TABLE_SET_STR(4, part_number_str, type4.part);
t->core_count = t->core_enabled = ms->smp.cores;
t->thread_count = ms->smp.threads;
t->core_count = (ms->smp.cores > 255) ? 0xFF : ms->smp.cores;
t->core_enabled = t->core_count;
t->core_count2 = t->core_enabled2 = cpu_to_le16(ms->smp.cores);
t->thread_count = (ms->smp.threads > 255) ? 0xFF : ms->smp.threads;
t->thread_count2 = cpu_to_le16(ms->smp.threads);
t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */
t->processor_family2 = cpu_to_le16(0x01); /* Other */
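Per SMBIOS 3.0, the one-byte Core Count/Thread Count fields saturate at 255 and the 16-bit *_count2 fields carry the full value; consumers fall back to the 16-bit fields when the byte field reads 0xFF. A worked example of the rule encoded above (values only, not QEMU code):

    unsigned cores = 300;                               /* e.g. -smp cores=300 */
    uint8_t  core_count  = cores > 255 ? 0xFF : cores;  /* -> 0xFF             */
    uint16_t core_count2 = cores;                       /* -> 300, stored LE   */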


@ -27,6 +27,11 @@ extern unsigned smbios_table_max;
extern unsigned smbios_table_cnt;
#define SMBIOS_BUILD_TABLE_PRE(tbl_type, tbl_handle, tbl_required) \
SMBIOS_BUILD_TABLE_PRE_SIZE(tbl_type, tbl_handle, tbl_required, \
sizeof(struct smbios_type_##tbl_type))\
#define SMBIOS_BUILD_TABLE_PRE_SIZE(tbl_type, tbl_handle, \
tbl_required, tbl_len) \
struct smbios_type_##tbl_type *t; \
size_t t_off; /* table offset into smbios_tables */ \
int str_index = 0; \
@ -39,12 +44,12 @@ extern unsigned smbios_table_cnt;
/* use offset of table t within smbios_tables */ \
/* (pointer must be updated after each realloc) */ \
t_off = smbios_tables_len; \
smbios_tables_len += sizeof(*t); \
smbios_tables_len += tbl_len; \
smbios_tables = g_realloc(smbios_tables, smbios_tables_len); \
t = (struct smbios_type_##tbl_type *)(smbios_tables + t_off); \
\
t->header.type = tbl_type; \
t->header.length = sizeof(*t); \
t->header.length = tbl_len; \
t->header.handle = cpu_to_le16(tbl_handle); \
} while (0)


@ -578,45 +578,11 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
ram_addr_t *ram_addr, bool *read_only)
{
MemoryRegion *mr;
hwaddr xlat;
hwaddr len = iotlb->addr_mask + 1;
bool writable = iotlb->perm & IOMMU_WO;
/*
* The IOMMU TLB entry we have just covers translation through
* this IOMMU to its immediate target. We need to translate
* it the rest of the way through to memory.
*/
mr = address_space_translate(&address_space_memory,
iotlb->translated_addr,
&xlat, &len, writable,
MEMTXATTRS_UNSPECIFIED);
if (!memory_region_is_ram(mr)) {
error_report("iommu map to non memory area %"HWADDR_PRIx"",
xlat);
return false;
} else if (memory_region_has_ram_discard_manager(mr)) {
RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
MemoryRegionSection tmp = {
.mr = mr,
.offset_within_region = xlat,
.size = int128_make64(len),
};
/*
* Malicious VMs can map memory into the IOMMU, which is expected
* to remain discarded. vfio will pin all pages, populating memory.
* Disallow that. vmstate priorities make sure any RamDiscardManager
* were already restored before IOMMUs are restored.
*/
if (!ram_discard_manager_is_populated(rdm, &tmp)) {
error_report("iommu map to discarded memory (e.g., unplugged via"
" virtio-mem): %"HWADDR_PRIx"",
iotlb->translated_addr);
return false;
}
bool ret, mr_has_discard_manager;
ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
&mr_has_discard_manager);
if (ret && mr_has_discard_manager) {
/*
* Malicious VMs might trigger discarding of IOMMU-mapped memory. The
* pages will remain pinned inside vfio until unmapped, resulting in a
@ -635,29 +601,7 @@ static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
" intended via an IOMMU. It's possible to mitigate "
" by setting/adjusting RLIMIT_MEMLOCK.");
}
/*
* Translation truncates length to the IOMMU page size,
* check that it did not truncate too much.
*/
if (len & iotlb->addr_mask) {
error_report("iommu has granularity incompatible with target AS");
return false;
}
if (vaddr) {
*vaddr = memory_region_get_ram_ptr(mr) + xlat;
}
if (ram_addr) {
*ram_addr = memory_region_get_ram_addr(mr) + xlat;
}
if (read_only) {
*read_only = !writable || mr->readonly;
}
return true;
return ret;
}
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)

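The translation and pin-safety checks now live in memory.c behind memory_get_xlat_addr() (declared in the memory.h hunk further down), so other IOMMU consumers such as vhost-vdpa can share them. Its calling convention, sketched under the assumption that NULL out-pointers are skipped just as in the vfio original:

    void *vaddr;
    bool mr_has_discard_manager;

    if (!memory_get_xlat_addr(iotlb, &vaddr, NULL /* ram_addr */,
                              NULL /* read_only */,
                              &mr_has_discard_manager)) {
        return;   /* not RAM, discarded memory, or granularity mismatch */
    }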

@ -123,7 +123,7 @@ static void vuf_stop(VirtIODevice *vdev)
static void vuf_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserFS *fs = VHOST_USER_FS(vdev);
bool should_start = virtio_device_started(vdev, status);
bool should_start = virtio_device_should_start(vdev, status);
if (vhost_dev_is_started(&fs->vhost_dev) == should_start) {
return;

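The same one-line swap from virtio_device_started() to virtio_device_should_start() repeats in the gpio, i2c, rng and vsock stubs below; the new helper folds transport-level conditions (such as whether the VM is running) into the decision instead of only inspecting the status byte. The common shape of these set_status handlers, sketched with a hypothetical device:

    static void my_vhost_set_status(VirtIODevice *vdev, uint8_t status)
    {
        MyVhostDev *d = MY_VHOST_DEV(vdev);            /* hypothetical */
        bool should_start = virtio_device_should_start(vdev, status);

        if (vhost_dev_is_started(&d->vhost_dev) == should_start) {
            return;                                    /* nothing to change */
        }
        should_start ? my_vhost_start(vdev) : my_vhost_stop(vdev);
    }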

@ -152,7 +152,7 @@ static void vu_gpio_stop(VirtIODevice *vdev)
static void vu_gpio_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
bool should_start = virtio_device_started(vdev, status);
bool should_start = virtio_device_should_start(vdev, status);
trace_virtio_gpio_set_status(status);


@ -93,7 +93,7 @@ static void vu_i2c_stop(VirtIODevice *vdev)
static void vu_i2c_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
bool should_start = virtio_device_started(vdev, status);
bool should_start = virtio_device_should_start(vdev, status);
if (vhost_dev_is_started(&i2c->vhost_dev) == should_start) {
return;


@ -90,7 +90,7 @@ static void vu_rng_stop(VirtIODevice *vdev)
static void vu_rng_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserRNG *rng = VHOST_USER_RNG(vdev);
bool should_start = virtio_device_started(vdev, status);
bool should_start = virtio_device_should_start(vdev, status);
if (vhost_dev_is_started(&rng->vhost_dev) == should_start) {
return;


@ -55,7 +55,7 @@ const VhostDevConfigOps vsock_ops = {
static void vuv_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
bool should_start = virtio_device_started(vdev, status);
bool should_start = virtio_device_should_start(vdev, status);
if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) {
return;


@ -81,6 +81,7 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
/* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
VHOST_USER_PROTOCOL_F_STATUS = 16,
VHOST_USER_PROTOCOL_F_MAX
};
@ -126,6 +127,8 @@ typedef enum VhostUserRequest {
VHOST_USER_GET_MAX_MEM_SLOTS = 36,
VHOST_USER_ADD_MEM_REG = 37,
VHOST_USER_REM_MEM_REG = 38,
VHOST_USER_SET_STATUS = 39,
VHOST_USER_GET_STATUS = 40,
VHOST_USER_MAX
} VhostUserRequest;
@ -1452,6 +1455,43 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
return 0;
}
static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status)
{
return vhost_user_set_u64(dev, VHOST_USER_SET_STATUS, status, false);
}
static int vhost_user_get_status(struct vhost_dev *dev, uint8_t *status)
{
uint64_t value;
int ret;
ret = vhost_user_get_u64(dev, VHOST_USER_GET_STATUS, &value);
if (ret < 0) {
return ret;
}
*status = value;
return 0;
}
static int vhost_user_add_status(struct vhost_dev *dev, uint8_t status)
{
uint8_t s;
int ret;
ret = vhost_user_get_status(dev, &s);
if (ret < 0) {
return ret;
}
if ((s & status) == status) {
return 0;
}
s |= status;
return vhost_user_set_status(dev, s);
}
static int vhost_user_set_features(struct vhost_dev *dev,
uint64_t features)
{
@ -1460,6 +1500,7 @@ static int vhost_user_set_features(struct vhost_dev *dev,
* backend is actually logging changes
*/
bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL);
int ret;
/*
* We need to include any extra backend only feature bits that
@ -1467,9 +1508,18 @@ static int vhost_user_set_features(struct vhost_dev *dev,
* VHOST_USER_F_PROTOCOL_FEATURES bit for enabling protocol
* features.
*/
return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES,
ret = vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES,
features | dev->backend_features,
log_enabled);
if (virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_STATUS)) {
if (!ret) {
return vhost_user_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
}
}
return ret;
}
static int vhost_user_set_protocol_features(struct vhost_dev *dev,
@ -1543,6 +1593,11 @@ static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
n = g_ptr_array_index(u->notifiers, idx);
if (!n) {
/*
* In case notifications arrive out of order,
* make room for the current index.
*/
g_ptr_array_remove_index(u->notifiers, idx);
n = g_new0(VhostUserHostNotifier, 1);
n->idx = idx;
g_ptr_array_insert(u->notifiers, idx, n);
@ -2615,6 +2670,27 @@ void vhost_user_cleanup(VhostUserState *user)
user->chr = NULL;
}
static int vhost_user_dev_start(struct vhost_dev *dev, bool started)
{
if (!virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_STATUS)) {
return 0;
}
/* Set device status only for last queue pair */
if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
return 0;
}
if (started) {
return vhost_user_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
VIRTIO_CONFIG_S_DRIVER |
VIRTIO_CONFIG_S_DRIVER_OK);
} else {
return vhost_user_set_status(dev, 0);
}
}
const VhostOps user_ops = {
.backend_type = VHOST_BACKEND_TYPE_USER,
.vhost_backend_init = vhost_user_backend_init,
@ -2649,4 +2725,5 @@ const VhostOps user_ops = {
.vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
.vhost_get_inflight_fd = vhost_user_get_inflight_fd,
.vhost_set_inflight_fd = vhost_user_set_inflight_fd,
.vhost_dev_start = vhost_user_dev_start,
};
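With VHOST_USER_PROTOCOL_F_STATUS negotiated, QEMU now mirrors the virtio status byte to the backend: FEATURES_OK is added once SET_FEATURES succeeds, ACKNOWLEDGE|DRIVER|DRIVER_OK when the last queue pair starts, and 0 on stop. Because vhost_user_add_status() is read-modify-write, bits that are already set are not re-sent. A device start therefore looks roughly like this on the wire (a sketch of the message flow, not a verbatim trace):

    VHOST_USER_SET_FEATURES(features)
    VHOST_USER_GET_STATUS                       -> s
    VHOST_USER_SET_STATUS(s | FEATURES_OK)      (skipped if already set)
    ... per-queue setup ...
    VHOST_USER_GET_STATUS                       -> s
    VHOST_USER_SET_STATUS(s | ACKNOWLEDGE | DRIVER | DRIVER_OK)
                                                (last queue pair only)
    ... and on stop:
    VHOST_USER_SET_STATUS(0)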


@ -70,7 +70,7 @@ static int vhost_vsock_set_running(VirtIODevice *vdev, int start)
static void vhost_vsock_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
bool should_start = virtio_device_started(vdev, status);
bool should_start = virtio_device_should_start(vdev, status);
int ret;
if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) {


@ -1081,10 +1081,10 @@ out:
return ret;
}
static int vhost_virtqueue_start(struct vhost_dev *dev,
struct VirtIODevice *vdev,
struct vhost_virtqueue *vq,
unsigned idx)
int vhost_virtqueue_start(struct vhost_dev *dev,
struct VirtIODevice *vdev,
struct vhost_virtqueue *vq,
unsigned idx)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
@ -1201,10 +1201,10 @@ fail_alloc_desc:
return r;
}
static void vhost_virtqueue_stop(struct vhost_dev *dev,
struct VirtIODevice *vdev,
struct vhost_virtqueue *vq,
unsigned idx)
void vhost_virtqueue_stop(struct vhost_dev *dev,
struct VirtIODevice *vdev,
struct vhost_virtqueue *vq,
unsigned idx)
{
int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
struct vhost_vring_state state = {


@ -27,6 +27,39 @@
#define VIRTIO_CRYPTO_VM_VERSION 1
typedef struct VirtIOCryptoSessionReq {
VirtIODevice *vdev;
VirtQueue *vq;
VirtQueueElement *elem;
CryptoDevBackendSessionInfo info;
CryptoDevCompletionFunc cb;
} VirtIOCryptoSessionReq;
static void virtio_crypto_free_create_session_req(VirtIOCryptoSessionReq *sreq)
{
switch (sreq->info.op_code) {
case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
g_free(sreq->info.u.sym_sess_info.cipher_key);
g_free(sreq->info.u.sym_sess_info.auth_key);
break;
case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
g_free(sreq->info.u.asym_sess_info.key);
break;
case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
case VIRTIO_CRYPTO_HASH_DESTROY_SESSION:
case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
case VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION:
break;
default:
error_report("Unknown opcode: %u", sreq->info.op_code);
}
g_free(sreq);
}
/*
* Translate the virtqueue index into a crypto queue index.
* The control virtqueue is after the data virtqueues
@ -75,27 +108,24 @@ virtio_crypto_cipher_session_helper(VirtIODevice *vdev,
return 0;
}
static int64_t
static int
virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
struct virtio_crypto_sym_create_session_req *sess_req,
uint32_t queue_id,
uint32_t opcode,
struct iovec *iov, unsigned int out_num)
struct iovec *iov, unsigned int out_num,
VirtIOCryptoSessionReq *sreq)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
CryptoDevBackendSessionInfo info;
CryptoDevBackendSymSessionInfo *sym_info;
int64_t session_id;
CryptoDevBackendSymSessionInfo *sym_info = &sreq->info.u.sym_sess_info;
int queue_index;
uint32_t op_type;
Error *local_err = NULL;
int ret;
memset(&info, 0, sizeof(info));
op_type = ldl_le_p(&sess_req->op_type);
info.op_code = opcode;
sreq->info.op_code = opcode;
sym_info = &info.u.sym_sess_info;
sym_info = &sreq->info.u.sym_sess_info;
sym_info->op_type = op_type;
if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
@ -103,7 +133,7 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
&sess_req->u.cipher.para,
&iov, &out_num);
if (ret < 0) {
goto err;
return ret;
}
} else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
size_t s;
@ -112,7 +142,7 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
&sess_req->u.chain.para.cipher_param,
&iov, &out_num);
if (ret < 0) {
goto err;
return ret;
}
/* hash part */
sym_info->alg_chain_order = ldl_le_p(
@ -129,8 +159,7 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
if (sym_info->auth_key_len > vcrypto->conf.max_auth_key_len) {
error_report("virtio-crypto length of auth key is too big: %u",
sym_info->auth_key_len);
ret = -VIRTIO_CRYPTO_ERR;
goto err;
return -VIRTIO_CRYPTO_ERR;
}
/* get auth key */
if (sym_info->auth_key_len > 0) {
@ -140,8 +169,7 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
if (unlikely(s != sym_info->auth_key_len)) {
virtio_error(vdev,
"virtio-crypto authenticated key incorrect");
ret = -EFAULT;
goto err;
return -EFAULT;
}
iov_discard_front(&iov, &out_num, sym_info->auth_key_len);
}
@ -153,49 +181,30 @@ virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
} else {
/* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
error_report("unsupported hash mode");
ret = -VIRTIO_CRYPTO_NOTSUPP;
goto err;
return -VIRTIO_CRYPTO_NOTSUPP;
}
} else {
/* VIRTIO_CRYPTO_SYM_OP_NONE */
error_report("unsupported cipher op_type: VIRTIO_CRYPTO_SYM_OP_NONE");
ret = -VIRTIO_CRYPTO_NOTSUPP;
goto err;
return -VIRTIO_CRYPTO_NOTSUPP;
}
queue_index = virtio_crypto_vq2q(queue_id);
session_id = cryptodev_backend_create_session(
vcrypto->cryptodev,
&info, queue_index, &local_err);
if (session_id >= 0) {
ret = session_id;
} else {
if (local_err) {
error_report_err(local_err);
}
ret = -VIRTIO_CRYPTO_ERR;
}
err:
g_free(sym_info->cipher_key);
g_free(sym_info->auth_key);
return ret;
return cryptodev_backend_create_session(vcrypto->cryptodev, &sreq->info,
queue_index, sreq->cb, sreq);
}
static int64_t
static int
virtio_crypto_create_asym_session(VirtIOCrypto *vcrypto,
struct virtio_crypto_akcipher_create_session_req *sess_req,
uint32_t queue_id, uint32_t opcode,
struct iovec *iov, unsigned int out_num)
struct iovec *iov, unsigned int out_num,
VirtIOCryptoSessionReq *sreq)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
CryptoDevBackendSessionInfo info = {0};
CryptoDevBackendAsymSessionInfo *asym_info;
int64_t session_id;
CryptoDevBackendAsymSessionInfo *asym_info = &sreq->info.u.asym_sess_info;
int queue_index;
uint32_t algo, keytype, keylen;
g_autofree uint8_t *key = NULL;
Error *local_err = NULL;
algo = ldl_le_p(&sess_req->para.algo);
keytype = ldl_le_p(&sess_req->para.keytype);
@ -208,20 +217,19 @@ virtio_crypto_create_asym_session(VirtIOCrypto *vcrypto,
}
if (keylen) {
key = g_malloc(keylen);
if (iov_to_buf(iov, out_num, 0, key, keylen) != keylen) {
asym_info->key = g_malloc(keylen);
if (iov_to_buf(iov, out_num, 0, asym_info->key, keylen) != keylen) {
virtio_error(vdev, "virtio-crypto asym key incorrect");
return -EFAULT;
}
iov_discard_front(&iov, &out_num, keylen);
}
info.op_code = opcode;
asym_info = &info.u.asym_sess_info;
sreq->info.op_code = opcode;
asym_info = &sreq->info.u.asym_sess_info;
asym_info->algo = algo;
asym_info->keytype = keytype;
asym_info->keylen = keylen;
asym_info->key = key;
switch (asym_info->algo) {
case VIRTIO_CRYPTO_AKCIPHER_RSA:
asym_info->u.rsa.padding_algo =
@ -237,45 +245,95 @@ virtio_crypto_create_asym_session(VirtIOCrypto *vcrypto,
}
queue_index = virtio_crypto_vq2q(queue_id);
session_id = cryptodev_backend_create_session(vcrypto->cryptodev, &info,
queue_index, &local_err);
if (session_id < 0) {
if (local_err) {
error_report_err(local_err);
}
return -VIRTIO_CRYPTO_ERR;
}
return session_id;
return cryptodev_backend_create_session(vcrypto->cryptodev, &sreq->info,
queue_index, sreq->cb, sreq);
}
static uint8_t
static int
virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
struct virtio_crypto_destroy_session_req *close_sess_req,
uint32_t queue_id)
uint32_t queue_id,
VirtIOCryptoSessionReq *sreq)
{
int ret;
uint64_t session_id;
uint32_t status;
Error *local_err = NULL;
session_id = ldq_le_p(&close_sess_req->session_id);
DPRINTF("close session, id=%" PRIu64 "\n", session_id);
ret = cryptodev_backend_close_session(
vcrypto->cryptodev, session_id, queue_id, &local_err);
if (ret == 0) {
status = VIRTIO_CRYPTO_OK;
return cryptodev_backend_close_session(
vcrypto->cryptodev, session_id, queue_id, sreq->cb, sreq);
}
static void virtio_crypto_create_session_completion(void *opaque, int ret)
{
VirtIOCryptoSessionReq *sreq = (VirtIOCryptoSessionReq *)opaque;
VirtQueue *vq = sreq->vq;
VirtQueueElement *elem = sreq->elem;
VirtIODevice *vdev = sreq->vdev;
struct virtio_crypto_session_input input;
struct iovec *in_iov = elem->in_sg;
unsigned in_num = elem->in_num;
size_t s;
memset(&input, 0, sizeof(input));
/* Serious errors, need to reset virtio crypto device */
if (ret == -EFAULT) {
virtqueue_detach_element(vq, elem, 0);
goto out;
} else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
} else if (ret == -VIRTIO_CRYPTO_KEY_REJECTED) {
stl_le_p(&input.status, VIRTIO_CRYPTO_KEY_REJECTED);
} else if (ret != VIRTIO_CRYPTO_OK) {
stl_le_p(&input.status, VIRTIO_CRYPTO_ERR);
} else {
if (local_err) {
error_report_err(local_err);
} else {
error_report("destroy session failed");
}
status = VIRTIO_CRYPTO_ERR;
/* Set the session id */
stq_le_p(&input.session_id, sreq->info.session_id);
stl_le_p(&input.status, VIRTIO_CRYPTO_OK);
}
return status;
s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
if (unlikely(s != sizeof(input))) {
virtio_error(vdev, "virtio-crypto input incorrect");
virtqueue_detach_element(vq, elem, 0);
goto out;
}
virtqueue_push(vq, elem, sizeof(input));
virtio_notify(vdev, vq);
out:
g_free(elem);
virtio_crypto_free_create_session_req(sreq);
}
static void virtio_crypto_destroy_session_completion(void *opaque, int ret)
{
VirtIOCryptoSessionReq *sreq = (VirtIOCryptoSessionReq *)opaque;
VirtQueue *vq = sreq->vq;
VirtQueueElement *elem = sreq->elem;
VirtIODevice *vdev = sreq->vdev;
struct iovec *in_iov = elem->in_sg;
unsigned in_num = elem->in_num;
uint8_t status;
size_t s;
if (ret < 0) {
status = VIRTIO_CRYPTO_ERR;
} else {
status = VIRTIO_CRYPTO_OK;
}
s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status));
if (unlikely(s != sizeof(status))) {
virtio_error(vdev, "virtio-crypto status incorrect");
virtqueue_detach_element(vq, elem, 0);
goto out;
}
virtqueue_push(vq, elem, sizeof(status));
virtio_notify(vdev, vq);
out:
g_free(elem);
g_free(sreq);
}
static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
@ -283,16 +341,16 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
struct virtio_crypto_op_ctrl_req ctrl;
VirtQueueElement *elem;
struct iovec *in_iov;
struct iovec *out_iov;
unsigned in_num;
VirtIOCryptoSessionReq *sreq;
unsigned out_num;
unsigned in_num;
uint32_t queue_id;
uint32_t opcode;
struct virtio_crypto_session_input input;
int64_t session_id;
uint8_t status;
size_t s;
int ret;
struct iovec *out_iov;
struct iovec *in_iov;
for (;;) {
g_autofree struct iovec *out_iov_copy = NULL;
@ -327,44 +385,34 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
opcode = ldl_le_p(&ctrl.header.opcode);
queue_id = ldl_le_p(&ctrl.header.queue_id);
memset(&input, 0, sizeof(input));
sreq = g_new0(VirtIOCryptoSessionReq, 1);
sreq->vdev = vdev;
sreq->vq = vq;
sreq->elem = elem;
switch (opcode) {
case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
session_id = virtio_crypto_create_sym_session(vcrypto,
&ctrl.u.sym_create_session,
queue_id, opcode,
out_iov, out_num);
goto check_session;
sreq->cb = virtio_crypto_create_session_completion;
ret = virtio_crypto_create_sym_session(vcrypto,
&ctrl.u.sym_create_session,
queue_id, opcode,
out_iov, out_num,
sreq);
if (ret < 0) {
virtio_crypto_create_session_completion(sreq, ret);
}
break;
case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
session_id = virtio_crypto_create_asym_session(vcrypto,
sreq->cb = virtio_crypto_create_session_completion;
ret = virtio_crypto_create_asym_session(vcrypto,
&ctrl.u.akcipher_create_session,
queue_id, opcode,
out_iov, out_num);
check_session:
/* Serious errors, need to reset virtio crypto device */
if (session_id == -EFAULT) {
virtqueue_detach_element(vq, elem, 0);
break;
} else if (session_id == -VIRTIO_CRYPTO_NOTSUPP) {
stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
} else if (session_id == -VIRTIO_CRYPTO_ERR) {
stl_le_p(&input.status, VIRTIO_CRYPTO_ERR);
} else {
/* Set the session id */
stq_le_p(&input.session_id, session_id);
stl_le_p(&input.status, VIRTIO_CRYPTO_OK);
out_iov, out_num,
sreq);
if (ret < 0) {
virtio_crypto_create_session_completion(sreq, ret);
}
s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
if (unlikely(s != sizeof(input))) {
virtio_error(vdev, "virtio-crypto input incorrect");
virtqueue_detach_element(vq, elem, 0);
break;
}
virtqueue_push(vq, elem, sizeof(input));
virtio_notify(vdev, vq);
break;
case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
@ -372,37 +420,36 @@ check_session:
case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
case VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION:
status = virtio_crypto_handle_close_session(vcrypto,
&ctrl.u.destroy_session, queue_id);
/* The status only occupy one byte, we can directly use it */
s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status));
if (unlikely(s != sizeof(status))) {
virtio_error(vdev, "virtio-crypto status incorrect");
virtqueue_detach_element(vq, elem, 0);
break;
sreq->cb = virtio_crypto_destroy_session_completion;
ret = virtio_crypto_handle_close_session(vcrypto,
&ctrl.u.destroy_session, queue_id,
sreq);
if (ret < 0) {
virtio_crypto_destroy_session_completion(sreq, ret);
}
virtqueue_push(vq, elem, sizeof(status));
virtio_notify(vdev, vq);
break;
case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
default:
memset(&input, 0, sizeof(input));
error_report("virtio-crypto unsupported ctrl opcode: %d", opcode);
stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
if (unlikely(s != sizeof(input))) {
virtio_error(vdev, "virtio-crypto input incorrect");
virtqueue_detach_element(vq, elem, 0);
break;
} else {
virtqueue_push(vq, elem, sizeof(input));
virtio_notify(vdev, vq);
}
virtqueue_push(vq, elem, sizeof(input));
virtio_notify(vdev, vq);
g_free(sreq);
g_free(elem);
break;
} /* end switch case */
g_free(elem);
} /* end for loop */
}
@ -448,6 +495,7 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req)
}
}
g_free(req->in_iov);
g_free(req);
}
@ -458,6 +506,7 @@ virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
CryptoDevBackendSymOpInfo *sym_op_info)
{
size_t s, len;
struct iovec *in_iov = req->in_iov;
if (status != VIRTIO_CRYPTO_OK) {
return;
@ -465,18 +514,18 @@ virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
len = sym_op_info->src_len;
/* Save the cipher result */
s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
s = iov_from_buf(in_iov, req->in_num, 0, sym_op_info->dst, len);
if (s != len) {
virtio_error(vdev, "virtio-crypto dest data incorrect");
return;
}
iov_discard_front(&req->in_iov, &req->in_num, len);
iov_discard_front(&in_iov, &req->in_num, len);
if (sym_op_info->op_type ==
VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
/* Save the digest result */
s = iov_from_buf(req->in_iov, req->in_num, 0,
s = iov_from_buf(in_iov, req->in_num, 0,
sym_op_info->digest_result,
sym_op_info->digest_result_len);
if (s != sym_op_info->digest_result_len) {
@ -491,6 +540,7 @@ virtio_crypto_akcipher_input_data_helper(VirtIODevice *vdev,
CryptoDevBackendAsymOpInfo *asym_op_info)
{
size_t s, len;
struct iovec *in_iov = req->in_iov;
if (status != VIRTIO_CRYPTO_OK) {
return;
@ -501,23 +551,24 @@ virtio_crypto_akcipher_input_data_helper(VirtIODevice *vdev,
return;
}
s = iov_from_buf(req->in_iov, req->in_num, 0, asym_op_info->dst, len);
s = iov_from_buf(in_iov, req->in_num, 0, asym_op_info->dst, len);
if (s != len) {
virtio_error(vdev, "virtio-crypto asym dest data incorrect");
return;
}
iov_discard_front(&req->in_iov, &req->in_num, len);
iov_discard_front(&in_iov, &req->in_num, len);
/* For akcipher, dst_len may be changed after operation */
req->in_len = sizeof(struct virtio_crypto_inhdr) + asym_op_info->dst_len;
}
static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
static void virtio_crypto_req_complete(void *opaque, int ret)
{
VirtIOCryptoReq *req = (VirtIOCryptoReq *)opaque;
VirtIOCrypto *vcrypto = req->vcrypto;
VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
uint8_t status = -ret;
if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
virtio_crypto_sym_input_data_helper(vdev, req, status,
@ -529,6 +580,7 @@ static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
stb_p(&req->in->status, status);
virtqueue_push(req->vq, &req->elem, req->in_len);
virtio_notify(vdev, req->vq);
virtio_crypto_free_request(req);
}
static VirtIOCryptoReq *
@ -773,9 +825,7 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
unsigned in_num;
unsigned out_num;
uint32_t opcode;
uint8_t status = VIRTIO_CRYPTO_ERR;
CryptoDevBackendOpInfo *op_info = &request->op_info;
Error *local_err = NULL;
if (elem->out_num < 1 || elem->in_num < 1) {
virtio_error(vdev, "virtio-crypto dataq missing headers");
@ -815,6 +865,8 @@ virtio_crypto_handle_request(VirtIOCryptoReq *request)
*/
request->in_num = in_num;
request->in_iov = in_iov;
/* now, we free the in_iov_copy inside virtio_crypto_free_request */
in_iov_copy = NULL;
opcode = ldl_le_p(&req.header.opcode);
op_info->session_id = ldq_le_p(&req.header.session_id);
@ -843,23 +895,15 @@ check_result:
if (ret == -EFAULT) {
return -1;
} else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
virtio_crypto_free_request(request);
virtio_crypto_req_complete(request, -VIRTIO_CRYPTO_NOTSUPP);
} else {
/* Set request's parameter */
ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev,
request, queue_index, &local_err);
request, queue_index,
virtio_crypto_req_complete,
request);
if (ret < 0) {
status = -ret;
if (local_err) {
error_report_err(local_err);
}
} else { /* ret == VIRTIO_CRYPTO_OK */
status = ret;
virtio_crypto_req_complete(request, ret);
}
virtio_crypto_req_complete(request, status);
virtio_crypto_free_request(request);
}
break;
@ -870,8 +914,7 @@ check_result:
default:
error_report("virtio-crypto unsupported dataq opcode: %u",
opcode);
virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
virtio_crypto_free_request(request);
virtio_crypto_req_complete(request, -VIRTIO_CRYPTO_NOTSUPP);
}
return 0;
@ -1011,7 +1054,7 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
vcrypto->vqs[i].vcrypto = vcrypto;
}
vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
vcrypto->ctrl_vq = virtio_add_queue(vdev, 1024, virtio_crypto_handle_ctrl);
if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) {
vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
} else {

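The control queue is now fully asynchronous: each request carries a VirtIOCryptoSessionReq, the backend call returns immediately, and the completion callback pushes the element and frees the request. A backend that actually completes synchronously can simply invoke the callback inline; a minimal sketch, where do_create_session() stands in for hypothetical backend internals:

    static int my_backend_create_session(CryptoDevBackend *backend,
                                         CryptoDevBackendSessionInfo *info,
                                         uint32_t queue_index,
                                         CryptoDevCompletionFunc cb,
                                         void *opaque)
    {
        int status = do_create_session(info);   /* hypothetical; 0 on ok */

        if (cb) {
            cb(opaque, status);    /* drives virtqueue_push()/notify */
        }
        return 0;                  /* errors travel through the callback */
    }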

@ -17,6 +17,7 @@
#include "hw/qdev-properties-system.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/pci/pci_bus.h"
#include "qom/object.h"
typedef struct VirtIOIOMMUPCI VirtIOIOMMUPCI;
@ -44,6 +45,7 @@ static Property virtio_iommu_pci_properties[] = {
static void virtio_iommu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
VirtIOIOMMUPCI *dev = VIRTIO_IOMMU_PCI(vpci_dev);
PCIBus *pbus = pci_get_bus(&vpci_dev->pci_dev);
DeviceState *vdev = DEVICE(&dev->vdev);
VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
@ -57,11 +59,17 @@ static void virtio_iommu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
s->reserved_regions[i].type != VIRTIO_IOMMU_RESV_MEM_T_MSI) {
error_setg(errp, "reserved region %d has an invalid type", i);
error_append_hint(errp, "Valid values are 0 and 1\n");
return;
}
}
if (!pci_bus_is_root(pbus)) {
error_setg(errp, "virtio-iommu-pci must be plugged on the root bus");
return;
}
object_property_set_link(OBJECT(dev), "primary-bus",
OBJECT(pci_get_bus(&vpci_dev->pci_dev)),
&error_abort);
OBJECT(pbus), &error_abort);
virtio_pci_force_virtio_1(vpci_dev);
qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
}


@ -71,9 +71,11 @@ static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
if (msix_enabled(&proxy->pci_dev))
msix_notify(&proxy->pci_dev, vector);
else {
if (msix_enabled(&proxy->pci_dev)) {
if (vector != VIRTIO_NO_VECTOR) {
msix_notify(&proxy->pci_dev, vector);
}
} else {
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1);
}
@ -175,6 +177,7 @@ static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
uint16_t vector;
int ret;
ret = pci_device_load(&proxy->pci_dev, f);
@ -184,12 +187,17 @@ static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
msix_unuse_all_vectors(&proxy->pci_dev);
msix_load(&proxy->pci_dev, f);
if (msix_present(&proxy->pci_dev)) {
qemu_get_be16s(f, &vdev->config_vector);
qemu_get_be16s(f, &vector);
if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) {
return -EINVAL;
}
} else {
vdev->config_vector = VIRTIO_NO_VECTOR;
vector = VIRTIO_NO_VECTOR;
}
if (vdev->config_vector != VIRTIO_NO_VECTOR) {
return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
vdev->config_vector = vector;
if (vector != VIRTIO_NO_VECTOR) {
msix_vector_use(&proxy->pci_dev, vector);
}
return 0;
}
@ -202,12 +210,15 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
uint16_t vector;
if (msix_present(&proxy->pci_dev)) {
qemu_get_be16s(f, &vector);
if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) {
return -EINVAL;
}
} else {
vector = VIRTIO_NO_VECTOR;
}
virtio_queue_set_vector(vdev, n, vector);
if (vector != VIRTIO_NO_VECTOR) {
return msix_vector_use(&proxy->pci_dev, vector);
msix_vector_use(&proxy->pci_dev, vector);
}
return 0;
@ -299,6 +310,7 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
VirtIOPCIProxy *proxy = opaque;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
uint16_t vector;
hwaddr pa;
switch (addr) {
@ -352,18 +364,28 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
}
break;
case VIRTIO_MSI_CONFIG_VECTOR:
msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
if (vdev->config_vector != VIRTIO_NO_VECTOR) {
msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
}
/* Make it possible for the guest to discover that an error took place. */
if (msix_vector_use(&proxy->pci_dev, val) < 0)
if (val < proxy->nvectors) {
msix_vector_use(&proxy->pci_dev, val);
} else {
val = VIRTIO_NO_VECTOR;
}
vdev->config_vector = val;
break;
case VIRTIO_MSI_QUEUE_VECTOR:
msix_vector_unuse(&proxy->pci_dev,
virtio_queue_vector(vdev, vdev->queue_sel));
vector = virtio_queue_vector(vdev, vdev->queue_sel);
if (vector != VIRTIO_NO_VECTOR) {
msix_vector_unuse(&proxy->pci_dev, vector);
}
/* Make it possible for the guest to discover that an error took place. */
if (msix_vector_use(&proxy->pci_dev, val) < 0)
if (val < proxy->nvectors) {
msix_vector_use(&proxy->pci_dev, val);
} else {
val = VIRTIO_NO_VECTOR;
}
virtio_queue_set_vector(vdev, vdev->queue_sel, val);
break;
default:
@ -1251,6 +1273,9 @@ static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
case VIRTIO_PCI_COMMON_Q_USEDHI:
val = proxy->vqs[vdev->queue_sel].used[1];
break;
case VIRTIO_PCI_COMMON_Q_RESET:
val = proxy->vqs[vdev->queue_sel].reset;
break;
default:
val = 0;
}
@ -1263,6 +1288,7 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
{
VirtIOPCIProxy *proxy = opaque;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
uint16_t vector;
if (vdev == NULL) {
return;
@ -1284,9 +1310,13 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
}
break;
case VIRTIO_PCI_COMMON_MSIX:
msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
if (vdev->config_vector != VIRTIO_NO_VECTOR) {
msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
}
/* Make it possible for the guest to discover that an error took place. */
if (msix_vector_use(&proxy->pci_dev, val) < 0) {
if (val < proxy->nvectors) {
msix_vector_use(&proxy->pci_dev, val);
} else {
val = VIRTIO_NO_VECTOR;
}
vdev->config_vector = val;
@ -1318,10 +1348,14 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
proxy->vqs[vdev->queue_sel].num);
break;
case VIRTIO_PCI_COMMON_Q_MSIX:
msix_vector_unuse(&proxy->pci_dev,
virtio_queue_vector(vdev, vdev->queue_sel));
vector = virtio_queue_vector(vdev, vdev->queue_sel);
if (vector != VIRTIO_NO_VECTOR) {
msix_vector_unuse(&proxy->pci_dev, vector);
}
/* Make it possible for the guest to discover that an error took place. */
if (msix_vector_use(&proxy->pci_dev, val) < 0) {
if (val < proxy->nvectors) {
msix_vector_use(&proxy->pci_dev, val);
} else {
val = VIRTIO_NO_VECTOR;
}
virtio_queue_set_vector(vdev, vdev->queue_sel, val);
@ -1338,6 +1372,8 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
proxy->vqs[vdev->queue_sel].used[0]);
proxy->vqs[vdev->queue_sel].enabled = 1;
proxy->vqs[vdev->queue_sel].reset = 0;
virtio_queue_enable(vdev, vdev->queue_sel);
} else {
virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val);
}
@ -1360,6 +1396,16 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
case VIRTIO_PCI_COMMON_Q_USEDHI:
proxy->vqs[vdev->queue_sel].used[1] = val;
break;
case VIRTIO_PCI_COMMON_Q_RESET:
if (val == 1) {
proxy->vqs[vdev->queue_sel].reset = 1;
virtio_queue_reset(vdev, vdev->queue_sel);
proxy->vqs[vdev->queue_sel].reset = 0;
proxy->vqs[vdev->queue_sel].enabled = 0;
}
break;
default:
break;
}
@ -1954,6 +2000,7 @@ static void virtio_pci_reset(DeviceState *qdev)
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
proxy->vqs[i].enabled = 0;
proxy->vqs[i].reset = 0;
proxy->vqs[i].num = 0;
proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;

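All four MSI-X vector writes in this file now follow the same unuse/validate/use pattern instead of relying on msix_vector_use() failing. Factored out, the pattern reads as follows (a sketch; no such helper exists in the tree):

    static uint16_t swap_vector(VirtIOPCIProxy *proxy,
                                uint16_t old, uint64_t val)
    {
        if (old != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, old);
        }
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
            return val;
        }
        /* Out of range: make it possible for the guest to see the error. */
        return VIRTIO_NO_VECTOR;
    }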

@ -13,6 +13,7 @@
#include "hw/virtio/virtio-pci.h"
#include "hw/virtio/virtio-rng.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "qom/object.h"
@ -31,11 +32,23 @@ struct VirtIORngPCI {
VirtIORNG vdev;
};
static Property virtio_rng_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
DEV_NVECTORS_UNSPECIFIED),
DEFINE_PROP_END_OF_LIST(),
};
static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev);
DeviceState *vdev = DEVICE(&vrng->vdev);
if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
vpci_dev->nvectors = 2;
}
if (!qdev_realize(vdev, BUS(&vpci_dev->bus), errp)) {
return;
}
@ -54,6 +67,7 @@ static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
pcidev_k->class_id = PCI_CLASS_OTHERS;
device_class_set_props(dc, virtio_rng_properties);
}
static void virtio_rng_initfn(Object *obj)


@ -2464,6 +2464,51 @@ static enum virtio_device_endian virtio_current_cpu_endian(void)
}
}
static void __virtio_queue_reset(VirtIODevice *vdev, uint32_t i)
{
vdev->vq[i].vring.desc = 0;
vdev->vq[i].vring.avail = 0;
vdev->vq[i].vring.used = 0;
vdev->vq[i].last_avail_idx = 0;
vdev->vq[i].shadow_avail_idx = 0;
vdev->vq[i].used_idx = 0;
vdev->vq[i].last_avail_wrap_counter = true;
vdev->vq[i].shadow_avail_wrap_counter = true;
vdev->vq[i].used_wrap_counter = true;
virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
vdev->vq[i].signalled_used = 0;
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
vdev->vq[i].inuse = 0;
virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
}
void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
if (k->queue_reset) {
k->queue_reset(vdev, queue_index);
}
__virtio_queue_reset(vdev, queue_index);
}
void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
error_report("queue_enable is only suppported in devices of virtio "
"1.0 or later.");
}
if (k->queue_enable) {
k->queue_enable(vdev, queue_index);
}
}
void virtio_reset(void *opaque)
{
VirtIODevice *vdev = opaque;
@ -2495,22 +2540,7 @@ void virtio_reset(void *opaque)
virtio_notify_vector(vdev, vdev->config_vector);
for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
vdev->vq[i].vring.desc = 0;
vdev->vq[i].vring.avail = 0;
vdev->vq[i].vring.used = 0;
vdev->vq[i].last_avail_idx = 0;
vdev->vq[i].shadow_avail_idx = 0;
vdev->vq[i].used_idx = 0;
vdev->vq[i].last_avail_wrap_counter = true;
vdev->vq[i].shadow_avail_wrap_counter = true;
vdev->vq[i].used_wrap_counter = true;
virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
vdev->vq[i].signalled_used = 0;
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
vdev->vq[i].inuse = 0;
virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
__virtio_queue_reset(vdev, i);
}
}
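End to end, a guest that negotiated VIRTIO_F_RING_RESET resets one queue roughly as follows (the driver's view of the virtio 1.2 common configuration; QEMU's side is the VIRTIO_PCI_COMMON_Q_RESET handling above plus __virtio_queue_reset()):

    common->queue_select = q;
    common->queue_reset  = 1;        /* device runs virtio_queue_reset()  */
    while (common->queue_reset != 0) {
        ;                            /* 0 = reset complete; QEMU finishes
                                      * synchronously, so this exits at once */
    }
    /* the ring may be re-sized or re-placed here, then: */
    common->queue_enable = 1;        /* device runs virtio_queue_enable() */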


@ -153,6 +153,27 @@ int qcrypto_akcipher_max_dgst_len(QCryptoAkCipher *akcipher);
*/
void qcrypto_akcipher_free(QCryptoAkCipher *akcipher);
/**
* qcrypto_akcipher_export_p8info:
* @opts: the options of the akcipher to be exported.
* @key: the original key of the akcipher to be exported.
* @keylen: length of the 'key'
* @dst: output parameter, if export succeeds, *dst is set to the
* PKCS#8 encoded private key, caller MUST free this key with
* g_free after use.
* @dst_len: output parameter, indicates the length of PKCS#8 encoded
* key.
*
* Export the akcipher into DER-encoded PKCS#8 private key info; expects
* |key| to store a valid asymmetric PRIVATE key.
*
* Returns: 0 on success, -1 otherwise.
*/
int qcrypto_akcipher_export_p8info(const QCryptoAkCipherOptions *opts,
uint8_t *key, size_t keylen,
uint8_t **dst, size_t *dst_len,
Error **errp);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoAkCipher, qcrypto_akcipher_free)
#endif /* QCRYPTO_AKCIPHER_H */
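A hedged usage sketch for the export helper above (opts, key and keylen are assumed to already describe a valid private key):

    uint8_t *p8info = NULL;
    size_t p8info_len = 0;
    Error *err = NULL;

    if (qcrypto_akcipher_export_p8info(opts, key, keylen,
                                       &p8info, &p8info_len, &err) != 0) {
        error_report_err(err);       /* export failed */
    } else {
        /* consume the PKCS#8 DER blob, then free it as documented */
        g_free(p8info);
    }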


@@ -713,6 +713,10 @@ void ram_discard_manager_register_listener(RamDiscardManager *rdm,
void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
RamDiscardListener *rdl);
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
ram_addr_t *ram_addr, bool *read_only,
bool *mr_has_discard_manager);
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;


@@ -29,11 +29,20 @@ struct AcpiDevAmlIfClass {
dev_aml_fn build_dev_aml;
};
static inline void call_dev_aml_func(DeviceState *dev, Aml *scope)
static inline dev_aml_fn get_dev_aml_func(DeviceState *dev)
{
if (object_dynamic_cast(OBJECT(dev), TYPE_ACPI_DEV_AML_IF)) {
AcpiDevAmlIfClass *klass = ACPI_DEV_AML_IF_GET_CLASS(dev);
klass->build_dev_aml(ACPI_DEV_AML_IF(dev), scope);
return klass->build_dev_aml;
}
return NULL;
}
static inline void call_dev_aml_func(DeviceState *dev, Aml *scope)
{
dev_aml_fn fn = get_dev_aml_func(dev);
if (fn) {
fn(ACPI_DEV_AML_IF(dev), scope);
}
}
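For reference, a device plugging into this interface might set the hook like this sketch (TYPE_MY_DEV and the my_dev_* names are hypothetical):

static void my_dev_build_aml(AcpiDevAmlIf *adev, Aml *scope)
{
    /* append this device's AML objects to the enclosing scope */
    aml_append(scope, aml_device("MYD0"));   /* illustrative only */
}

static void my_dev_class_init(ObjectClass *klass, void *data)
{
    AcpiDevAmlIfClass *amlc = ACPI_DEV_AML_IF_CLASS(klass);

    amlc->build_dev_aml = my_dev_build_aml;
}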

include/hw/cxl/cxl_cdat.h (new file, 166 lines)

@@ -0,0 +1,166 @@
/*
* CXL CDAT Structure
*
* Copyright (C) 2021 Avery Design Systems, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef CXL_CDAT_H
#define CXL_CDAT_H
#include "hw/cxl/cxl_pci.h"
/*
* Reference:
* Coherent Device Attribute Table (CDAT) Specification, Rev. 1.03, July 2022
* Compute Express Link (CXL) Specification, Rev. 3.0, Aug. 2022
*/
/* Table Access DOE - CXL r3.0 8.1.11 */
#define CXL_DOE_TABLE_ACCESS 2
#define CXL_DOE_PROTOCOL_CDAT ((CXL_DOE_TABLE_ACCESS << 16) | CXL_VENDOR_ID)
/* Read Entry - CXL r3.0 8.1.11.1 */
#define CXL_DOE_TAB_TYPE_CDAT 0
#define CXL_DOE_TAB_ENT_MAX 0xFFFF
/* Read Entry Request - CXL r3.0 8.1.11.1 Table 8-13 */
#define CXL_DOE_TAB_REQ 0
typedef struct CDATReq {
DOEHeader header;
uint8_t req_code;
uint8_t table_type;
uint16_t entry_handle;
} QEMU_PACKED CDATReq;
/* Read Entry Response - CXL r3.0 8.1.11.1 Table 8-14 */
#define CXL_DOE_TAB_RSP 0
typedef struct CDATRsp {
DOEHeader header;
uint8_t rsp_code;
uint8_t table_type;
uint16_t entry_handle;
} QEMU_PACKED CDATRsp;
/* CDAT Table Format - CDAT Table 1 */
#define CXL_CDAT_REV 2
typedef struct CDATTableHeader {
uint32_t length;
uint8_t revision;
uint8_t checksum;
uint8_t reserved[6];
uint32_t sequence;
} QEMU_PACKED CDATTableHeader;
/* CDAT Structure Types - CDAT Table 2 */
typedef enum {
CDAT_TYPE_DSMAS = 0,
CDAT_TYPE_DSLBIS = 1,
CDAT_TYPE_DSMSCIS = 2,
CDAT_TYPE_DSIS = 3,
CDAT_TYPE_DSEMTS = 4,
CDAT_TYPE_SSLBIS = 5,
} CDATType;
typedef struct CDATSubHeader {
uint8_t type;
uint8_t reserved;
uint16_t length;
} CDATSubHeader;
/* Device Scoped Memory Affinity Structure - CDAT Table 3 */
typedef struct CDATDsmas {
CDATSubHeader header;
uint8_t DSMADhandle;
uint8_t flags;
#define CDAT_DSMAS_FLAG_NV (1 << 2)
#define CDAT_DSMAS_FLAG_SHAREABLE (1 << 3)
#define CDAT_DSMAS_FLAG_HW_COHERENT (1 << 4)
#define CDAT_DSMAS_FLAG_DYNAMIC_CAP (1 << 5)
uint16_t reserved;
uint64_t DPA_base;
uint64_t DPA_length;
} QEMU_PACKED CDATDsmas;
/* Device Scoped Latency and Bandwidth Information Structure - CDAT Table 5 */
typedef struct CDATDslbis {
CDATSubHeader header;
uint8_t handle;
/* Definitions of these fields refer directly to HMAT fields */
uint8_t flags;
uint8_t data_type;
uint8_t reserved;
uint64_t entry_base_unit;
uint16_t entry[3];
uint16_t reserved2;
} QEMU_PACKED CDATDslbis;
/* Device Scoped Memory Side Cache Information Structure - CDAT Table 6 */
typedef struct CDATDsmscis {
CDATSubHeader header;
uint8_t DSMAS_handle;
uint8_t reserved[3];
uint64_t memory_side_cache_size;
uint32_t cache_attributes;
} QEMU_PACKED CDATDsmscis;
/* Device Scoped Initiator Structure - CDAT Table 7 */
typedef struct CDATDsis {
CDATSubHeader header;
uint8_t flags;
uint8_t handle;
uint16_t reserved;
} QEMU_PACKED CDATDsis;
/* Device Scoped EFI Memory Type Structure - CDAT Table 8 */
typedef struct CDATDsemts {
CDATSubHeader header;
uint8_t DSMAS_handle;
uint8_t EFI_memory_type_attr;
uint16_t reserved;
uint64_t DPA_offset;
uint64_t DPA_length;
} QEMU_PACKED CDATDsemts;
/* Switch Scoped Latency and Bandwidth Information Structure - CDAT Table 9 */
typedef struct CDATSslbisHeader {
CDATSubHeader header;
uint8_t data_type;
uint8_t reserved[3];
uint64_t entry_base_unit;
} QEMU_PACKED CDATSslbisHeader;
#define CDAT_PORT_ID_USP 0x100
/* Switch Scoped Latency and Bandwidth Entry - CDAT Table 10 */
typedef struct CDATSslbe {
uint16_t port_x_id;
uint16_t port_y_id;
uint16_t latency_bandwidth;
uint16_t reserved;
} QEMU_PACKED CDATSslbe;
typedef struct CDATSslbis {
CDATSslbisHeader sslbis_header;
CDATSslbe sslbe[];
} QEMU_PACKED CDATSslbis;
typedef struct CDATEntry {
void *base;
uint32_t length;
} CDATEntry;
typedef struct CDATObject {
CDATEntry *entry;
int entry_len;
int (*build_cdat_table)(CDATSubHeader ***cdat_table, void *priv);
void (*free_cdat_table)(CDATSubHeader **cdat_table, int num, void *priv);
bool to_update;
void *private;
char *filename;
uint8_t *buf;
struct CDATSubHeader **built_buf;
int built_buf_len;
} CDATObject;
#endif /* CXL_CDAT_H */
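To illustrate the layout above, a sketch that fills a single DSMAS entry (all values are made up; MiB is assumed to come from qemu/units.h):

    CDATDsmas dsmas = {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(CDATDsmas),
        },
        .DSMADhandle = 0,
        .flags = CDAT_DSMAS_FLAG_NV,   /* non-volatile device memory */
        .DPA_base = 0,
        .DPA_length = 256 * MiB,
    };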


@@ -19,6 +19,7 @@
#include "qemu/range.h"
#include "qemu/typedefs.h"
#include "hw/register.h"
#include "qapi/error.h"
enum reg_type {
CXL2_DEVICE,
@@ -184,6 +185,8 @@ typedef struct cxl_component {
struct PCIDevice *pdev;
};
};
CDATObject cdat;
} CXLComponentState;
void cxl_component_register_block_init(Object *obj,
@@ -220,4 +223,8 @@ static inline hwaddr cxl_decode_ig(int ig)
CXLComponentState *cxl_get_hb_cstate(PCIHostState *hb);
void cxl_doe_cdat_init(CXLComponentState *cxl_cstate, Error **errp);
void cxl_doe_cdat_release(CXLComponentState *cxl_cstate);
void cxl_doe_cdat_update(CXLComponentState *cxl_cstate, Error **errp);
#endif


@@ -243,6 +243,9 @@ struct CXLType3Dev {
AddressSpace hostmem_as;
CXLComponentState cxl_cstate;
CXLDeviceState cxl_dstate;
/* DOE */
DOECap doe_cdat;
};
#define TYPE_CXL_TYPE3 "cxl-type3"


@@ -13,6 +13,7 @@
#include "qemu/compiler.h"
#include "hw/pci/pci.h"
#include "hw/pci/pcie.h"
#include "hw/cxl/cxl_cdat.h"
#define CXL_VENDOR_ID 0x1e98


@@ -18,6 +18,8 @@
#define SMBIOS_MAX_TYPE 127
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
/* memory area description, used by type 19 table */
struct smbios_phys_mem_area {
@@ -187,8 +189,18 @@ struct smbios_type_4 {
uint8_t thread_count;
uint16_t processor_characteristics;
uint16_t processor_family2;
/* SMBIOS spec 3.0.0, Table 21 */
uint16_t core_count2;
uint16_t core_enabled2;
uint16_t thread_count2;
} QEMU_PACKED;
typedef enum smbios_type_4_len_ver {
SMBIOS_TYPE_4_LEN_V28 = offsetofend(struct smbios_type_4,
processor_family2),
SMBIOS_TYPE_4_LEN_V30 = offsetofend(struct smbios_type_4, thread_count2),
} smbios_type_4_len_ver;
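The enum gives callers the two candidate table lengths; a sketch of how one might be chosen (smbios_type_4_len_for is a hypothetical helper, not from this series):

static size_t smbios_type_4_len_for(unsigned cores)
{
    /* the SMBIOS 3.0 length is only needed once a count no longer fits
     * the legacy 8-bit fields, e.g. more than 255 cores */
    return cores > 255 ? SMBIOS_TYPE_4_LEN_V30 : SMBIOS_TYPE_4_LEN_V28;
}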
/* SMBIOS type 8 - Port Connector Information */
struct smbios_type_8 {
struct smbios_structure_header header;


@@ -58,7 +58,6 @@ typedef struct VTDContextEntry VTDContextEntry;
typedef struct VTDContextCacheEntry VTDContextCacheEntry;
typedef struct VTDAddressSpace VTDAddressSpace;
typedef struct VTDIOTLBEntry VTDIOTLBEntry;
typedef struct VTDBus VTDBus;
typedef union VTD_IR_TableEntry VTD_IR_TableEntry;
typedef union VTD_IR_MSIAddress VTD_IR_MSIAddress;
typedef struct VTDPASIDDirEntry VTDPASIDDirEntry;
@@ -98,11 +97,13 @@ struct VTDPASIDEntry {
struct VTDAddressSpace {
PCIBus *bus;
uint8_t devfn;
uint32_t pasid;
AddressSpace as;
IOMMUMemoryRegion iommu;
MemoryRegion root; /* The root container of the device */
MemoryRegion nodmar; /* The alias of shared nodmar MR */
MemoryRegion iommu_ir; /* Interrupt region: 0xfeeXXXXX */
MemoryRegion iommu_ir_fault; /* Interrupt region for catching fault */
IntelIOMMUState *iommu_state;
VTDContextCacheEntry context_cache_entry;
QLIST_ENTRY(VTDAddressSpace) next;
@@ -111,15 +112,10 @@ struct VTDAddressSpace {
IOVATree *iova_tree; /* Traces mapped IOVA ranges */
};
struct VTDBus {
PCIBus* bus; /* A reference to the bus to provide translation for */
/* A table of VTDAddressSpace objects indexed by devfn */
VTDAddressSpace *dev_as[];
};
struct VTDIOTLBEntry {
uint64_t gfn;
uint16_t domain_id;
uint32_t pasid;
uint64_t slpte;
uint64_t mask;
uint8_t access_flags;
@@ -253,8 +249,8 @@ struct IntelIOMMUState {
uint32_t context_cache_gen; /* Should be in [1,MAX] */
GHashTable *iotlb; /* IOTLB */
GHashTable *vtd_as_by_busptr; /* VTDBus objects indexed by PCIBus* reference */
VTDBus *vtd_as_by_bus_num[VTD_PCI_BUS_MAX]; /* VTDBus objects indexed by bus number */
GHashTable *vtd_address_spaces; /* VTD address spaces */
VTDAddressSpace *vtd_as_cache[VTD_PCI_BUS_MAX]; /* VTD address space cache */
/* list of registered notifiers */
QLIST_HEAD(, VTDAddressSpace) vtd_as_with_notifiers;
@@ -268,6 +264,7 @@ struct IntelIOMMUState {
uint8_t aw_bits; /* Host/IOVA address width (in bits) */
bool dma_drain; /* Whether DMA r/w draining enabled */
bool dma_translation; /* Whether DMA translation supported */
bool pasid; /* Whether to support PASID */
/*
* Protects IOMMU states in general. Currently it protects the
@@ -279,6 +276,7 @@
/* Find the VTD Address space associated with the given bus pointer,
* create a new one if none exists
*/
VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn);
VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus,
int devfn, unsigned int pasid);
#endif
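With the extra parameter, callers without PASID-tagged traffic pass the sentinel defined in pci_bus.h; a sketch:

    /* look up (or create) the address space for plain, untagged requests */
    VTDAddressSpace *vtd_as = vtd_find_add_as(s, bus, devfn, PCI_NO_PASID);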


@@ -33,10 +33,10 @@ bool msix_is_masked(PCIDevice *dev, unsigned vector);
void msix_set_pending(PCIDevice *dev, unsigned vector);
void msix_clr_pending(PCIDevice *dev, int vector);
int msix_vector_use(PCIDevice *dev, unsigned vector);
void msix_vector_use(PCIDevice *dev, unsigned vector);
void msix_vector_unuse(PCIDevice *dev, unsigned vector);
void msix_unuse_all_vectors(PCIDevice *dev);
void msix_set_mask(PCIDevice *dev, int vector, bool mask, Error **errp);
void msix_set_mask(PCIDevice *dev, int vector, bool mask);
void msix_notify(PCIDevice *dev, unsigned vector);


@@ -28,6 +28,8 @@ enum PCIBusFlags {
PCI_BUS_CXL = 0x0004,
};
#define PCI_NO_PASID UINT32_MAX
struct PCIBus {
BusState qbus;
enum PCIBusFlags flags;


@@ -157,6 +157,9 @@
/* Vendors and devices. Sort key: vendor first, device next. */
/* Ref: PCIe r6.0 Table 6-32 */
#define PCI_VENDOR_ID_PCI_SIG 0x0001
#define PCI_VENDOR_ID_LSI_LOGIC 0x1000
#define PCI_DEVICE_ID_LSI_53C810 0x0001
#define PCI_DEVICE_ID_LSI_53C895A 0x0012


@@ -26,6 +26,7 @@
#include "hw/pci/pcie_aer.h"
#include "hw/pci/pcie_sriov.h"
#include "hw/hotplug.h"
#include "hw/pci/pcie_doe.h"
typedef enum {
/* for attention and power indicator */

include/hw/pci/pcie_doe.h (new file, 123 lines)

@@ -0,0 +1,123 @@
/*
* PCIe Data Object Exchange
*
* Copyright (C) 2021 Avery Design Systems, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef PCIE_DOE_H
#define PCIE_DOE_H
#include "qemu/range.h"
#include "qemu/typedefs.h"
#include "hw/register.h"
/*
* Reference:
* PCIe r6.0 - 7.9.24 Data Object Exchange Extended Capability
*/
/* Capabilities Register - r6.0 7.9.24.2 */
#define PCI_EXP_DOE_CAP 0x04
REG32(PCI_DOE_CAP_REG, 0)
FIELD(PCI_DOE_CAP_REG, INTR_SUPP, 0, 1)
FIELD(PCI_DOE_CAP_REG, DOE_INTR_MSG_NUM, 1, 11)
/* Control Register - r6.0 7.9.24.3 */
#define PCI_EXP_DOE_CTRL 0x08
REG32(PCI_DOE_CAP_CONTROL, 0)
FIELD(PCI_DOE_CAP_CONTROL, DOE_ABORT, 0, 1)
FIELD(PCI_DOE_CAP_CONTROL, DOE_INTR_EN, 1, 1)
FIELD(PCI_DOE_CAP_CONTROL, DOE_GO, 31, 1)
/* Status Register - r6.0 7.9.24.4 */
#define PCI_EXP_DOE_STATUS 0x0c
REG32(PCI_DOE_CAP_STATUS, 0)
FIELD(PCI_DOE_CAP_STATUS, DOE_BUSY, 0, 1)
FIELD(PCI_DOE_CAP_STATUS, DOE_INTR_STATUS, 1, 1)
FIELD(PCI_DOE_CAP_STATUS, DOE_ERROR, 2, 1)
FIELD(PCI_DOE_CAP_STATUS, DATA_OBJ_RDY, 31, 1)
/* Write Data Mailbox Register - r6.0 7.9.24.5 */
#define PCI_EXP_DOE_WR_DATA_MBOX 0x10
/* Read Data Mailbox Register - r6.0 7.9.24.6 */
#define PCI_EXP_DOE_RD_DATA_MBOX 0x14
/* PCI-SIG defined Data Object Types - r6.0 Table 6-32 */
#define PCI_SIG_DOE_DISCOVERY 0x00
#define PCI_DOE_DW_SIZE_MAX (1 << 18)
#define PCI_DOE_PROTOCOL_NUM_MAX 256
#define DATA_OBJ_BUILD_HEADER1(v, p) (((p) << 16) | (v))
#define DATA_OBJ_LEN_MASK(len) ((len) & (PCI_DOE_DW_SIZE_MAX - 1))
typedef struct DOEHeader DOEHeader;
typedef struct DOEProtocol DOEProtocol;
typedef struct DOECap DOECap;
struct DOEHeader {
uint16_t vendor_id;
uint8_t data_obj_type;
uint8_t reserved;
uint32_t length;
} QEMU_PACKED;
/* Protocol info and response function callback */
struct DOEProtocol {
uint16_t vendor_id;
uint8_t data_obj_type;
bool (*handle_request)(DOECap *);
};
struct DOECap {
/* Owner */
PCIDevice *pdev;
uint16_t offset;
struct {
bool intr;
uint16_t vec;
} cap;
struct {
bool abort;
bool intr;
bool go;
} ctrl;
struct {
bool busy;
bool intr;
bool error;
bool ready;
} status;
uint32_t *write_mbox;
uint32_t *read_mbox;
/* Mailbox position indicator */
uint32_t read_mbox_idx;
uint32_t read_mbox_len;
uint32_t write_mbox_len;
/* Protocols and their response callbacks */
DOEProtocol *protocols;
uint16_t protocol_num;
};
void pcie_doe_init(PCIDevice *pdev, DOECap *doe_cap, uint16_t offset,
DOEProtocol *protocols, bool intr, uint16_t vec);
void pcie_doe_fini(DOECap *doe_cap);
bool pcie_doe_read_config(DOECap *doe_cap, uint32_t addr, int size,
uint32_t *buf);
void pcie_doe_write_config(DOECap *doe_cap, uint32_t addr,
uint32_t val, int size);
uint32_t pcie_doe_build_protocol(DOEProtocol *p);
void *pcie_doe_get_write_mbox_ptr(DOECap *doe_cap);
void pcie_doe_set_rsp(DOECap *doe_cap, void *rsp);
uint32_t pcie_doe_get_obj_len(void *obj);
#endif /* PCIE_DOE_H */
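A hedged sketch of instantiating the capability from a device's realize path (the vendor ID, object type, config offset, handler, and the dev->doe field are placeholders, not a real protocol):

static bool my_handle_request(DOECap *doe_cap)
{
    /* decode doe_cap->write_mbox, build a response via
     * pcie_doe_set_rsp(), and return true once handled */
    return false;
}

static DOEProtocol my_protocols[] = {
    { 0x1af4 /* hypothetical vendor */, 0x01, my_handle_request },
    { }
};

/* in realize(): no interrupt support here, so intr=false and vec=0 */
pcie_doe_init(pdev, &dev->doe, 0x160 /* hypothetical offset */,
              my_protocols, false, 0);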


@@ -179,4 +179,8 @@ typedef enum PCIExpLinkWidth {
#define PCI_ACS_VER 0x1
#define PCI_ACS_SIZEOF 8
/* DOE Capability Register Fields */
#define PCI_DOE_VER 0x1
#define PCI_DOE_SIZEOF 24
#endif /* QEMU_PCIE_REGS_H */


@@ -297,6 +297,11 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);
int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev,
struct vhost_virtqueue *vq, unsigned idx);
void vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev,
struct vhost_virtqueue *vq, unsigned idx);
void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
void vhost_dev_free_inflight(struct vhost_inflight *inflight);
void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f);


@@ -117,6 +117,11 @@ typedef struct VirtIOPCIRegion {
typedef struct VirtIOPCIQueue {
uint16_t num;
bool enabled;
/*
* No need to migrate the reset status, because it is always 0
* when the migration starts.
*/
bool reset;
uint32_t desc[2];
uint32_t avail[2];
uint32_t used[2];


@@ -148,6 +148,8 @@ struct VirtioDeviceClass {
void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
void (*reset)(VirtIODevice *vdev);
void (*set_status)(VirtIODevice *vdev, uint8_t val);
void (*queue_reset)(VirtIODevice *vdev, uint32_t queue_index);
void (*queue_enable)(VirtIODevice *vdev, uint32_t queue_index);
/* For transitional devices, this is a bitmap of features
* that are only exposed on the legacy interface but not
* the modern one.
@@ -286,6 +288,8 @@ int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
MemoryRegion *mr, bool assign);
int virtio_set_status(VirtIODevice *vdev, uint8_t val);
void virtio_reset(void *opaque);
void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index);
void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index);
void virtio_update_irq(VirtIODevice *vdev);
int virtio_set_features(VirtIODevice *vdev, uint64_t val);
@@ -309,7 +313,9 @@ typedef struct VirtIORNGConf VirtIORNGConf;
DEFINE_PROP_BIT64("iommu_platform", _state, _field, \
VIRTIO_F_IOMMU_PLATFORM, false), \
DEFINE_PROP_BIT64("packed", _state, _field, \
VIRTIO_F_RING_PACKED, false)
VIRTIO_F_RING_PACKED, false), \
DEFINE_PROP_BIT64("queue_reset", _state, _field, \
VIRTIO_F_RING_RESET, true)
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n);
@@ -389,6 +395,24 @@ static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
return vdev->started;
}
return status & VIRTIO_CONFIG_S_DRIVER_OK;
}
/**
* virtio_device_should_start() - check if the device can be started
* @vdev - the VirtIO device
* @status - the device's status bits
*
* This is similar to virtio_device_started() but also encapsulates a
* check on the VM status which would prevent a device starting
* anyway.
*/
static inline bool virtio_device_should_start(VirtIODevice *vdev, uint8_t status)
{
if (vdev->use_started) {
return vdev->started;
}
if (!vdev->vm_running) {
return false;
}
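A sketch of the intended call site, where a backend's set_status handler gates start-up (my_vhost_set_status is hypothetical; the vhost-user conversions elsewhere in this pull are the real users):

static void my_vhost_set_status(VirtIODevice *vdev, uint8_t status)
{
    bool should_start = virtio_device_should_start(vdev, status);

    /* start or stop the vhost backend accordingly (elided) */
}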


@@ -48,4 +48,8 @@ uint64_t vhost_net_get_acked_features(VHostNetState *net);
int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu);
void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc,
int vq_index);
int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc,
int vq_index);
#endif


@@ -113,6 +113,7 @@ typedef struct CryptoDevBackendSessionInfo {
CryptoDevBackendSymSessionInfo sym_sess_info;
CryptoDevBackendAsymSessionInfo asym_sess_info;
} u;
uint64_t session_id;
} CryptoDevBackendSessionInfo;
/**
@@ -188,27 +189,37 @@ typedef struct CryptoDevBackendOpInfo {
} u;
} CryptoDevBackendOpInfo;
typedef void (*CryptoDevCompletionFunc) (void *opaque, int ret);
struct CryptoDevBackendClass {
ObjectClass parent_class;
void (*init)(CryptoDevBackend *backend, Error **errp);
void (*cleanup)(CryptoDevBackend *backend, Error **errp);
int64_t (*create_session)(CryptoDevBackend *backend,
CryptoDevBackendSessionInfo *sess_info,
uint32_t queue_index, Error **errp);
int (*create_session)(CryptoDevBackend *backend,
CryptoDevBackendSessionInfo *sess_info,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque);
int (*close_session)(CryptoDevBackend *backend,
uint64_t session_id,
uint32_t queue_index, Error **errp);
uint64_t session_id,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque);
int (*do_op)(CryptoDevBackend *backend,
CryptoDevBackendOpInfo *op_info,
uint32_t queue_index, Error **errp);
CryptoDevBackendOpInfo *op_info,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque);
};
typedef enum CryptoDevBackendOptionsType {
CRYPTODEV_BACKEND_TYPE_NONE = 0,
CRYPTODEV_BACKEND_TYPE_BUILTIN = 1,
CRYPTODEV_BACKEND_TYPE_VHOST_USER = 2,
CRYPTODEV_BACKEND_TYPE_LKCF = 3,
CRYPTODEV_BACKEND_TYPE__MAX,
} CryptoDevBackendOptionsType;
@@ -303,15 +314,20 @@ void cryptodev_backend_cleanup(
* @sess_info: parameters needed for session creation
* @queue_index: queue index of cryptodev backend client
* @errp: pointer to a NULL-initialized error object
* @cb: callback called when session creation is completed
* @opaque: parameter passed to callback
*
* Create a session for symmetric/symmetric algorithms
* Create a session for symmetric/asymmetric algorithms
*
* Returns: session id on success, or -1 on error
* Returns: 0 for success and cb will be called when creation is completed,
* negative value for error, and cb will not be called.
*/
int64_t cryptodev_backend_create_session(
int cryptodev_backend_create_session(
CryptoDevBackend *backend,
CryptoDevBackendSessionInfo *sess_info,
uint32_t queue_index, Error **errp);
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque);
/**
* cryptodev_backend_close_session:
@@ -319,34 +335,43 @@ int64_t cryptodev_backend_create_session(
* @session_id: the session id
* @queue_index: queue index of cryptodev backend client
* @errp: pointer to a NULL-initialized error object
* @cb: callback called when the session close is completed
* @opaque: parameter passed to callback
*
* Close a session which was previously
* created by cryptodev_backend_create_session()
*
* Returns: 0 on success, or Negative on error
* Returns: 0 for success and cb will be called when the session is closed,
* negative value for error, and cb will not be called.
*/
int cryptodev_backend_close_session(
CryptoDevBackend *backend,
uint64_t session_id,
uint32_t queue_index, Error **errp);
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque);
/**
* cryptodev_backend_crypto_operation:
* @backend: the cryptodev backend object
* @opaque: pointer to a VirtIOCryptoReq object
* @opaque1: pointer to a VirtIOCryptoReq object
* @queue_index: queue index of cryptodev backend client
* @errp: pointer to a NULL-initialized error object
* @cb: callback called when the operation is completed
* @opaque2: parameter passed to cb
*
* Do crypto operation, such as encryption and
* decryption
*
* Returns: VIRTIO_CRYPTO_OK on success,
* or -VIRTIO_CRYPTO_* on error
* Returns: 0 for success and cb will be called when the operation is completed,
* negative value for error, and cb will not be called.
*/
int cryptodev_backend_crypto_operation(
CryptoDevBackend *backend,
void *opaque,
uint32_t queue_index, Error **errp);
void *opaque1,
uint32_t queue_index,
CryptoDevCompletionFunc cb,
void *opaque2);
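A hedged sketch of the new asynchronous contract (the callback, request pointer and surrounding code are illustrative only):

static void my_create_done(void *opaque, int ret)
{
    /* ret < 0: backend failure; otherwise the session is established */
}

int r = cryptodev_backend_create_session(backend, &sess_info, queue_index,
                                         my_create_done, req /* opaque */);
if (r < 0) {
    /* no callback will follow; fail the request here */
}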
/**
* cryptodev_backend_set_used:


@@ -876,6 +876,7 @@
'colo-compare',
'cryptodev-backend',
'cryptodev-backend-builtin',
'cryptodev-backend-lkcf',
{ 'name': 'cryptodev-vhost-user',
'if': 'CONFIG_VHOST_CRYPTO' },
'dbus-vmstate',
@@ -944,6 +945,7 @@
'colo-compare': 'ColoCompareProperties',
'cryptodev-backend': 'CryptodevBackendProperties',
'cryptodev-backend-builtin': 'CryptodevBackendProperties',
'cryptodev-backend-lkcf': 'CryptodevBackendProperties',
'cryptodev-vhost-user': { 'type': 'CryptodevVhostUserProperties',
'if': 'CONFIG_VHOST_CRYPTO' },
'dbus-vmstate': 'DBusVMStateProperties',


@@ -1681,8 +1681,10 @@ sub process {
# Block comment styles
# Block comments use /* on a line of its own
if ($rawline !~ m@^\+.*/\*.*\*/[ \t)}]*$@ && #inline /*...*/
$rawline =~ m@^\+.*/\*\*?+[ \t]*[^ \t]@) { # /* or /** non-blank
my $commentline = $rawline;
while ($commentline =~ s@^(\+.*)/\*.*\*/@$1@o) { # remove inline /*...*/ comments
}
if ($commentline =~ m@^\+.*/\*\*?+[ \t]*[^ \t]@) { # /* or /** non-blank
WARN("Block comments use a leading /* on a separate line\n" . $herecurr);
}
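For illustration, the kind of C input the reworked pattern now classifies correctly (a sketch, not taken from the patch):

x = 1; /* a complete inline comment is stripped before the check */

/* whereas text after an opening marker that is not closed on the same
 * line still draws the block-comment warning
 */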


@@ -33,6 +33,7 @@
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
#include "exec/address-spaces.h"
//#define DEBUG_UNASSIGNED
@@ -2121,6 +2122,77 @@ void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
rdmc->unregister_listener(rdm, rdl);
}
/* Called with rcu_read_lock held. */
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
ram_addr_t *ram_addr, bool *read_only,
bool *mr_has_discard_manager)
{
MemoryRegion *mr;
hwaddr xlat;
hwaddr len = iotlb->addr_mask + 1;
bool writable = iotlb->perm & IOMMU_WO;
if (mr_has_discard_manager) {
*mr_has_discard_manager = false;
}
/*
* The IOMMU TLB entry we have just covers translation through
* this IOMMU to its immediate target. We need to translate
* it the rest of the way through to memory.
*/
mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
&xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
if (!memory_region_is_ram(mr)) {
error_report("iommu map to non memory area %" HWADDR_PRIx "", xlat);
return false;
} else if (memory_region_has_ram_discard_manager(mr)) {
RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
MemoryRegionSection tmp = {
.mr = mr,
.offset_within_region = xlat,
.size = int128_make64(len),
};
if (mr_has_discard_manager) {
*mr_has_discard_manager = true;
}
/*
* Malicious VMs can map memory into the IOMMU, which is expected
* to remain discarded. vfio will pin all pages, populating memory.
* Disallow that. vmstate priorities make sure any RamDiscardManager
* was already restored before IOMMUs are restored.
*/
if (!ram_discard_manager_is_populated(rdm, &tmp)) {
error_report("iommu map to discarded memory (e.g., unplugged via"
" virtio-mem): %" HWADDR_PRIx "",
iotlb->translated_addr);
return false;
}
}
/*
* Translation truncates length to the IOMMU page size,
* check that it did not truncate too much.
*/
if (len & iotlb->addr_mask) {
error_report("iommu has granularity incompatible with target AS");
return false;
}
if (vaddr) {
*vaddr = memory_region_get_ram_ptr(mr) + xlat;
}
if (ram_addr) {
*ram_addr = memory_region_get_ram_addr(mr) + xlat;
}
if (read_only) {
*read_only = !writable || mr->readonly;
}
return true;
}
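Callers, such as vfio after the move referenced in this pull, use it roughly like this sketch (must run under rcu_read_lock(), per the comment above):

    void *vaddr;
    bool read_only;

    if (memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
        /* map [vaddr, vaddr + iotlb->addr_mask] for the device */
    }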
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
uint8_t mask = 1 << client;

tests/avocado/acpi-bits.py (new file, 396 lines)

@@ -0,0 +1,396 @@
#!/usr/bin/env python3
# group: rw quick
# Exercise QEMU-generated ACPI/SMBIOS tables using biosbits,
# https://biosbits.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Author:
# Ani Sinha <ani@anisinha.ca>
# pylint: disable=invalid-name
# pylint: disable=consider-using-f-string
"""
These are QEMU ACPI/SMBIOS avocado tests using biosbits.
Biosbits is available originally at https://biosbits.org/.
This test uses a fork of the upstream bits and has numerous fixes
including an upgraded acpica. The fork is located here:
https://gitlab.com/qemu-project/biosbits-bits .
"""
import logging
import os
import platform
import re
import shutil
import subprocess
import tarfile
import tempfile
import time
import zipfile
from typing import (
List,
Optional,
Sequence,
)
from qemu.machine import QEMUMachine
from avocado import skipIf
from avocado_qemu import QemuBaseTest
deps = ["xorriso"] # dependent tools needed in the test setup/box.
supported_platforms = ['x86_64'] # supported test platforms.
def which(tool):
""" looks up the full path for @tool, returns None if not found
or if @tool does not have executable permissions.
"""
paths = os.getenv('PATH')
for p in paths.split(os.path.pathsep):
p = os.path.join(p, tool)
if os.path.exists(p) and os.access(p, os.X_OK):
return p
return None
def missing_deps():
""" returns True if any of the test dependent tools are absent.
"""
for dep in deps:
if which(dep) is None:
return True
return False
def supported_platform():
""" checks if the test is running on a supported platform.
"""
return platform.machine() in supported_platforms
class QEMUBitsMachine(QEMUMachine): # pylint: disable=too-few-public-methods
"""
A QEMU VM, with isa-debugcon enabled and bits iso passed
using -cdrom to QEMU commandline.
"""
def __init__(self,
binary: str,
args: Sequence[str] = (),
wrapper: Sequence[str] = (),
name: Optional[str] = None,
base_temp_dir: str = "/var/tmp",
debugcon_log: str = "debugcon-log.txt",
debugcon_addr: str = "0x403",
sock_dir: Optional[str] = None,
qmp_timer: Optional[float] = None):
# pylint: disable=too-many-arguments
if name is None:
name = "qemu-bits-%d" % os.getpid()
if sock_dir is None:
sock_dir = base_temp_dir
super().__init__(binary, args, wrapper=wrapper, name=name,
base_temp_dir=base_temp_dir,
sock_dir=sock_dir, qmp_timer=qmp_timer)
self.debugcon_log = debugcon_log
self.debugcon_addr = debugcon_addr
self.base_temp_dir = base_temp_dir
@property
def _base_args(self) -> List[str]:
args = super()._base_args
args.extend([
'-chardev',
'file,path=%s,id=debugcon' %os.path.join(self.base_temp_dir,
self.debugcon_log),
'-device',
'isa-debugcon,iobase=%s,chardev=debugcon' %self.debugcon_addr,
])
return args
def base_args(self):
"""return the base argument to QEMU binary"""
return self._base_args
@skipIf(not supported_platform() or missing_deps() or os.getenv('GITLAB_CI'),
'incorrect platform or dependencies (%s) not installed ' \
'or running on GitLab' % ','.join(deps))
class AcpiBitsTest(QemuBaseTest): #pylint: disable=too-many-instance-attributes
"""
ACPI and SMBIOS tests using biosbits.
:avocado: tags=arch:x86_64
:avocado: tags=acpi
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._vm = None
self._workDir = None
self._baseDir = None
# following are some standard configuration constants
self._bitsInternalVer = 2020
self._bitsCommitHash = 'b48b88ff' # commit hash must match
# the artifact tag below
self._bitsTag = "qemu-bits-10182022" # this is the latest bits
# release as of today.
self._bitsArtSHA1Hash = 'b04790ac9b99b5662d0416392c73b97580641fe5'
self._bitsArtURL = ("https://gitlab.com/qemu-project/"
"biosbits-bits/-/jobs/artifacts/%s/"
"download?job=qemu-bits-build" %self._bitsTag)
self._debugcon_addr = '0x403'
self._debugcon_log = 'debugcon-log.txt'
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger('acpi-bits')
def _print_log(self, log):
self.logger.info('\nlogs from biosbits follow:')
self.logger.info('==========================================\n')
self.logger.info(log)
self.logger.info('==========================================\n')
def copy_bits_config(self):
""" copies the bios bits config file into bits.
"""
config_file = 'bits-cfg.txt'
bits_config_dir = os.path.join(self._baseDir, 'acpi-bits',
'bits-config')
target_config_dir = os.path.join(self._workDir,
'bits-%d' %self._bitsInternalVer,
'boot')
self.assertTrue(os.path.exists(bits_config_dir))
self.assertTrue(os.path.exists(target_config_dir))
self.assertTrue(os.access(os.path.join(bits_config_dir,
config_file), os.R_OK))
shutil.copy2(os.path.join(bits_config_dir, config_file),
target_config_dir)
self.logger.info('copied config file %s to %s',
config_file, target_config_dir)
def copy_test_scripts(self):
"""copies the python test scripts into bits. """
bits_test_dir = os.path.join(self._baseDir, 'acpi-bits',
'bits-tests')
target_test_dir = os.path.join(self._workDir,
'bits-%d' %self._bitsInternalVer,
'boot', 'python')
self.assertTrue(os.path.exists(bits_test_dir))
self.assertTrue(os.path.exists(target_test_dir))
for filename in os.listdir(bits_test_dir):
if os.path.isfile(os.path.join(bits_test_dir, filename)) and \
filename.endswith('.py2'):
# all test scripts are named with extension .py2 so that
# avocado does not try to load them. These scripts are
# written for python 2.7, not python 3; if avocado
# loaded them, it would complain about their python 2
# specific syntax.
newfilename = os.path.splitext(filename)[0] + '.py'
shutil.copy2(os.path.join(bits_test_dir, filename),
os.path.join(target_test_dir, newfilename))
self.logger.info('copied test file %s to %s',
filename, target_test_dir)
# now remove the pyc test file if it exists, otherwise the
# changes in the python test script won't be executed.
testfile_pyc = os.path.splitext(filename)[0] + '.pyc'
if os.access(os.path.join(target_test_dir, testfile_pyc),
os.F_OK):
os.remove(os.path.join(target_test_dir, testfile_pyc))
self.logger.info('removed compiled file %s',
os.path.join(target_test_dir,
testfile_pyc))
def fix_mkrescue(self, mkrescue):
""" grub-mkrescue is a bash script with two variables, 'prefix' and
'libdir'. They must be pointed to the right location so that the
iso can be generated appropriately. We point the two variables to
the directory where we have extracted our pre-built bits grub
tarball.
"""
grub_x86_64_mods = os.path.join(self._workDir, 'grub-inst-x86_64-efi')
grub_i386_mods = os.path.join(self._workDir, 'grub-inst')
self.assertTrue(os.path.exists(grub_x86_64_mods))
self.assertTrue(os.path.exists(grub_i386_mods))
new_script = ""
with open(mkrescue, 'r', encoding='utf-8') as filehandle:
orig_script = filehandle.read()
new_script = re.sub('(^prefix=)(.*)',
r'\1"%s"' %grub_x86_64_mods,
orig_script, flags=re.M)
new_script = re.sub('(^libdir=)(.*)', r'\1"%s/lib"' %grub_i386_mods,
new_script, flags=re.M)
with open(mkrescue, 'w', encoding='utf-8') as filehandle:
filehandle.write(new_script)
def generate_bits_iso(self):
""" Uses grub-mkrescue to generate a fresh bits iso with the python
test scripts
"""
bits_dir = os.path.join(self._workDir,
'bits-%d' %self._bitsInternalVer)
iso_file = os.path.join(self._workDir,
'bits-%d.iso' %self._bitsInternalVer)
mkrescue_script = os.path.join(self._workDir,
'grub-inst-x86_64-efi', 'bin',
'grub-mkrescue')
self.assertTrue(os.access(mkrescue_script,
os.R_OK | os.W_OK | os.X_OK))
self.fix_mkrescue(mkrescue_script)
self.logger.info('using grub-mkrescue for generating biosbits iso ...')
try:
if os.getenv('V'):
subprocess.check_call([mkrescue_script, '-o', iso_file,
bits_dir], stderr=subprocess.STDOUT)
else:
subprocess.check_call([mkrescue_script, '-o',
iso_file, bits_dir],
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL)
except Exception as e: # pylint: disable=broad-except
self.skipTest("Error while generating the bits iso. "
"Pass V=1 in the environment to get more details. "
+ str(e))
self.assertTrue(os.access(iso_file, os.R_OK))
self.logger.info('iso file %s successfully generated.', iso_file)
def setUp(self): # pylint: disable=arguments-differ
super().setUp('qemu-system-')
self._baseDir = os.getenv('AVOCADO_TEST_BASEDIR')
# workdir could also be avocado's own workdir in self.workdir.
# At present, I prefer to maintain my own temporary working
# directory. It gives us more control over the generated bits
# log files and also for debugging, we may choose not to remove
# this working directory so that the logs and iso can be
# inspected manually and archived if needed.
self._workDir = tempfile.mkdtemp(prefix='acpi-bits-',
suffix='.tmp')
self.logger.info('working dir: %s', self._workDir)
prebuiltDir = os.path.join(self._workDir, 'prebuilt')
if not os.path.isdir(prebuiltDir):
os.mkdir(prebuiltDir, mode=0o775)
bits_zip_file = os.path.join(prebuiltDir, 'bits-%d-%s.zip'
%(self._bitsInternalVer,
self._bitsCommitHash))
grub_tar_file = os.path.join(prebuiltDir,
'bits-%d-%s-grub.tar.gz'
%(self._bitsInternalVer,
self._bitsCommitHash))
bitsLocalArtLoc = self.fetch_asset(self._bitsArtURL,
asset_hash=self._bitsArtSHA1Hash)
self.logger.info("downloaded bits artifacts to %s", bitsLocalArtLoc)
# extract the bits artifact in the temp working directory
with zipfile.ZipFile(bitsLocalArtLoc, 'r') as zref:
zref.extractall(prebuiltDir)
# extract the bits software in the temp working directory
with zipfile.ZipFile(bits_zip_file, 'r') as zref:
zref.extractall(self._workDir)
with tarfile.open(grub_tar_file, 'r', encoding='utf-8') as tarball:
tarball.extractall(self._workDir)
self.copy_test_scripts()
self.copy_bits_config()
self.generate_bits_iso()
def parse_log(self):
"""parse the log generated by running bits tests and
check for failures.
"""
debugconf = os.path.join(self._workDir, self._debugcon_log)
log = ""
with open(debugconf, 'r', encoding='utf-8') as filehandle:
log = filehandle.read()
matchiter = re.finditer(r'(.*Summary: )(\d+ passed), (\d+ failed).*',
log)
for match in matchiter:
# verify that no test cases failed.
try:
self.assertEqual(match.group(3).split()[0], '0',
'Some bits tests seem to have failed. ' \
'Please check the test logs for more info.')
except AssertionError as e:
self._print_log(log)
raise e
else:
if os.getenv('V'):
self._print_log(log)
def tearDown(self):
"""
Let's do some cleanup.
"""
if self._vm:
self.assertFalse(self._vm.is_running())
self.logger.info('removing the work directory %s', self._workDir)
shutil.rmtree(self._workDir)
super().tearDown()
def test_acpi_smbios_bits(self):
"""The main test case implementaion."""
iso_file = os.path.join(self._workDir,
'bits-%d.iso' %self._bitsInternalVer)
self.assertTrue(os.access(iso_file, os.R_OK))
self._vm = QEMUBitsMachine(binary=self.qemu_bin,
base_temp_dir=self._workDir,
debugcon_log=self._debugcon_log,
debugcon_addr=self._debugcon_addr)
self._vm.add_args('-cdrom', '%s' %iso_file)
# the vm needs to be run under icount so that TCG emulation is
# consistent in terms of timing. smilatency tests have consistent
# timing requirements.
self._vm.add_args('-icount', 'auto')
args = " ".join(str(arg) for arg in self._vm.base_args()) + \
" " + " ".join(str(arg) for arg in self._vm.args)
self.logger.info("launching QEMU vm with the following arguments: %s",
args)
self._vm.launch()
# biosbits has been configured to run all the specified test suites
# in batch mode and then automatically initiate a vm shutdown.
# sleep for a maximum of one minute
max_sleep_time = time.monotonic() + 60
while self._vm.is_running() and time.monotonic() < max_sleep_time:
time.sleep(1)
self.assertFalse(time.monotonic() > max_sleep_time,
'The VM seems to have failed to shut down in time')
self.parse_log()


@@ -0,0 +1,18 @@
# BITS configuration file
[bits]
# To run BITS in batch mode, set batch to a list of one or more of the
# following keywords; BITS will then run all of the requested operations, then
# save the log file to disk.
#
# test: Run the full BITS testsuite.
# acpi: Dump all ACPI structures.
# smbios: Dump all SMBIOS structures.
#
# Leave batch set to an empty string to disable batch mode.
# batch =
# Uncomment the following to run all available batch operations
# please take a look at boot/python/init.py in bits zip file
# to see how these options are parsed and used.
batch = test acpi smbios

File diff suppressed because it is too large


@@ -0,0 +1,283 @@
# Copyright (c) 2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for ACPI"""
import acpi
import bits
import bits.mwait
import struct
import testutil
import testsuite
import time
def register_tests():
testsuite.add_test("ACPI _MAT (Multiple APIC Table Entry) under Processor objects", test_mat, submenu="ACPI Tests")
# testsuite.add_test("ACPI _PSS (Pstate) table conformance tests", test_pss, submenu="ACPI Tests")
# testsuite.add_test("ACPI _PSS (Pstate) runtime tests", test_pstates, submenu="ACPI Tests")
testsuite.add_test("ACPI DSDT (Differentiated System Description Table)", test_dsdt, submenu="ACPI Tests")
testsuite.add_test("ACPI FACP (Fixed ACPI Description Table)", test_facp, submenu="ACPI Tests")
testsuite.add_test("ACPI HPET (High Precision Event Timer Table)", test_hpet, submenu="ACPI Tests")
testsuite.add_test("ACPI MADT (Multiple APIC Description Table)", test_apic, submenu="ACPI Tests")
testsuite.add_test("ACPI MPST (Memory Power State Table)", test_mpst, submenu="ACPI Tests")
testsuite.add_test("ACPI RSDP (Root System Description Pointer Structure)", test_rsdp, submenu="ACPI Tests")
testsuite.add_test("ACPI XSDT (Extended System Description Table)", test_xsdt, submenu="ACPI Tests")
def test_mat():
cpupaths = acpi.get_cpupaths()
apic = acpi.parse_apic()
procid_apicid = apic.procid_apicid
uid_x2apicid = apic.uid_x2apicid
for cpupath in cpupaths:
# Find the ProcId defined by the processor object
processor = acpi.evaluate(cpupath)
# Find the UID defined by the processor object's _UID method
uid = acpi.evaluate(cpupath + "._UID")
mat_buffer = acpi.evaluate(cpupath + "._MAT")
if mat_buffer is None:
continue
# Process each _MAT subtable
mat = acpi._MAT(mat_buffer)
for index, subtable in enumerate(mat):
if subtable.subtype == acpi.MADT_TYPE_LOCAL_APIC:
if subtable.flags.bits.enabled:
testsuite.test("{} Processor declaration ProcId = _MAT ProcId".format(cpupath), processor.ProcId == subtable.proc_id)
testsuite.print_detail("{} ProcId ({:#02x}) != _MAT ProcId ({:#02x})".format(cpupath, processor.ProcId, subtable.proc_id))
testsuite.print_detail("Processor Declaration: {}".format(processor))
testsuite.print_detail("_MAT entry[{}]: {}".format(index, subtable))
if testsuite.test("{} with local APIC in _MAT has local APIC in MADT".format(cpupath), processor.ProcId in procid_apicid):
testsuite.test("{} ApicId derived using Processor declaration ProcId = _MAT ApicId".format(cpupath), procid_apicid[processor.ProcId] == subtable.apic_id)
testsuite.print_detail("{} ApicId derived from MADT ({:#02x}) != _MAT ApicId ({:#02x})".format(cpupath, procid_apicid[processor.ProcId], subtable.apic_id))
testsuite.print_detail("Processor Declaration: {}".format(processor))
testsuite.print_detail("_MAT entry[{}]: {}".format(index, subtable))
if subtable.subtype == acpi.MADT_TYPE_LOCAL_X2APIC:
if subtable.flags.bits.enabled:
if testsuite.test("{} with x2Apic in _MAT has _UID".format(cpupath), uid is not None):
testsuite.test("{}._UID = _MAT UID".format(cpupath), uid == subtable.uid)
testsuite.print_detail("{}._UID ({:#x}) != _MAT UID ({:#x})".format(cpupath, uid, subtable.uid))
testsuite.print_detail("_MAT entry[{}]: {}".format(index, subtable))
if testsuite.test("{} with _MAT x2Apic has x2Apic in MADT".format(cpupath), subtable.uid in uid_x2apicid):
testsuite.test("{} x2ApicId derived from MADT using UID = _MAT x2ApicId".format(cpupath), uid_x2apicid[subtable.uid] == subtable.x2apicid)
testsuite.print_detail("{} x2ApicId derived from MADT ({:#02x}) != _MAT x2ApicId ({:#02x})".format(cpupath, uid_x2apicid[subtable.uid], subtable.x2apicid))
testsuite.print_detail("_MAT entry[{}]: {}".format(index, subtable))
def test_pss():
uniques = acpi.parse_cpu_method("_PSS")
# We special-case None here to avoid a double-failure for CPUs without a _PSS
testsuite.test("_PSS must be identical for all CPUs", len(uniques) <= 1 or (len(uniques) == 2 and None in uniques))
for pss, cpupaths in uniques.iteritems():
if not testsuite.test("_PSS must exist", pss is not None):
testsuite.print_detail(acpi.factor_commonprefix(cpupaths))
testsuite.print_detail('No _PSS exists')
continue
if not testsuite.test("_PSS must not be empty", pss.pstates):
testsuite.print_detail(acpi.factor_commonprefix(cpupaths))
testsuite.print_detail('_PSS is empty')
continue
testsuite.print_detail(acpi.factor_commonprefix(cpupaths))
for index, pstate in enumerate(pss.pstates):
testsuite.print_detail("P[{}]: {}".format(index, pstate))
testsuite.test("_PSS must contain at most 16 Pstates", len(pss.pstates) <= 16)
testsuite.test("_PSS must have no duplicate Pstates", len(pss.pstates) == len(set(pss.pstates)))
frequencies = [p.core_frequency for p in pss.pstates]
testsuite.test("_PSS must list Pstates in descending order of frequency", frequencies == sorted(frequencies, reverse=True))
testsuite.test("_PSS must have Pstates with no duplicate frequencies", len(frequencies) == len(set(frequencies)))
dissipations = [p.power for p in pss.pstates]
testsuite.test("_PSS must list Pstates in descending order of power dissipation", dissipations == sorted(dissipations, reverse=True))
def test_pstates():
"""Execute and verify frequency for each Pstate in the _PSS"""
IA32_PERF_CTL = 0x199
with bits.mwait.use_hint(), bits.preserve_msr(IA32_PERF_CTL):
cpupath_procid = acpi.find_procid()
cpupath_uid = acpi.find_uid()
apic = acpi.parse_apic()
procid_apicid = apic.procid_apicid
uid_x2apicid = apic.uid_x2apicid
def cpupath_apicid(cpupath):
if procid_apicid is not None:
procid = cpupath_procid.get(cpupath, None)
if procid is not None:
apicid = procid_apicid.get(procid, None)
if apicid is not None:
return apicid
if uid_x2apicid is not None:
uid = cpupath_uid.get(cpupath, None)
if uid is not None:
apicid = uid_x2apicid.get(uid, None)
if apicid is not None:
return apicid
return bits.cpus()[0]
bclk = testutil.adjust_to_nearest(bits.bclk(), 100.0/12) * 1000000
uniques = acpi.parse_cpu_method("_PSS")
for pss, cpupaths in uniques.iteritems():
if not testsuite.test("_PSS must exist", pss is not None):
testsuite.print_detail(acpi.factor_commonprefix(cpupaths))
testsuite.print_detail('No _PSS exists')
continue
for n, pstate in enumerate(pss.pstates):
for cpupath in cpupaths:
apicid = cpupath_apicid(cpupath)
if apicid is None:
print 'Failed to find apicid for cpupath {}'.format(cpupath)
continue
bits.wrmsr(apicid, IA32_PERF_CTL, pstate.control)
# Detecting Turbo frequency requires at least 2 pstates
# since turbo frequency = max non-turbo frequency + 1
turbo = False
if len(pss.pstates) >= 2:
turbo = (n == 0 and pstate.core_frequency == (pss.pstates[1].core_frequency + 1))
if turbo:
# Needs to busywait, not sleep
start = time.time()
while (time.time() - start < 2):
pass
for duration in (0.1, 1.0):
frequency_data = bits.cpu_frequency(duration)
# Skip this sample if cpu frequency data is not available
if frequency_data is None:
continue
aperf = frequency_data[1]
aperf = testutil.adjust_to_nearest(aperf, bclk/2)
aperf = int(aperf / 1000000)
if turbo:
if aperf >= pstate.core_frequency:
break
else:
if aperf == pstate.core_frequency:
break
if turbo:
testsuite.test("P{}: Turbo measured frequency {} >= expected {} MHz".format(n, aperf, pstate.core_frequency), aperf >= pstate.core_frequency)
else:
testsuite.test("P{}: measured frequency {} MHz == expected {} MHz".format(n, aperf, pstate.core_frequency), aperf == pstate.core_frequency)
def test_psd_thread_scope():
uniques = acpi.parse_cpu_method("_PSD")
if not testsuite.test("_PSD (P-State Dependency) must exist for each processor", None not in uniques):
testsuite.print_detail(acpi.factor_commonprefix(uniques[None]))
testsuite.print_detail('No _PSD exists')
return
unique_num_dependencies = {}
unique_num_entries = {}
unique_revision = {}
unique_domain = {}
unique_coordination_type = {}
unique_num_processors = {}
for value, cpupaths in uniques.iteritems():
unique_num_dependencies.setdefault(len(value.dependencies), []).extend(cpupaths)
unique_num_entries.setdefault(value.dependencies[0].num_entries, []).extend(cpupaths)
unique_revision.setdefault(value.dependencies[0].revision, []).extend(cpupaths)
unique_domain.setdefault(value.dependencies[0].domain, []).extend(cpupaths)
unique_coordination_type.setdefault(value.dependencies[0].coordination_type, []).extend(cpupaths)
unique_num_processors.setdefault(value.dependencies[0].num_processors, []).extend(cpupaths)
def detail(d, fmt):
for value, cpupaths in sorted(d.iteritems(), key=(lambda (k,v): v)):
testsuite.print_detail(acpi.factor_commonprefix(cpupaths))
testsuite.print_detail(fmt.format(value))
testsuite.test('Dependency count for each processor must be 1', unique_num_dependencies.keys() == [1])
detail(unique_num_dependencies, 'Dependency count for each processor = {} (Expected 1)')
testsuite.test('_PSD.num_entries must be 5', unique_num_entries.keys() == [5])
detail(unique_num_entries, 'num_entries = {} (Expected 5)')
testsuite.test('_PSD.revision must be 0', unique_revision.keys() == [0])
detail(unique_revision, 'revision = {}')
testsuite.test('_PSD.coordination_type must be 0xFE (HW_ALL)', unique_coordination_type.keys() == [0xfe])
detail(unique_coordination_type, 'coordination_type = {:#x} (Expected 0xFE HW_ALL)')
testsuite.test('_PSD.domain must be unique (thread-scoped) for each processor', len(unique_domain) == len(acpi.get_cpupaths()))
detail(unique_domain, 'domain = {:#x} (Expected a unique value for each processor)')
testsuite.test('_PSD.num_processors must be 1', unique_num_processors.keys() == [1])
detail(unique_num_processors, 'num_processors = {} (Expected 1)')
def test_table_checksum(data):
csum = sum(ord(c) for c in data) % 0x100
testsuite.test('ACPI table cumulative checksum must equal 0', csum == 0)
testsuite.print_detail("Cumulative checksum = {} (Expected 0)".format(csum))
def test_apic():
data = acpi.get_table("APIC")
if data is None:
return
test_table_checksum(data)
apic = acpi.parse_apic()
def test_dsdt():
data = acpi.get_table("DSDT")
if data is None:
return
test_table_checksum(data)
def test_facp():
data = acpi.get_table("FACP")
if data is None:
return
test_table_checksum(data)
facp = acpi.parse_facp()
def test_hpet():
data = acpi.get_table("HPET")
if data is None:
return
test_table_checksum(data)
hpet = acpi.parse_hpet()
def test_mpst():
data = acpi.get_table("MPST")
if data is None:
return
test_table_checksum(data)
mpst = acpi.MPST(data)
def test_rsdp():
data = acpi.get_table("RSD PTR ")
if data is None:
return
# Checksum the first 20 bytes per ACPI 1.0
csum = sum(ord(c) for c in data[:20]) % 0x100
testsuite.test('ACPI 1.0 table first 20 bytes cumulative checksum must equal 0', csum == 0)
testsuite.print_detail("Cumulative checksum = {} (Expected 0)".format(csum))
test_table_checksum(data)
rsdp = acpi.parse_rsdp()
def test_xsdt():
data = acpi.get_table("XSDT")
if data is None:
return
test_table_checksum(data)
xsdt = acpi.parse_xsdt()


@@ -0,0 +1,83 @@
# Copyright (c) 2012, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests and helpers for CPUID."""
import bits
import testsuite
import testutil
def cpuid_helper(function, index=None, shift=0, mask=~0, eax_mask=~0, ebx_mask=~0, ecx_mask=~0, edx_mask=~0):
if index is None:
index = 0
indexdesc = ""
else:
indexdesc = " index {0:#x}".format(index)
def find_mask(m):
if m == ~0:
return mask
return m
masks = map(find_mask, [eax_mask, ebx_mask, ecx_mask, edx_mask])
uniques = {}
for cpu in bits.cpus():
regs = bits.cpuid_result(*[(r >> shift) & m for r, m in zip(bits.cpuid(cpu, function, index), masks)])
uniques.setdefault(regs, []).append(cpu)
desc = ["CPUID function {:#x}{}".format(function, indexdesc)]
if shift != 0:
desc.append("Register values have been shifted by {}".format(shift))
if mask != ~0 or eax_mask != ~0 or ebx_mask != ~0 or ecx_mask != ~0 or edx_mask != ~0:
desc.append("Register values have been masked:")
shifted_masks = bits.cpuid_result(*[m << shift for m in masks])
desc.append("Masks: eax={eax:#010x} ebx={ebx:#010x} ecx={ecx:#010x} edx={edx:#010x}".format(**shifted_masks._asdict()))
if len(uniques) > 1:
regvalues = zip(*uniques.iterkeys())
common_masks = bits.cpuid_result(*map(testutil.find_common_mask, regvalues))
common_values = bits.cpuid_result(*[v[0] & m for v, m in zip(regvalues, common_masks)])
desc.append('Register values are not unique across all logical processors')
desc.append("Common bits: eax={eax:#010x} ebx={ebx:#010x} ecx={ecx:#010x} edx={edx:#010x}".format(**common_values._asdict()))
desc.append("Mask of common bits: {eax:#010x} {ebx:#010x} {ecx:#010x} {edx:#010x}".format(**common_masks._asdict()))
for regs in sorted(uniques.iterkeys()):
cpus = uniques[regs]
desc.append("Register value: eax={eax:#010x} ebx={ebx:#010x} ecx={ecx:#010x} edx={edx:#010x}".format(**regs._asdict()))
desc.append("On {0} CPUs: {1}".format(len(cpus), testutil.apicid_list(cpus)))
return uniques, desc
def test_cpuid_consistency(text, function, index=None, shift=0, mask=~0, eax_mask=~0, ebx_mask=~0, ecx_mask=~0, edx_mask=~0):
uniques, desc = cpuid_helper(function, index, shift, mask, eax_mask, ebx_mask, ecx_mask, edx_mask)
desc[0] += " Consistency Check"
if text:
desc.insert(0, text)
status = testsuite.test(desc[0], len(uniques) == 1)
for line in desc[1:]:
testsuite.print_detail(line)
return status

Binary files not shown.

Some files were not shown because too many files have changed in this diff