tpm: replace GThreadPool with AIO threadpool

The TPM backend uses a GThreadPool to handle IO in a separate
thread. However, GThreadPool isn't integrated with the Qemu main
loop, making it unnecessarily complicated to deal with.

Qemu has an AIO threadpool that is better integrated with the main
loop and various IO functions, and that provides a completion BH by
default.
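
For context, a minimal sketch of how such a pool is driven; my_worker,
my_complete and my_submit are illustrative names, not part of this
commit, but the calls match the thread-pool API of this era:

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"
    #include "block/aio.h"
    #include "block/thread-pool.h"

    /* Runs in a pool thread and may block; the int it returns is
     * passed to the completion callback as 'ret'. */
    static int my_worker(void *opaque)
    {
        /* ... blocking IO on the request in 'opaque' ... */
        return 0;
    }

    /* Completion callback: invoked as a bottom half in the main
     * loop, under the BQL, once my_worker() has returned. */
    static void my_complete(void *opaque, int ret)
    {
        /* ... hand the finished request back to the frontend ... */
    }

    static void my_submit(void *request)
    {
        ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());

        thread_pool_submit_aio(pool, my_worker, request,
                               my_complete, request);
    }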

Remove the only user of GThreadPool from qemu, and use the AIO
threadpool instead.

Note that the backend:
- no longer accepts queueing multiple requests (unneeded so far)
- increases the ref count on itself while handling a command, for
  extra safety
- renames tpm_backend_thread_end() to tpm_backend_finish_sync(),
  which waits for completion of the request_completed BH and will
  help with migration handling (see the sketch after this list).
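
A hypothetical caller-side sketch of the resulting lifecycle;
example_cycle and frontend_cmd are illustrative, the real entry
points are in the diff below:

    static void example_cycle(TPMBackend *s, TPMBackendCmd *frontend_cmd)
    {
        /* Hand the command to the AIO thread pool; the backend now
         * keeps a single pending command in s->cmd and reports an
         * error if one is already in flight. */
        tpm_backend_deliver_request(s, frontend_cmd);

        /* Drain it synchronously: aio_poll() the main AioContext
         * until the completion BH has run and cleared s->cmd.  This
         * is what reset/startup (and later migration) rely on. */
        tpm_backend_finish_sync(s);
    }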

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
commit c4fb8561bc (parent 05b71fb207)
Marc-André Lureau, 2018-01-29 19:33:05 +01:00, committed by Stefan Berger

2 changed files with 37 additions and 27 deletions

diff --git a/backends/tpm.c b/backends/tpm.c

@@ -19,30 +19,35 @@
 #include "sysemu/tpm.h"
 #include "qemu/thread.h"
 #include "qemu/main-loop.h"
+#include "block/thread-pool.h"
+#include "qemu/error-report.h"
 
-static void tpm_backend_request_completed_bh(void *opaque)
+static void tpm_backend_request_completed(void *opaque, int ret)
 {
     TPMBackend *s = TPM_BACKEND(opaque);
     TPMIfClass *tic = TPM_IF_GET_CLASS(s->tpmif);
 
     tic->request_completed(s->tpmif);
+
+    /* no need for atomic, as long the BQL is taken */
+    s->cmd = NULL;
+    object_unref(OBJECT(s));
 }
 
-static void tpm_backend_worker_thread(gpointer data, gpointer user_data)
+static int tpm_backend_worker_thread(gpointer data)
 {
-    TPMBackend *s = TPM_BACKEND(user_data);
+    TPMBackend *s = TPM_BACKEND(data);
     TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
 
-    k->handle_request(s, (TPMBackendCmd *)data);
+    k->handle_request(s, s->cmd);
 
-    qemu_bh_schedule(s->bh);
+    return 0;
 }
 
-static void tpm_backend_thread_end(TPMBackend *s)
+void tpm_backend_finish_sync(TPMBackend *s)
 {
-    if (s->thread_pool) {
-        g_thread_pool_free(s->thread_pool, FALSE, TRUE);
-        s->thread_pool = NULL;
+    while (s->cmd) {
+        aio_poll(qemu_get_aio_context(), true);
     }
 }
 
@@ -74,10 +79,7 @@ int tpm_backend_startup_tpm(TPMBackend *s, size_t buffersize)
     TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
 
     /* terminate a running TPM */
-    tpm_backend_thread_end(s);
-
-    s->thread_pool = g_thread_pool_new(tpm_backend_worker_thread, s, 1, TRUE,
-                                       NULL);
+    tpm_backend_finish_sync(s);
 
     res = k->startup_tpm ? k->startup_tpm(s, buffersize) : 0;
 
@@ -93,7 +95,17 @@ bool tpm_backend_had_startup_error(TPMBackend *s)
 
 void tpm_backend_deliver_request(TPMBackend *s, TPMBackendCmd *cmd)
 {
-    g_thread_pool_push(s->thread_pool, cmd, NULL);
+    ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
+
+    if (s->cmd != NULL) {
+        error_report("There is a TPM request pending");
+        return;
+    }
+
+    s->cmd = cmd;
+    object_ref(OBJECT(s));
+    thread_pool_submit_aio(pool, tpm_backend_worker_thread, s,
+                           tpm_backend_request_completed, s);
 }
 
 void tpm_backend_reset(TPMBackend *s)
@@ -104,7 +116,7 @@ void tpm_backend_reset(TPMBackend *s)
         k->reset(s);
     }
 
-    tpm_backend_thread_end(s);
+    tpm_backend_finish_sync(s);
 
     s->had_startup_error = false;
 }
@@ -159,28 +171,18 @@ TPMInfo *tpm_backend_query_tpm(TPMBackend *s)
     return info;
 }
 
-static void tpm_backend_instance_init(Object *obj)
-{
-    TPMBackend *s = TPM_BACKEND(obj);
-
-    s->bh = qemu_bh_new(tpm_backend_request_completed_bh, s);
-}
-
 static void tpm_backend_instance_finalize(Object *obj)
 {
     TPMBackend *s = TPM_BACKEND(obj);
 
     object_unref(OBJECT(s->tpmif));
     g_free(s->id);
-    tpm_backend_thread_end(s);
-    qemu_bh_delete(s->bh);
 }
 
 static const TypeInfo tpm_backend_info = {
     .name = TYPE_TPM_BACKEND,
     .parent = TYPE_OBJECT,
     .instance_size = sizeof(TPMBackend),
-    .instance_init = tpm_backend_instance_init,
     .instance_finalize = tpm_backend_instance_finalize,
     .class_size = sizeof(TPMBackendClass),
     .abstract = true,

diff --git a/include/sysemu/tpm_backend.h b/include/sysemu/tpm_backend.h

@@ -45,9 +45,8 @@ struct TPMBackend {
     /*< protected >*/
     TPMIf *tpmif;
     bool opened;
-    GThreadPool *thread_pool;
     bool had_startup_error;
-    QEMUBH *bh;
+    TPMBackendCmd *cmd;
 
     /* <public> */
     char *id;
@@ -196,6 +195,15 @@ TPMVersion tpm_backend_get_tpm_version(TPMBackend *s);
  */
 size_t tpm_backend_get_buffer_size(TPMBackend *s);
 
+/**
+ * tpm_backend_finish_sync:
+ * @s: the backend to call into
+ *
+ * Finish the pending command synchronously (this will call aio_poll()
+ * on qemu main AIOContext until it ends)
+ */
+void tpm_backend_finish_sync(TPMBackend *s);
+
 /**
  * tpm_backend_query_tpm:
  * @s: the backend