Commit 178bacb

Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging
Pull request

- Add new thread-pool-min/thread-pool-max parameters to control the thread
  pool used for async I/O.
- Fix virtio-scsi IOThread 100% CPU consumption QEMU 7.0 regression.

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAmJ5DqgACgkQnKSrs4Gr
# c8iAqAf/WEJzEso0Hu3UUYJi2lAXpLxWPjoNBlPdQlKIJ/I0zQIF0P7GeCifF+0l
# iMjgBv0ofyAuV47gaTJlVrAR75+hJ/IXNDhnu3UuvNWfVOqvksgw6kuHkMo9A2hC
# 4tIHEU9J8jbQSSdQTaZR8Zj4FX1/zcxMBAXT3YO3De6zo78RatBTuNP4dsZzt8bI
# Qs1a4A0p2ScNXK8EcF4QwAWfoxu9OPPzN52DBCNxcIcnn0SUab4NbDxzpRV4ZhDP
# 08WoafI5O+2Kb36QysJN01LqajHrClG/fozrPzBLq5aZUK3xewJGB1hEdGTLkkmz
# NJNBg5Ldszwj4PDZ1dFU3/03aigb3g==
# =t5eR
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 09 May 2022 05:52:56 AM PDT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <[email protected]>" [full]
# gpg:                 aka "Stefan Hajnoczi <[email protected]>" [full]

* tag 'block-pull-request' of https://gitlab.com/stefanha/qemu:
  virtio-scsi: move request-related items from .h to .c
  virtio-scsi: clean up virtio_scsi_handle_cmd_vq()
  virtio-scsi: clean up virtio_scsi_handle_ctrl_vq()
  virtio-scsi: clean up virtio_scsi_handle_event_vq()
  virtio-scsi: don't waste CPU polling the event virtqueue
  virtio-scsi: fix ctrl and event handler functions in dataplane mode
  util/event-loop-base: Introduce options to set the thread pool size
  util/main-loop: Introduce the main loop into QOM
  Introduce event-loop-base abstract class

Signed-off-by: Richard Henderson <[email protected]>
2 parents b0c3c60 + 3dc584a commit 178bacb
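Editor's note, a rough usage sketch (not part of this commit): the thread-pool-min and thread-pool-max properties are registered on the new event-loop-base abstract class in event-loop-base.c below, so they can be set on any QOM object whose class derives from it (e.g. an iothread, assuming it is rebased onto this class elsewhere in the series) with QEMU's existing object_property_set_int() helper. The function name and the 2/32 values here are invented for illustration:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qom/object.h"

/*
 * Hypothetical usage sketch, not part of the commit: "obj" is assumed to be
 * a QOM object whose class derives from event-loop-base. The property names
 * come from event-loop-base.c below; the sizing values are arbitrary.
 */
static void example_tune_thread_pool(Object *obj)
{
    /* Keep at least 2 worker threads warm, never grow the pool beyond 32. */
    object_property_set_int(obj, "thread-pool-min", 2, &error_abort);
    object_property_set_int(obj, "thread-pool-max", 32, &error_abort);
}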

18 files changed: +505 -143 lines changed

event-loop-base.c

Lines changed: 140 additions & 0 deletions
@@ -0,0 +1,140 @@
/*
 * QEMU event-loop base
 *
 * Copyright (C) 2022 Red Hat Inc
 *
 * Authors:
 *  Stefan Hajnoczi <[email protected]>
 *  Nicolas Saenz Julienne <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qom/object_interfaces.h"
#include "qapi/error.h"
#include "block/thread-pool.h"
#include "sysemu/event-loop-base.h"

typedef struct {
    const char *name;
    ptrdiff_t offset; /* field's byte offset in EventLoopBase struct */
} EventLoopBaseParamInfo;

static void event_loop_base_instance_init(Object *obj)
{
    EventLoopBase *base = EVENT_LOOP_BASE(obj);

    base->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;
}

static EventLoopBaseParamInfo aio_max_batch_info = {
    "aio-max-batch", offsetof(EventLoopBase, aio_max_batch),
};
static EventLoopBaseParamInfo thread_pool_min_info = {
    "thread-pool-min", offsetof(EventLoopBase, thread_pool_min),
};
static EventLoopBaseParamInfo thread_pool_max_info = {
    "thread-pool-max", offsetof(EventLoopBase, thread_pool_max),
};

static void event_loop_base_get_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    EventLoopBase *event_loop_base = EVENT_LOOP_BASE(obj);
    EventLoopBaseParamInfo *info = opaque;
    int64_t *field = (void *)event_loop_base + info->offset;

    visit_type_int64(v, name, field, errp);
}

static void event_loop_base_set_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(obj);
    EventLoopBase *base = EVENT_LOOP_BASE(obj);
    EventLoopBaseParamInfo *info = opaque;
    int64_t *field = (void *)base + info->offset;
    int64_t value;

    if (!visit_type_int64(v, name, &value, errp)) {
        return;
    }

    if (value < 0) {
        error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
                   info->name, INT64_MAX);
        return;
    }

    *field = value;

    if (bc->update_params) {
        bc->update_params(base, errp);
    }

    return;
}

static void event_loop_base_complete(UserCreatable *uc, Error **errp)
{
    EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc);
    EventLoopBase *base = EVENT_LOOP_BASE(uc);

    if (bc->init) {
        bc->init(base, errp);
    }
}

static bool event_loop_base_can_be_deleted(UserCreatable *uc)
{
    EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc);
    EventLoopBase *backend = EVENT_LOOP_BASE(uc);

    if (bc->can_be_deleted) {
        return bc->can_be_deleted(backend);
    }

    return true;
}

static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
    ucc->complete = event_loop_base_complete;
    ucc->can_be_deleted = event_loop_base_can_be_deleted;

    object_class_property_add(klass, "aio-max-batch", "int",
                              event_loop_base_get_param,
                              event_loop_base_set_param,
                              NULL, &aio_max_batch_info);
    object_class_property_add(klass, "thread-pool-min", "int",
                              event_loop_base_get_param,
                              event_loop_base_set_param,
                              NULL, &thread_pool_min_info);
    object_class_property_add(klass, "thread-pool-max", "int",
                              event_loop_base_get_param,
                              event_loop_base_set_param,
                              NULL, &thread_pool_max_info);
}

static const TypeInfo event_loop_base_info = {
    .name = TYPE_EVENT_LOOP_BASE,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(EventLoopBase),
    .instance_init = event_loop_base_instance_init,
    .class_size = sizeof(EventLoopBaseClass),
    .class_init = event_loop_base_class_init,
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};

static void register_types(void)
{
    type_register_static(&event_loop_base_info);
}
type_init(register_types);
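The abstract class above only validates and stores the property values; applying them is delegated to subclasses through the update_params hook invoked at the end of event_loop_base_set_param(). A minimal sketch of how a hypothetical subclass might wire that hook to the new AioContext helper declared in include/block/aio.h further down; the ExampleEventLoop type and its ctx field are invented for illustration, and the EVENT_LOOP_BASE_CLASS() checker is assumed to follow the usual QOM naming convention:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "sysemu/event-loop-base.h"

/* Hypothetical subclass: a concrete event loop that owns one AioContext. */
typedef struct {
    EventLoopBase parent_obj;
    AioContext *ctx;
} ExampleEventLoop;

static void example_event_loop_update_params(EventLoopBase *base, Error **errp)
{
    ExampleEventLoop *loop = (ExampleEventLoop *)base;

    /* Push the QOM property values down into the AioContext's thread pool. */
    aio_context_set_thread_pool_params(loop->ctx, base->thread_pool_min,
                                       base->thread_pool_max, errp);
}

static void example_event_loop_class_init(ObjectClass *klass, void *class_data)
{
    EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass);

    /* Invoked by event_loop_base_set_param() whenever a property changes. */
    bc->update_params = example_event_loop_update_params;
}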

hw/scsi/virtio-scsi-dataplane.c

Lines changed: 1 addition & 1 deletion
@@ -138,7 +138,7 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
 
     aio_context_acquire(s->ctx);
     virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
-    virtio_queue_aio_attach_host_notifier(vs->event_vq, s->ctx);
+    virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);
 
     for (i = 0; i < vs->conf.num_queues; i++) {
         virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);

hw/scsi/virtio-scsi.c

Lines changed: 71 additions & 30 deletions
@@ -29,6 +29,43 @@
 #include "hw/virtio/virtio-access.h"
 #include "trace.h"
 
+typedef struct VirtIOSCSIReq {
+    /*
+     * Note:
+     * - fields up to resp_iov are initialized by virtio_scsi_init_req;
+     * - fields starting at vring are zeroed by virtio_scsi_init_req.
+     */
+    VirtQueueElement elem;
+
+    VirtIOSCSI *dev;
+    VirtQueue *vq;
+    QEMUSGList qsgl;
+    QEMUIOVector resp_iov;
+
+    union {
+        /* Used for two-stage request submission */
+        QTAILQ_ENTRY(VirtIOSCSIReq) next;
+
+        /* Used for cancellation of request during TMFs */
+        int remaining;
+    };
+
+    SCSIRequest *sreq;
+    size_t resp_size;
+    enum SCSIXferMode mode;
+    union {
+        VirtIOSCSICmdResp     cmd;
+        VirtIOSCSICtrlTMFResp tmf;
+        VirtIOSCSICtrlANResp  an;
+        VirtIOSCSIEvent       event;
+    } resp;
+    union {
+        VirtIOSCSICmdReq      cmd;
+        VirtIOSCSICtrlTMFReq  tmf;
+        VirtIOSCSICtrlANReq   an;
+    } req;
+} VirtIOSCSIReq;
+
 static inline int virtio_scsi_get_lun(uint8_t *lun)
 {
     return ((lun[2] << 8) | lun[3]) & 0x3FFF;
@@ -45,7 +82,7 @@ static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun)
     return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
 }
 
-void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
+static void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(s);
     const size_t zero_skip =
@@ -58,7 +95,7 @@ void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
     memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
 }
 
-void virtio_scsi_free_req(VirtIOSCSIReq *req)
+static void virtio_scsi_free_req(VirtIOSCSIReq *req)
 {
     qemu_iovec_destroy(&req->resp_iov);
     qemu_sglist_destroy(&req->qsgl);
@@ -460,28 +497,41 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
     }
 }
 
-bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
+static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
 {
     VirtIOSCSIReq *req;
-    bool progress = false;
 
     while ((req = virtio_scsi_pop_req(s, vq))) {
-        progress = true;
         virtio_scsi_handle_ctrl_req(s, req);
     }
-    return progress;
+}
+
+/*
+ * If dataplane is configured but not yet started, do so now and return true on
+ * success.
+ *
+ * Dataplane is started by the core virtio code but virtqueue handler functions
+ * can also be invoked when a guest kicks before DRIVER_OK, so this helper
+ * function helps us deal with manually starting ioeventfd in that case.
+ */
+static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
+{
+    if (!s->ctx || s->dataplane_started) {
+        return false;
+    }
+
+    virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
+    return !s->dataplane_fenced;
 }
 
 static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIOSCSI *s = (VirtIOSCSI *)vdev;
 
-    if (s->ctx) {
-        virtio_device_start_ioeventfd(vdev);
-        if (!s->dataplane_fenced) {
-            return;
-        }
+    if (virtio_scsi_defer_to_dataplane(s)) {
+        return;
     }
+
     virtio_scsi_acquire(s);
     virtio_scsi_handle_ctrl_vq(s, vq);
     virtio_scsi_release(s);
@@ -672,12 +722,11 @@ static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
     scsi_req_unref(sreq);
 }
 
-bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
+static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
 {
     VirtIOSCSIReq *req, *next;
     int ret = 0;
     bool suppress_notifications = virtio_queue_get_notification(vq);
-    bool progress = false;
 
     QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
 
@@ -687,7 +736,6 @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
     }
 
     while ((req = virtio_scsi_pop_req(s, vq))) {
-        progress = true;
         ret = virtio_scsi_handle_cmd_req_prepare(s, req);
         if (!ret) {
             QTAILQ_INSERT_TAIL(&reqs, req, next);
@@ -712,20 +760,17 @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
     QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
         virtio_scsi_handle_cmd_req_submit(s, req);
     }
-    return progress;
 }
 
 static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
 {
     /* use non-QOM casts in the data path */
     VirtIOSCSI *s = (VirtIOSCSI *)vdev;
 
-    if (s->ctx && !s->dataplane_started) {
-        virtio_device_start_ioeventfd(vdev);
-        if (!s->dataplane_fenced) {
-            return;
-        }
+    if (virtio_scsi_defer_to_dataplane(s)) {
+        return;
     }
+
     virtio_scsi_acquire(s);
     virtio_scsi_handle_cmd_vq(s, vq);
     virtio_scsi_release(s);
@@ -793,8 +838,8 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
     s->events_dropped = false;
 }
 
-void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
-                            uint32_t event, uint32_t reason)
+static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
+                                   uint32_t event, uint32_t reason)
 {
     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
     VirtIOSCSIReq *req;
@@ -842,25 +887,21 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
     virtio_scsi_complete_req(req);
 }
 
-bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
+static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
 {
     if (s->events_dropped) {
         virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
-        return true;
     }
-    return false;
 }
 
 static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);
 
-    if (s->ctx) {
-        virtio_device_start_ioeventfd(vdev);
-        if (!s->dataplane_fenced) {
-            return;
-        }
+    if (virtio_scsi_defer_to_dataplane(s)) {
+        return;
     }
+
     virtio_scsi_acquire(s);
     virtio_scsi_handle_event_vq(s, vq);
     virtio_scsi_release(s);

hw/virtio/virtio.c

Lines changed: 13 additions & 0 deletions
@@ -3534,6 +3534,19 @@ void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
                                 virtio_queue_host_notifier_aio_poll_end);
 }
 
+/*
+ * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
+ * this for rx virtqueues and similar cases where the virtqueue handler
+ * function does not pop all elements. When the virtqueue is left non-empty
+ * polling consumes CPU cycles and should not be used.
+ */
+void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
+{
+    aio_set_event_notifier(ctx, &vq->host_notifier, true,
+                           virtio_queue_host_notifier_read,
+                           NULL, NULL);
+}
+
 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
 {
     aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);

include/block/aio.h

Lines changed: 10 additions & 0 deletions
@@ -192,6 +192,8 @@ struct AioContext {
     QSLIST_HEAD(, Coroutine) scheduled_coroutines;
     QEMUBH *co_schedule_bh;
 
+    int thread_pool_min;
+    int thread_pool_max;
     /* Thread pool for performing work and receiving completion callbacks.
      * Has its own locking.
      */
@@ -769,4 +771,12 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
 void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                 Error **errp);
 
+/**
+ * aio_context_set_thread_pool_params:
+ * @ctx: the aio context
+ * @min: min number of threads to have readily available in the thread pool
+ * @max: max number of threads the thread pool can contain
+ */
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
+                                        int64_t max, Error **errp);
 #endif
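A hedged usage sketch of the new declaration from code that owns an AioContext; the function name and the 4/64 sizing are made up for illustration:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"

/* Illustration only: keep 4 worker threads ready and cap the pool at 64. */
static void example_resize_thread_pool(AioContext *ctx)
{
    aio_context_set_thread_pool_params(ctx, 4, 64, &error_abort);
}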
