s390x updates:

- rework interrupt handling for tcg, smp is now considered non-experimental
 - some general improvements in the flic
 - improvements in the pci code, and wiring it up in tcg
 - add PTFF subfunctions for multiple-epoch to the cpu model
 - maintainership updates
 - various other fixes and improvements
 -----BEGIN PGP SIGNATURE-----
 
 iQJGBAABCAAwFiEEw9DWbcNiT/aowBjO3s9rk8bwL68FAlp9ZCISHGNvaHVja0By
 ZWRoYXQuY29tAAoJEN7Pa5PG8C+vuhwQAKS4IepY5sEA0eUScpA+xDBLiXYkwTIP
 Nq8Jm553voTvKgadBmYp8zC4U0jxfbsKd6NFxFisRiFtxsrT91L/akQTolTRU2F4
 i44K6LCcfqWIclSWQrZVGoxYpg7h2tZYBsq9stZOT+8eeoNeseZLMQGzjif9DbmS
 jTwGLcWUAmzvemk/rygzRHHhpNS3Ed5OHCeMunpG+RuKlRBkleVU9yft2QbztNtG
 LOBlLtlKX+6r8A0Xv3yKh/q9q+lxEOebONt8A1MhpcLlqMP3XHesS+T7v/rh7Bld
 CXCYWXy6QHm4xDUV+9rflawaNisYnZDJfovKk7k6UPHi+EXhivPBjT0teS/CZ0D+
 O+iDiXwlZc58/5A7fsUoXFzj/ZlLu5fJXvmGz0iYyDh+UthjqzrDqdcPp5h788gH
 8TsjapTc9X6g+D8lgPVh93/ejnCcVqaeSrjEbmjzksnBprTMEb94ew8nnjcWlppR
 zii1fQfpstoaBUWDPMpHImnL46oSEyKJ1XF/NA4X/gRIQsxfuUbtQAfwv+rgamQb
 T3nVyvdu0vlO+SSrkCJ/siZ+Nyf4A+ksNfIBSfUPgzVF6J0nzJ/uD3jOkxm/M5pa
 ScFrEop41jYcrZsOJqrb2HifzJpETssd4+s9lD44vUimiORpEPoeG7zF5sqbrlvc
 fO9X3ufY8n2i
 =/u33
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20180209' into staging

s390x updates:
- rework interrupt handling for tcg, smp is now considered non-experimental
- some general improvements in the flic
- improvements in the pci code, and wiring it up in tcg
- add PTFF subfunctions for multiple-epoch to the cpu model
- maintainership updates
- various other fixes and improvements

# gpg: Signature made Fri 09 Feb 2018 09:04:34 GMT
# gpg:                using RSA key DECF6B93C6F02FAF
# gpg: Good signature from "Cornelia Huck <conny@cornelia-huck.de>"
# gpg:                 aka "Cornelia Huck <huckc@linux.vnet.ibm.com>"
# gpg:                 aka "Cornelia Huck <cornelia.huck@de.ibm.com>"
# gpg:                 aka "Cornelia Huck <cohuck@kernel.org>"
# gpg:                 aka "Cornelia Huck <cohuck@redhat.com>"
# Primary key fingerprint: C3D0 D66D C362 4FF6 A8C0  18CE DECF 6B93 C6F0 2FAF

* remotes/cohuck/tags/s390x-20180209: (29 commits)
  MAINTAINERS: add David as additional tcg/s390 maintainer
  MAINTAINERS: reorganize s390-ccw bios maintainership
  MAINTAINERS: add myself as overall s390x maintainer
  s390x/pci: use the right pal and pba in reg_ioat()
  s390x/pci: fixup global refresh
  s390x/pci: fixup the code walking IOMMU tables
  s390x/cpumodel: model PTFF subfunctions for Multiple-epoch facility
  s390x/cpumodel: allow zpci features in qemu model
  s390x/tcg: wire up pci instructions
  s390x/sclp: fix event mask handling
  s390x/flic: cache the common flic class in a central function
  s390x/kvm: cache the kvm flic in a central function
  s390x/tcg: cache the qemu flic in a central function
  configure: s390x supports mttcg now
  s390x/tcg: remove SMP warning
  s390x/tcg: STSI overhaul
  s390x: fix size + content of STSI blocks
  s390x/flic: optimize CPU wakeup for TCG
  s390x/flic: implement qemu_s390_clear_io_flic()
  s390x/tcg: implement TEST PENDING INTERRUPTION
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2018-02-09 11:46:32 +00:00
commit fdcbebe451
28 changed files with 1241 additions and 549 deletions
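
The central change in this series runs through most of the hunks below: floating interrupts (service, I/O, channel-report machine check) are no longer queued per-CPU or injected via kvm_enabled() branches; instead the S390FLICState class gains inject_service/inject_io/inject_crw_mchk hooks that the QEMU (TCG) and KVM FLICs each implement. A minimal standalone sketch of that dispatch shape, not part of the commit, with made-up backend bodies and nothing assumed beyond what the s390_flic.h and interrupt.c hunks below show:

    /* toy model of the new FLIC hook dispatch; simplified types, not QEMU code */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct FlicClass {
        void (*inject_service)(uint32_t parm);
        void (*inject_crw_mchk)(void);
    } FlicClass;

    static void tcg_inject_service(uint32_t parm)
    {
        printf("qemu flic: queue service interrupt, parm=0x%x\n", (unsigned)parm);
    }

    static void tcg_inject_crw_mchk(void)
    {
        printf("qemu flic: queue channel-report machine check\n");
    }

    static const FlicClass qemu_flic = {
        .inject_service  = tcg_inject_service,
        .inject_crw_mchk = tcg_inject_crw_mchk,
    };

    /* stands in for s390_get_flic_class(s390_get_flic()) */
    static const FlicClass *get_flic_class(void)
    {
        return &qemu_flic;
    }

    static void sclp_extint(uint32_t parm)
    {
        /* no kvm_enabled() branch any more; the per-FLIC hook decides */
        get_flic_class()->inject_service(parm);
    }

    int main(void)
    {
        sclp_extint(0x12345678);
        get_flic_class()->inject_crw_mchk();
        return 0;
    }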


@ -76,6 +76,29 @@ K: ^Subject:.*(?i)trivial
T: git git://git.corpit.ru/qemu.git trivial-patches
T: git git://github.com/vivier/qemu.git trivial-patches
Architecture support
--------------------
S390
M: Cornelia Huck <cohuck@redhat.com>
S: Supported
F: default-configs/s390x-softmmu.mak
F: gdb-xml/s390*.xml
F: hw/char/sclp*.[hc]
F: hw/char/terminal3270.c
F: hw/intc/s390_flic.c
F: hw/intc/s390_flic_kvm.c
F: hw/s390x/
F: hw/vfio/ccw.c
F: hw/watchdog/wdt_diag288.c
F: include/hw/s390x/
F: include/hw/watchdog/wdt_diag288.h
F: pc-bios/s390-ccw/
F: pc-bios/s390-ccw.img
F: target/s390x/
K: ^Subject:.*(?i)s390x?
T: git git://github.com/cohuck/qemu.git s390-next
L: qemu-s390x@nongnu.org
Guest CPU cores (TCG):
----------------------
Overall
@ -213,6 +236,7 @@ F: disas/ppc.c
S390
M: Richard Henderson <rth@twiddle.net>
M: Alexander Graf <agraf@suse.de>
M: David Hildenbrand <david@redhat.com>
S: Maintained
F: target/s390x/
F: hw/s390x/
@ -832,15 +856,22 @@ F: hw/char/sclp*.[hc]
F: hw/char/terminal3270.c
F: hw/s390x/
F: include/hw/s390x/
F: pc-bios/s390-ccw/
F: hw/watchdog/wdt_diag288.c
F: include/hw/watchdog/wdt_diag288.h
F: pc-bios/s390-ccw.img
F: default-configs/s390x-softmmu.mak
T: git git://github.com/cohuck/qemu.git s390-next
T: git git://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org
S390-ccw Bios
M: Christian Borntraeger <borntraeger@de.ibm.com>
M: Thomas Huth <thuth@redhat.com>
S: Supported
F: pc-bios/s390-ccw/
F: pc-bios/s390-ccw.img
T: git git://github.com/borntraeger/qemu.git s390-next
L: qemu-s390x@nongnu.org
UniCore32 Machines
-------------
PKUnity-3 SoC initramfs-with-busybox

configure

@ -1933,9 +1933,9 @@ int main(int argc, char *argv[]) {
EOF
if compile_object ; then
if grep -q BiGeNdIaN $TMPO ; then
if strings -a $TMPO | grep -q BiGeNdIaN ; then
bigendian="yes"
elif grep -q LiTtLeEnDiAn $TMPO ; then
elif strings -a $TMPO | grep -q LiTtLeEnDiAn ; then
bigendian="no"
else
echo big/little test failed
@ -6779,6 +6779,7 @@ case "$target_name" in
echo "TARGET_ABI32=y" >> $config_target_mak
;;
s390x)
mttcg=yes
gdb_xml_files="s390x-core64.xml s390-acr.xml s390-fpr.xml s390-vx.xml s390-cr.xml s390-virt.xml s390-gs.xml"
;;
tilegx)
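
The configure hunk above switches the endianness probe from grepping the object file directly to grepping the output of "strings -a". The probe itself works by compiling 16-bit constants whose in-memory byte order spells a recognizable magic string only on the matching endianness. A standalone illustration of that idea, not part of the commit; the constants below are illustrative, not the ones configure embeds:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* 0x4269,0x4745 serialize to "BiGE" on big-endian, "iBEG" on little-endian */
    static const uint16_t words[] = { 0x4269, 0x4745 };

    int main(void)
    {
        char buf[sizeof(words) + 1];

        memcpy(buf, words, sizeof(words));
        buf[sizeof(words)] = '\0';

        if (memcmp(buf, "BiGE", 4) == 0) {
            puts("big endian: the object file would carry the BiGeNdIaN marker");
        } else {
            puts("little endian: the object file would carry the LiTtLeEnDiAn marker");
        }
        return 0;
    }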


@ -22,16 +22,36 @@
#include "qapi/error.h"
#include "hw/s390x/s390-virtio-ccw.h"
S390FLICStateClass *s390_get_flic_class(S390FLICState *fs)
{
static S390FLICStateClass *class;
if (!class) {
/* we only have one flic device, so this is fine to cache */
class = S390_FLIC_COMMON_GET_CLASS(fs);
}
return class;
}
QEMUS390FLICState *s390_get_qemu_flic(S390FLICState *fs)
{
static QEMUS390FLICState *flic;
if (!flic) {
/* we only have one flic device, so this is fine to cache */
flic = QEMU_S390_FLIC(fs);
}
return flic;
}
S390FLICState *s390_get_flic(void)
{
static S390FLICState *fs;
if (!fs) {
fs = S390_FLIC_COMMON(object_resolve_path(TYPE_KVM_S390_FLIC, NULL));
if (!fs) {
fs = S390_FLIC_COMMON(object_resolve_path(TYPE_QEMU_S390_FLIC,
NULL));
}
fs = S390_FLIC_COMMON(object_resolve_path_type("",
TYPE_S390_FLIC_COMMON,
NULL));
}
return fs;
}
@ -40,8 +60,11 @@ void s390_flic_init(void)
{
DeviceState *dev;
dev = s390_flic_kvm_create();
if (!dev) {
if (kvm_enabled()) {
dev = qdev_create(NULL, TYPE_KVM_S390_FLIC);
object_property_add_child(qdev_get_machine(), TYPE_KVM_S390_FLIC,
OBJECT(dev), NULL);
} else {
dev = qdev_create(NULL, TYPE_QEMU_S390_FLIC);
object_property_add_child(qdev_get_machine(), TYPE_QEMU_S390_FLIC,
OBJECT(dev), NULL);
@ -78,14 +101,41 @@ static void qemu_s390_release_adapter_routes(S390FLICState *fs,
static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
uint16_t subchannel_nr)
{
/* Fixme TCG */
return -ENOSYS;
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
QEMUS390FlicIO *cur, *next;
uint8_t isc;
g_assert(qemu_mutex_iothread_locked());
if (!(flic->pending & FLIC_PENDING_IO)) {
return 0;
}
/* check all iscs */
for (isc = 0; isc < 8; isc++) {
if (QLIST_EMPTY(&flic->io[isc])) {
continue;
}
/* search and delete any matching one */
QLIST_FOREACH_SAFE(cur, &flic->io[isc], next, next) {
if (cur->id == subchannel_id && cur->nr == subchannel_nr) {
QLIST_REMOVE(cur, next);
g_free(cur);
}
}
/* update our indicator bit */
if (QLIST_EMPTY(&flic->io[isc])) {
flic->pending &= ~ISC_TO_PENDING_IO(isc);
}
}
return 0;
}
static int qemu_s390_modify_ais_mode(S390FLICState *fs, uint8_t isc,
uint16_t mode)
{
QEMUS390FLICState *flic = QEMU_S390_FLIC(fs);
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
switch (mode) {
case SIC_IRQ_MODE_ALL:
@ -106,7 +156,8 @@ static int qemu_s390_modify_ais_mode(S390FLICState *fs, uint8_t isc,
static int qemu_s390_inject_airq(S390FLICState *fs, uint8_t type,
uint8_t isc, uint8_t flags)
{
QEMUS390FLICState *flic = QEMU_S390_FLIC(fs);
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
S390FLICStateClass *fsc = s390_get_flic_class(fs);
bool flag = flags & S390_ADAPTER_SUPPRESSIBLE;
uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;
@ -115,7 +166,7 @@ static int qemu_s390_inject_airq(S390FLICState *fs, uint8_t type,
return 0;
}
s390_io_interrupt(0, 0, 0, io_int_word);
fsc->inject_io(fs, 0, 0, 0, io_int_word);
if (flag && (flic->simm & AIS_MODE_MASK(isc))) {
flic->nimm |= AIS_MODE_MASK(isc);
@ -126,12 +177,180 @@ static int qemu_s390_inject_airq(S390FLICState *fs, uint8_t type,
return 0;
}
static void qemu_s390_flic_notify(uint32_t type)
{
CPUState *cs;
/*
* We have to make all CPUs see CPU_INTERRUPT_HARD, so they might
* consider it. We will kick all running CPUs and only relevant
* sleeping ones.
*/
CPU_FOREACH(cs) {
S390CPU *cpu = S390_CPU(cs);
cs->interrupt_request |= CPU_INTERRUPT_HARD;
/* ignore CPUs that are not sleeping */
if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING &&
s390_cpu_get_state(cpu) != CPU_STATE_LOAD) {
continue;
}
/* we always kick running CPUs for now, this is tricky */
if (cs->halted) {
/* don't check for subclasses, CPUs double check when waking up */
if (type & FLIC_PENDING_SERVICE) {
if (!(cpu->env.psw.mask & PSW_MASK_EXT)) {
continue;
}
} else if (type & FLIC_PENDING_IO) {
if (!(cpu->env.psw.mask & PSW_MASK_IO)) {
continue;
}
} else if (type & FLIC_PENDING_MCHK_CR) {
if (!(cpu->env.psw.mask & PSW_MASK_MCHECK)) {
continue;
}
}
}
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
}
}
uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic)
{
uint32_t tmp;
g_assert(qemu_mutex_iothread_locked());
g_assert(flic->pending & FLIC_PENDING_SERVICE);
tmp = flic->service_param;
flic->service_param = 0;
flic->pending &= ~FLIC_PENDING_SERVICE;
return tmp;
}
/* caller has to free the returned object */
QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6)
{
QEMUS390FlicIO *io;
uint8_t isc;
g_assert(qemu_mutex_iothread_locked());
if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) {
return NULL;
}
for (isc = 0; isc < 8; isc++) {
if (QLIST_EMPTY(&flic->io[isc]) || !(cr6 & ISC_TO_ISC_BITS(isc))) {
continue;
}
io = QLIST_FIRST(&flic->io[isc]);
QLIST_REMOVE(io, next);
/* update our indicator bit */
if (QLIST_EMPTY(&flic->io[isc])) {
flic->pending &= ~ISC_TO_PENDING_IO(isc);
}
return io;
}
return NULL;
}
void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic)
{
g_assert(qemu_mutex_iothread_locked());
g_assert(flic->pending & FLIC_PENDING_MCHK_CR);
flic->pending &= ~FLIC_PENDING_MCHK_CR;
}
static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm)
{
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
g_assert(qemu_mutex_iothread_locked());
/* multiplexing is good enough for sclp - kvm does it internally as well */
flic->service_param |= parm;
flic->pending |= FLIC_PENDING_SERVICE;
qemu_s390_flic_notify(FLIC_PENDING_SERVICE);
}
static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id,
uint16_t subchannel_nr, uint32_t io_int_parm,
uint32_t io_int_word)
{
const uint8_t isc = IO_INT_WORD_ISC(io_int_word);
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
QEMUS390FlicIO *io;
g_assert(qemu_mutex_iothread_locked());
io = g_new0(QEMUS390FlicIO, 1);
io->id = subchannel_id;
io->nr = subchannel_nr;
io->parm = io_int_parm;
io->word = io_int_word;
QLIST_INSERT_HEAD(&flic->io[isc], io, next);
flic->pending |= ISC_TO_PENDING_IO(isc);
qemu_s390_flic_notify(ISC_TO_PENDING_IO(isc));
}
static void qemu_s390_inject_crw_mchk(S390FLICState *fs)
{
QEMUS390FLICState *flic = s390_get_qemu_flic(fs);
g_assert(qemu_mutex_iothread_locked());
flic->pending |= FLIC_PENDING_MCHK_CR;
qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR);
}
bool qemu_s390_flic_has_service(QEMUS390FLICState *flic)
{
/* called without lock via cc->has_work, will be validated under lock */
return !!(flic->pending & FLIC_PENDING_SERVICE);
}
bool qemu_s390_flic_has_io(QEMUS390FLICState *flic, uint64_t cr6)
{
/* called without lock via cc->has_work, will be validated under lock */
return !!(flic->pending & CR6_TO_PENDING_IO(cr6));
}
bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic)
{
/* called without lock via cc->has_work, will be validated under lock */
return !!(flic->pending & FLIC_PENDING_MCHK_CR);
}
bool qemu_s390_flic_has_any(QEMUS390FLICState *flic)
{
g_assert(qemu_mutex_iothread_locked());
return !!flic->pending;
}
static void qemu_s390_flic_reset(DeviceState *dev)
{
QEMUS390FLICState *flic = QEMU_S390_FLIC(dev);
QEMUS390FlicIO *cur, *next;
int isc;
g_assert(qemu_mutex_iothread_locked());
flic->simm = 0;
flic->nimm = 0;
flic->pending = 0;
/* remove all pending io interrupts */
for (isc = 0; isc < 8; isc++) {
QLIST_FOREACH_SAFE(cur, &flic->io[isc], next, next) {
QLIST_REMOVE(cur, next);
g_free(cur);
}
}
}
bool ais_needed(void *opaque)
@ -153,6 +372,16 @@ static const VMStateDescription qemu_s390_flic_vmstate = {
}
};
static void qemu_s390_flic_instance_init(Object *obj)
{
QEMUS390FLICState *flic = QEMU_S390_FLIC(obj);
int isc;
for (isc = 0; isc < 8; isc++) {
QLIST_INIT(&flic->io[isc]);
}
}
static void qemu_s390_flic_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@ -167,6 +396,9 @@ static void qemu_s390_flic_class_init(ObjectClass *oc, void *data)
fsc->clear_io_irq = qemu_s390_clear_io_flic;
fsc->modify_ais_mode = qemu_s390_modify_ais_mode;
fsc->inject_airq = qemu_s390_inject_airq;
fsc->inject_service = qemu_s390_inject_service;
fsc->inject_io = qemu_s390_inject_io;
fsc->inject_crw_mchk = qemu_s390_inject_crw_mchk;
}
static Property s390_flic_common_properties[] = {
@ -201,6 +433,7 @@ static const TypeInfo qemu_s390_flic_info = {
.name = TYPE_QEMU_S390_FLIC,
.parent = TYPE_S390_FLIC_COMMON,
.instance_size = sizeof(QEMUS390FLICState),
.instance_init = qemu_s390_flic_instance_init,
.class_init = qemu_s390_flic_class_init,
};
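
One behaviour worth noting in the qemu_s390_inject_service()/qemu_s390_flic_dequeue_service() pair above: service interrupts are multiplexed, so parameters of injections that happen before delivery are OR-ed into a single pending event. A standalone toy of just that bookkeeping, not part of the diff, with no locking and no real FLIC state:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FLIC_PENDING_SERVICE (1 << 8)

    static struct {
        uint32_t pending;
        uint32_t service_param;
    } flic;

    static void inject_service(uint32_t parm)
    {
        /* injections before delivery collapse into one OR-ed parameter */
        flic.service_param |= parm;
        flic.pending |= FLIC_PENDING_SERVICE;
    }

    static uint32_t dequeue_service(void)
    {
        uint32_t tmp = flic.service_param;

        flic.service_param = 0;
        flic.pending &= ~FLIC_PENDING_SERVICE;
        return tmp;
    }

    int main(void)
    {
        inject_service(0x10);
        inject_service(0x03);
        assert(flic.pending & FLIC_PENDING_SERVICE);
        printf("delivered service parameter 0x%x\n", (unsigned)dequeue_service());
        assert(!flic.pending);
        return 0;
    }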


@ -35,16 +35,15 @@ typedef struct KVMS390FLICState {
bool clear_io_supported;
} KVMS390FLICState;
DeviceState *s390_flic_kvm_create(void)
static KVMS390FLICState *s390_get_kvm_flic(S390FLICState *fs)
{
DeviceState *dev = NULL;
static KVMS390FLICState *flic;
if (kvm_enabled()) {
dev = qdev_create(NULL, TYPE_KVM_S390_FLIC);
object_property_add_child(qdev_get_machine(), TYPE_KVM_S390_FLIC,
OBJECT(dev), NULL);
if (!flic) {
/* we only have one flic device, so this is fine to cache */
flic = KVM_S390_FLIC(fs);
}
return dev;
return flic;
}
/**
@ -123,20 +122,70 @@ static int flic_enqueue_irqs(void *buf, uint64_t len,
return rc ? -errno : 0;
}
int kvm_s390_inject_flic(struct kvm_s390_irq *irq)
static void kvm_s390_inject_flic(S390FLICState *fs, struct kvm_s390_irq *irq)
{
static KVMS390FLICState *flic;
static bool use_flic = true;
int r;
if (unlikely(!flic)) {
flic = KVM_S390_FLIC(s390_get_flic());
if (use_flic) {
r = flic_enqueue_irqs(irq, sizeof(*irq), s390_get_kvm_flic(fs));
if (r == -ENOSYS) {
use_flic = false;
}
if (!r) {
return;
}
}
return flic_enqueue_irqs(irq, sizeof(*irq), flic);
/* fallback to legacy KVM IOCTL in case FLIC fails */
kvm_s390_floating_interrupt_legacy(irq);
}
static void kvm_s390_inject_service(S390FLICState *fs, uint32_t parm)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_INT_SERVICE,
.u.ext.ext_params = parm,
};
kvm_s390_inject_flic(fs, &irq);
}
static void kvm_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id,
uint16_t subchannel_nr, uint32_t io_int_parm,
uint32_t io_int_word)
{
struct kvm_s390_irq irq = {
.u.io.subchannel_id = subchannel_id,
.u.io.subchannel_nr = subchannel_nr,
.u.io.io_int_parm = io_int_parm,
.u.io.io_int_word = io_int_word,
};
if (io_int_word & IO_INT_WORD_AI) {
irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
} else {
irq.type = KVM_S390_INT_IO(0, (subchannel_id & 0xff00) >> 8,
(subchannel_id & 0x0006),
subchannel_nr);
}
kvm_s390_inject_flic(fs, &irq);
}
static void kvm_s390_inject_crw_mchk(S390FLICState *fs)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_MCHK,
.u.mchk.cr14 = CR14_CHANNEL_REPORT_SC,
.u.mchk.mcic = s390_build_validity_mcic() | MCIC_SC_CP,
};
kvm_s390_inject_flic(fs, &irq);
}
static int kvm_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
uint16_t subchannel_nr)
{
KVMS390FLICState *flic = KVM_S390_FLIC(fs);
KVMS390FLICState *flic = s390_get_kvm_flic(fs);
int rc;
uint32_t sid = subchannel_id << 16 | subchannel_nr;
struct kvm_device_attr attr = {
@ -154,7 +203,7 @@ static int kvm_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id,
static int kvm_s390_modify_ais_mode(S390FLICState *fs, uint8_t isc,
uint16_t mode)
{
KVMS390FLICState *flic = KVM_S390_FLIC(fs);
KVMS390FLICState *flic = s390_get_kvm_flic(fs);
struct kvm_s390_ais_req req = {
.isc = isc,
.mode = mode,
@ -174,7 +223,7 @@ static int kvm_s390_modify_ais_mode(S390FLICState *fs, uint8_t isc,
static int kvm_s390_inject_airq(S390FLICState *fs, uint8_t type,
uint8_t isc, uint8_t flags)
{
KVMS390FLICState *flic = KVM_S390_FLIC(fs);
KVMS390FLICState *flic = s390_get_kvm_flic(fs);
uint32_t id = css_get_adapter_id(type, isc);
struct kvm_device_attr attr = {
.group = KVM_DEV_FLIC_AIRQ_INJECT,
@ -263,7 +312,7 @@ static int kvm_s390_io_adapter_map(S390FLICState *fs, uint32_t id,
.group = KVM_DEV_FLIC_ADAPTER_MODIFY,
.addr = (uint64_t)&req,
};
KVMS390FLICState *flic = KVM_S390_FLIC(fs);
KVMS390FLICState *flic = s390_get_kvm_flic(fs);
int r;
if (!kvm_gsi_routing_enabled()) {
@ -614,6 +663,9 @@ static void kvm_s390_flic_class_init(ObjectClass *oc, void *data)
fsc->clear_io_irq = kvm_s390_clear_io_flic;
fsc->modify_ais_mode = kvm_s390_modify_ais_mode;
fsc->inject_airq = kvm_s390_inject_airq;
fsc->inject_service = kvm_s390_inject_service;
fsc->inject_io = kvm_s390_inject_io;
fsc->inject_crw_mchk = kvm_s390_inject_crw_mchk;
}
static const TypeInfo kvm_s390_flic_info = {


@ -439,7 +439,7 @@ static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr,
bool do_map)
{
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
S390FLICStateClass *fsc = s390_get_flic_class(fs);
return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map);
}
@ -520,7 +520,7 @@ void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
int ret, isc;
IoAdapter *adapter;
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
S390FLICStateClass *fsc = s390_get_flic_class(fs);
/*
* Disallow multiple registrations for the same device type.
@ -566,7 +566,7 @@ static void css_clear_io_interrupt(uint16_t subchannel_id,
Error *err = NULL;
static bool no_clear_irq;
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
S390FLICStateClass *fsc = s390_get_flic_class(fs);
int r;
if (unlikely(no_clear_irq)) {
@ -640,7 +640,7 @@ void css_conditional_io_interrupt(SubchDev *sch)
int css_do_sic(CPUS390XState *env, uint8_t isc, uint16_t mode)
{
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
S390FLICStateClass *fsc = s390_get_flic_class(fs);
int r;
if (env->psw.mask & PSW_MASK_PSTATE) {
@ -666,7 +666,7 @@ out:
void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc)
{
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
S390FLICStateClass *fsc = s390_get_flic_class(fs);
uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;
IoAdapter *adapter = channel_subsys.io_adapters[type][isc];


@ -293,10 +293,10 @@ static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb)
ef->receive_mask = be32_to_cpu(tmp_mask);
/* return the SCLP's capability masks to the guest */
tmp_mask = cpu_to_be32(get_host_send_mask(ef));
tmp_mask = cpu_to_be32(get_host_receive_mask(ef));
copy_mask(WEM_RECEIVE_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
mask_length, sizeof(tmp_mask));
tmp_mask = cpu_to_be32(get_host_receive_mask(ef));
tmp_mask = cpu_to_be32(get_host_send_mask(ef));
copy_mask(WEM_SEND_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
mask_length, sizeof(tmp_mask));


@ -309,49 +309,187 @@ static uint64_t get_st_pto(uint64_t entry)
: 0;
}
static uint64_t s390_guest_io_table_walk(uint64_t guest_iota,
uint64_t guest_dma_address)
static bool rt_entry_isvalid(uint64_t entry)
{
uint64_t sto_a, pto_a, px_a;
uint64_t sto, pto, pte;
uint32_t rtx, sx, px;
return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}
rtx = calc_rtx(guest_dma_address);
sx = calc_sx(guest_dma_address);
px = calc_px(guest_dma_address);
static bool pt_entry_isvalid(uint64_t entry)
{
return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}
sto_a = guest_iota + rtx * sizeof(uint64_t);
sto = address_space_ldq(&address_space_memory, sto_a,
MEMTXATTRS_UNSPECIFIED, NULL);
sto = get_rt_sto(sto);
if (!sto) {
pte = 0;
static bool entry_isprotected(uint64_t entry)
{
return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}
/* ett is expected table type, -1 page table, 0 segment table, 1 region table */
static uint64_t get_table_index(uint64_t iova, int8_t ett)
{
switch (ett) {
case ZPCI_ETT_PT:
return calc_px(iova);
case ZPCI_ETT_ST:
return calc_sx(iova);
case ZPCI_ETT_RT:
return calc_rtx(iova);
}
return -1;
}
static bool entry_isvalid(uint64_t entry, int8_t ett)
{
switch (ett) {
case ZPCI_ETT_PT:
return pt_entry_isvalid(entry);
case ZPCI_ETT_ST:
case ZPCI_ETT_RT:
return rt_entry_isvalid(entry);
}
return false;
}
/* Return true if address translation is done */
static bool translate_iscomplete(uint64_t entry, int8_t ett)
{
switch (ett) {
case 0:
return (entry & ZPCI_TABLE_FC) ? true : false;
case 1:
return false;
}
return true;
}
static uint64_t get_frame_size(int8_t ett)
{
switch (ett) {
case ZPCI_ETT_PT:
return 1ULL << 12;
case ZPCI_ETT_ST:
return 1ULL << 20;
case ZPCI_ETT_RT:
return 1ULL << 31;
}
return 0;
}
static uint64_t get_next_table_origin(uint64_t entry, int8_t ett)
{
switch (ett) {
case ZPCI_ETT_PT:
return entry & ZPCI_PTE_ADDR_MASK;
case ZPCI_ETT_ST:
return get_st_pto(entry);
case ZPCI_ETT_RT:
return get_rt_sto(entry);
}
return 0;
}
/**
* table_translate: do translation within one table and return the following
* table origin
*
* @entry: the entry being translated, the result is stored in this.
* @to: the address of table origin.
* @ett: expected table type, 1 region table, 0 segment table and -1 page table.
* @error: error code
*/
static uint64_t table_translate(S390IOTLBEntry *entry, uint64_t to, int8_t ett,
uint16_t *error)
{
uint64_t tx, te, nto = 0;
uint16_t err = 0;
tx = get_table_index(entry->iova, ett);
te = address_space_ldq(&address_space_memory, to + tx * sizeof(uint64_t),
MEMTXATTRS_UNSPECIFIED, NULL);
if (!te) {
err = ERR_EVENT_INVALTE;
goto out;
}
pto_a = sto + sx * sizeof(uint64_t);
pto = address_space_ldq(&address_space_memory, pto_a,
MEMTXATTRS_UNSPECIFIED, NULL);
pto = get_st_pto(pto);
if (!pto) {
pte = 0;
if (!entry_isvalid(te, ett)) {
entry->perm &= IOMMU_NONE;
goto out;
}
px_a = pto + px * sizeof(uint64_t);
pte = address_space_ldq(&address_space_memory, px_a,
MEMTXATTRS_UNSPECIFIED, NULL);
if (ett == ZPCI_ETT_RT && ((te & ZPCI_TABLE_LEN_RTX) != ZPCI_TABLE_LEN_RTX
|| te & ZPCI_TABLE_OFFSET_MASK)) {
err = ERR_EVENT_INVALTL;
goto out;
}
nto = get_next_table_origin(te, ett);
if (!nto) {
err = ERR_EVENT_TT;
goto out;
}
if (entry_isprotected(te)) {
entry->perm &= IOMMU_RO;
} else {
entry->perm &= IOMMU_RW;
}
if (translate_iscomplete(te, ett)) {
switch (ett) {
case ZPCI_ETT_PT:
entry->translated_addr = te & ZPCI_PTE_ADDR_MASK;
break;
case ZPCI_ETT_ST:
entry->translated_addr = (te & ZPCI_SFAA_MASK) |
(entry->iova & ~ZPCI_SFAA_MASK);
break;
}
nto = 0;
}
out:
return pte;
if (err) {
entry->perm = IOMMU_NONE;
*error = err;
}
entry->len = get_frame_size(ett);
return nto;
}
uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
S390IOTLBEntry *entry)
{
uint64_t to = s390_pci_get_table_origin(g_iota);
int8_t ett = 1;
uint16_t error = 0;
entry->iova = addr & PAGE_MASK;
entry->translated_addr = 0;
entry->perm = IOMMU_RW;
if (entry_isprotected(g_iota)) {
entry->perm &= IOMMU_RO;
}
while (to) {
to = table_translate(entry, to, ett--, &error);
}
return error;
}
static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr,
IOMMUAccessFlags flag)
{
uint64_t pte;
uint32_t flags;
S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr);
S390IOTLBEntry *entry;
uint64_t iova = addr & PAGE_MASK;
uint16_t error = 0;
IOMMUTLBEntry ret = {
.target_as = &address_space_memory,
.iova = 0,
@ -374,26 +512,31 @@ static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr,
DPRINTF("iommu trans addr 0x%" PRIx64 "\n", addr);
if (addr < iommu->pba || addr > iommu->pal) {
return ret;
error = ERR_EVENT_OORANGE;
goto err;
}
pte = s390_guest_io_table_walk(s390_pci_get_table_origin(iommu->g_iota),
addr);
if (!pte) {
return ret;
}
flags = pte & ZPCI_PTE_FLAG_MASK;
ret.iova = addr;
ret.translated_addr = pte & ZPCI_PTE_ADDR_MASK;
ret.addr_mask = 0xfff;
if (flags & ZPCI_PTE_INVALID) {
ret.perm = IOMMU_NONE;
entry = g_hash_table_lookup(iommu->iotlb, &iova);
if (entry) {
ret.iova = entry->iova;
ret.translated_addr = entry->translated_addr;
ret.addr_mask = entry->len - 1;
ret.perm = entry->perm;
} else {
ret.perm = IOMMU_RW;
ret.iova = iova;
ret.addr_mask = ~PAGE_MASK;
ret.perm = IOMMU_NONE;
}
if (flag != IOMMU_NONE && !(flag & ret.perm)) {
error = ERR_EVENT_TPROTE;
}
err:
if (error) {
iommu->pbdev->state = ZPCI_FS_ERROR;
s390_pci_generate_error_event(error, iommu->pbdev->fh,
iommu->pbdev->fid, addr, 0);
}
return ret;
}
@ -435,6 +578,8 @@ static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus,
PCI_FUNC(devfn));
memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX);
address_space_init(&iommu->as, &iommu->mr, as_name);
iommu->iotlb = g_hash_table_new_full(g_int64_hash, g_int64_equal,
NULL, g_free);
table->iommu[PCI_SLOT(devfn)] = iommu;
g_free(mr_name);
@ -524,6 +669,7 @@ void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
void s390_pci_iommu_disable(S390PCIIOMMU *iommu)
{
iommu->enabled = false;
g_hash_table_remove_all(iommu->iotlb);
memory_region_del_subregion(&iommu->mr, MEMORY_REGION(&iommu->iommu_mr));
object_unparent(OBJECT(&iommu->iommu_mr));
}
@ -539,6 +685,7 @@ static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
}
table->iommu[PCI_SLOT(devfn)] = NULL;
g_hash_table_destroy(iommu->iotlb);
address_space_destroy(&iommu->as);
object_unparent(OBJECT(&iommu->mr));
object_unparent(OBJECT(iommu));
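
The new table walk above descends region table, then segment table, then page table, and records in entry->len the frame size of the level at which translation completed; s390_translate_iommu() then reports addr_mask = len - 1. A standalone sketch of those sizes, not part of the commit; get_frame_size() and the ZPCI_ETT_* values are copied from the hunk, the driver around them is illustrative:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ZPCI_ETT_RT  1   /* region table  */
    #define ZPCI_ETT_ST  0   /* segment table */
    #define ZPCI_ETT_PT -1   /* page table    */

    static uint64_t get_frame_size(int8_t ett)
    {
        switch (ett) {
        case ZPCI_ETT_PT:
            return 1ULL << 12;   /* 4 KiB page */
        case ZPCI_ETT_ST:
            return 1ULL << 20;   /* 1 MiB segment frame */
        case ZPCI_ETT_RT:
            return 1ULL << 31;   /* 2 GiB region frame */
        }
        return 0;
    }

    int main(void)
    {
        const int8_t levels[] = { ZPCI_ETT_RT, ZPCI_ETT_ST, ZPCI_ETT_PT };
        const char *names[]   = { "region", "segment", "page" };

        for (int i = 0; i < 3; i++) {
            uint64_t len = get_frame_size(levels[i]);
            printf("%-7s level: len=0x%" PRIx64 ", addr_mask=0x%" PRIx64 "\n",
                   names[i], len, len - 1);
        }
        return 0;
    }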


@ -148,6 +148,8 @@ enum ZpciIoatDtype {
#define ZPCI_STE_FLAG_MASK 0x7ffULL
#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK)
#define ZPCI_SFAA_MASK (~((1ULL << 20) - 1))
/* I/O Page tables */
#define ZPCI_PTE_VALID_MASK 0x400
#define ZPCI_PTE_INVALID 0x400
@ -165,10 +167,15 @@ enum ZpciIoatDtype {
#define ZPCI_TABLE_INVALID 0x20
#define ZPCI_TABLE_PROTECTED 0x200
#define ZPCI_TABLE_UNPROTECTED 0x000
#define ZPCI_TABLE_FC 0x400
#define ZPCI_TABLE_VALID_MASK 0x20
#define ZPCI_TABLE_PROT_MASK 0x200
#define ZPCI_ETT_RT 1
#define ZPCI_ETT_ST 0
#define ZPCI_ETT_PT -1
/* PCI Function States
*
* reserved: default; device has just been plugged or is in progress of being
@ -253,6 +260,13 @@ typedef struct S390MsixInfo {
uint32_t pba_offset;
} S390MsixInfo;
typedef struct S390IOTLBEntry {
uint64_t iova;
uint64_t translated_addr;
uint64_t len;
uint64_t perm;
} S390IOTLBEntry;
typedef struct S390PCIBusDevice S390PCIBusDevice;
typedef struct S390PCIIOMMU {
Object parent_obj;
@ -264,6 +278,7 @@ typedef struct S390PCIIOMMU {
uint64_t g_iota;
uint64_t pba;
uint64_t pal;
GHashTable *iotlb;
} S390PCIIOMMU;
typedef struct S390PCIIOMMUTable {
@ -320,6 +335,8 @@ void s390_pci_iommu_enable(S390PCIIOMMU *iommu);
void s390_pci_iommu_disable(S390PCIIOMMU *iommu);
void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
uint64_t faddr, uint32_t e);
uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
S390IOTLBEntry *entry);
S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx);
S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh);
S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid);


@ -571,27 +571,65 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
return 0;
}
static void s390_pci_update_iotlb(S390PCIIOMMU *iommu, S390IOTLBEntry *entry)
{
S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
IOMMUTLBEntry notify = {
.target_as = &address_space_memory,
.iova = entry->iova,
.translated_addr = entry->translated_addr,
.perm = entry->perm,
.addr_mask = ~PAGE_MASK,
};
if (entry->perm == IOMMU_NONE) {
if (!cache) {
return;
}
g_hash_table_remove(iommu->iotlb, &entry->iova);
} else {
if (cache) {
if (cache->perm == entry->perm &&
cache->translated_addr == entry->translated_addr) {
return;
}
notify.perm = IOMMU_NONE;
memory_region_notify_iommu(&iommu->iommu_mr, notify);
notify.perm = entry->perm;
}
cache = g_new(S390IOTLBEntry, 1);
cache->iova = entry->iova;
cache->translated_addr = entry->translated_addr;
cache->len = PAGE_SIZE;
cache->perm = entry->perm;
g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
}
memory_region_notify_iommu(&iommu->iommu_mr, notify);
}
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
uint32_t fh;
uint16_t error = 0;
S390PCIBusDevice *pbdev;
S390PCIIOMMU *iommu;
S390IOTLBEntry entry;
hwaddr start, end;
IOMMUTLBEntry entry;
IOMMUMemoryRegion *iommu_mr;
IOMMUMemoryRegionClass *imrc;
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
goto out;
return 0;
}
if (r2 & 0x1) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
goto out;
return 0;
}
fh = env->regs[r1] >> 32;
@ -602,7 +640,7 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
if (!pbdev) {
DPRINTF("rpcit no pci dev\n");
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
goto out;
return 0;
}
switch (pbdev->state) {
@ -622,44 +660,37 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
iommu = pbdev->iommu;
if (!iommu->g_iota) {
pbdev->state = ZPCI_FS_ERROR;
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES);
s390_pci_generate_error_event(ERR_EVENT_INVALAS, pbdev->fh, pbdev->fid,
start, 0);
goto out;
error = ERR_EVENT_INVALAS;
goto err;
}
if (end < iommu->pba || start > iommu->pal) {
pbdev->state = ZPCI_FS_ERROR;
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES);
s390_pci_generate_error_event(ERR_EVENT_OORANGE, pbdev->fh, pbdev->fid,
start, 0);
goto out;
error = ERR_EVENT_OORANGE;
goto err;
}
iommu_mr = &iommu->iommu_mr;
imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
while (start < end) {
entry = imrc->translate(iommu_mr, start, IOMMU_NONE);
if (!entry.translated_addr) {
pbdev->state = ZPCI_FS_ERROR;
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES);
s390_pci_generate_error_event(ERR_EVENT_SERR, pbdev->fh, pbdev->fid,
start, ERR_EVENT_Q_BIT);
goto out;
error = s390_guest_io_table_walk(iommu->g_iota, start, &entry);
if (error) {
break;
}
memory_region_notify_iommu(iommu_mr, entry);
start += entry.addr_mask + 1;
start += entry.len;
while (entry.iova < start && entry.iova < end) {
s390_pci_update_iotlb(iommu, &entry);
entry.iova += PAGE_SIZE;
entry.translated_addr += PAGE_SIZE;
}
}
err:
if (error) {
pbdev->state = ZPCI_FS_ERROR;
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_FUNC_IN_ERR);
s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0);
} else {
setcc(cpu, ZPCI_PCI_LS_OK);
}
setcc(cpu, ZPCI_PCI_LS_OK);
out:
return 0;
}
@ -834,6 +865,8 @@ static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib,
uint8_t dt = (g_iota >> 2) & 0x7;
uint8_t t = (g_iota >> 11) & 0x1;
pba &= ~0xfff;
pal |= 0xfff;
if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return -EINVAL;
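
The reworked rpcit_service_call() above walks the guest table once per frame and then installs plain 4 KiB IOTLB entries for every page of the requested range, even when the walk stopped at a larger frame. The arithmetic of that inner loop, reduced to a standalone sketch; not part of the commit, and the frame size and range are made-up sample values:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t start = 0x100000, end = start + 4 * PAGE_SIZE;
        uint64_t frame_len = 1ULL << 20;     /* walk completed at segment level */
        uint64_t iova = start & PAGE_MASK;   /* entry.iova as set by the walk */
        unsigned updates = 0;

        start += frame_len;                  /* "start += entry.len" in the hunk */
        while (iova < start && iova < end) {
            updates++;                       /* s390_pci_update_iotlb(iommu, &entry) */
            iova += PAGE_SIZE;
        }
        printf("installed %u 4 KiB IOTLB entries\n", updates);   /* prints 4 */
        return 0;
    }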


@ -78,10 +78,6 @@ static void s390_init_cpus(MachineState *machine)
MachineClass *mc = MACHINE_GET_CLASS(machine);
int i;
if (tcg_enabled() && max_cpus > 1) {
error_report("WARNING: SMP support on s390x is experimental!");
}
/* initialize possible_cpus */
mc->possible_cpu_arch_ids(machine);


@ -1111,7 +1111,7 @@ static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
int ret;
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
S390FLICStateClass *fsc = s390_get_flic_class(fs);
ret = virtio_ccw_get_mappings(dev);
if (ret) {
@ -1129,7 +1129,7 @@ static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
{
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
S390FLICStateClass *fsc = s390_get_flic_class(fs);
fsc->release_adapter_routes(fs, &dev->routes);
}


@ -16,6 +16,7 @@
#include "hw/sysbus.h"
#include "hw/s390x/adapter.h"
#include "hw/virtio/virtio.h"
#include "qemu/queue.h"
/*
* Reserve enough gsis to accommodate all virtio devices.
@ -66,6 +67,11 @@ typedef struct S390FLICStateClass {
int (*modify_ais_mode)(S390FLICState *fs, uint8_t isc, uint16_t mode);
int (*inject_airq)(S390FLICState *fs, uint8_t type, uint8_t isc,
uint8_t flags);
void (*inject_service)(S390FLICState *fs, uint32_t parm);
void (*inject_io)(S390FLICState *fs, uint16_t subchannel_id,
uint16_t subchannel_nr, uint32_t io_int_parm,
uint32_t io_int_word);
void (*inject_crw_mchk)(S390FLICState *fs);
} S390FLICStateClass;
#define TYPE_KVM_S390_FLIC "s390-flic-kvm"
@ -80,24 +86,57 @@ typedef struct S390FLICStateClass {
#define SIC_IRQ_MODE_SINGLE 1
#define AIS_MODE_MASK(isc) (0x80 >> isc)
#define ISC_TO_PENDING_IO(_isc) (0x80 >> (_isc))
#define CR6_TO_PENDING_IO(_cr6) (((_cr6) >> 24) & 0xff)
/* organize the ISC bits so that the macros above work */
#define FLIC_PENDING_IO_ISC7 (1 << 0)
#define FLIC_PENDING_IO_ISC6 (1 << 1)
#define FLIC_PENDING_IO_ISC5 (1 << 2)
#define FLIC_PENDING_IO_ISC4 (1 << 3)
#define FLIC_PENDING_IO_ISC3 (1 << 4)
#define FLIC_PENDING_IO_ISC2 (1 << 5)
#define FLIC_PENDING_IO_ISC1 (1 << 6)
#define FLIC_PENDING_IO_ISC0 (1 << 7)
#define FLIC_PENDING_SERVICE (1 << 8)
#define FLIC_PENDING_MCHK_CR (1 << 9)
#define FLIC_PENDING_IO (FLIC_PENDING_IO_ISC0 | FLIC_PENDING_IO_ISC1 | \
FLIC_PENDING_IO_ISC2 | FLIC_PENDING_IO_ISC3 | \
FLIC_PENDING_IO_ISC4 | FLIC_PENDING_IO_ISC5 | \
FLIC_PENDING_IO_ISC6 | FLIC_PENDING_IO_ISC7)
typedef struct QEMUS390FlicIO {
uint16_t id;
uint16_t nr;
uint32_t parm;
uint32_t word;
QLIST_ENTRY(QEMUS390FlicIO) next;
} QEMUS390FlicIO;
typedef struct QEMUS390FLICState {
S390FLICState parent_obj;
uint32_t pending;
uint32_t service_param;
uint8_t simm;
uint8_t nimm;
QLIST_HEAD(, QEMUS390FlicIO) io[8];
} QEMUS390FLICState;
uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic);
QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic,
uint64_t cr6);
void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic);
bool qemu_s390_flic_has_service(QEMUS390FLICState *flic);
bool qemu_s390_flic_has_io(QEMUS390FLICState *fs, uint64_t cr6);
bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic);
bool qemu_s390_flic_has_any(QEMUS390FLICState *flic);
void s390_flic_init(void);
S390FLICState *s390_get_flic(void);
QEMUS390FLICState *s390_get_qemu_flic(S390FLICState *fs);
S390FLICStateClass *s390_get_flic_class(S390FLICState *fs);
bool ais_needed(void *opaque);
#ifdef CONFIG_KVM
DeviceState *s390_flic_kvm_create(void);
#else
static inline DeviceState *s390_flic_kvm_create(void)
{
return NULL;
}
#endif
#endif /* HW_S390_FLIC_H */
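
The pending-bit macros above are arranged so that a single AND against the CR6-derived mask decides whether a queued I/O interrupt is deliverable. A standalone check of that arrangement, not part of the commit; ISC_TO_ISC_BITS is defined elsewhere in QEMU and is reproduced here as an assumption chosen to be consistent with CR6_TO_PENDING_IO:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ISC_TO_PENDING_IO(_isc)  (0x80 >> (_isc))
    #define CR6_TO_PENDING_IO(_cr6)  (((_cr6) >> 24) & 0xff)
    #define ISC_TO_ISC_BITS(_isc)    (0x80000000U >> (_isc))   /* assumed */

    int main(void)
    {
        uint64_t cr6 = ISC_TO_ISC_BITS(3);        /* guest enables only ISC 3 */
        uint32_t pending = ISC_TO_PENDING_IO(3);  /* flic queues an ISC 3 irq */

        /* the check "flic->pending & CR6_TO_PENDING_IO(cr6)" matches */
        assert(pending == 0x10);
        assert(CR6_TO_PENDING_IO(cr6) & pending);

        /* an interrupt on a disabled ISC (say 5) is not deliverable */
        assert(!(CR6_TO_PENDING_IO(cr6) & ISC_TO_PENDING_IO(5)));

        printf("ISC 3: pending bit 0x%02x, CR6 mask 0x%02x\n",
               (unsigned)pending, (unsigned)CR6_TO_PENDING_IO(cr6));
        return 0;
    }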


@ -100,7 +100,6 @@ static void s390_cpu_initial_reset(CPUState *s)
{
S390CPU *cpu = S390_CPU(s);
CPUS390XState *env = &cpu->env;
int i;
s390_cpu_reset(s);
/* initial reset does not clear everything! */
@ -116,10 +115,6 @@ static void s390_cpu_initial_reset(CPUState *s)
env->gbea = 1;
env->pfault_token = -1UL;
for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
env->io_index[i] = -1;
}
env->mchk_index = -1;
/* tininess for underflow is detected before rounding */
set_float_detect_tininess(float_tininess_before_rounding,
@ -137,7 +132,6 @@ static void s390_cpu_full_reset(CPUState *s)
S390CPU *cpu = S390_CPU(s);
S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
CPUS390XState *env = &cpu->env;
int i;
scc->parent_reset(s);
cpu->env.sigp_order = 0;
@ -153,10 +147,6 @@ static void s390_cpu_full_reset(CPUState *s)
env->gbea = 1;
env->pfault_token = -1UL;
for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
env->io_index[i] = -1;
}
env->mchk_index = -1;
/* tininess for underflow is detected before rounding */
set_float_detect_tininess(float_tininess_before_rounding,


@ -53,12 +53,6 @@
#define MMU_USER_IDX 0
#define MAX_IO_QUEUE 16
#define MAX_MCHK_QUEUE 16
#define PSW_MCHK_MASK 0x0004000000000000
#define PSW_IO_MASK 0x0200000000000000
#define S390_MAX_CPUS 248
typedef struct PSW {
@ -66,17 +60,6 @@ typedef struct PSW {
uint64_t addr;
} PSW;
typedef struct IOIntQueue {
uint16_t id;
uint16_t nr;
uint32_t parm;
uint32_t word;
} IOIntQueue;
typedef struct MchkQueue {
uint16_t type;
} MchkQueue;
struct CPUS390XState {
uint64_t regs[16]; /* GP registers */
/*
@ -122,15 +105,9 @@ struct CPUS390XState {
uint64_t cregs[16]; /* control registers */
IOIntQueue io_queue[MAX_IO_QUEUE][8];
MchkQueue mchk_queue[MAX_MCHK_QUEUE];
int pending_int;
uint32_t service_param;
uint16_t external_call_addr;
DECLARE_BITMAP(emergency_signals, S390_MAX_CPUS);
int io_index[8];
int mchk_index;
uint64_t ckc;
uint64_t cputm;
@ -409,9 +386,6 @@ static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
#define EXCP_IO 7 /* I/O interrupt */
#define EXCP_MCHK 8 /* machine check */
#define INTERRUPT_IO (1 << 0)
#define INTERRUPT_MCHK (1 << 1)
#define INTERRUPT_EXT_SERVICE (1 << 2)
#define INTERRUPT_EXT_CPU_TIMER (1 << 3)
#define INTERRUPT_EXT_CLOCK_COMPARATOR (1 << 4)
#define INTERRUPT_EXTERNAL_CALL (1 << 5)
@ -452,62 +426,66 @@ static inline void setcc(S390CPU *cpu, uint64_t cc)
}
/* STSI */
#define STSI_LEVEL_MASK 0x00000000f0000000ULL
#define STSI_LEVEL_CURRENT 0x0000000000000000ULL
#define STSI_LEVEL_1 0x0000000010000000ULL
#define STSI_LEVEL_2 0x0000000020000000ULL
#define STSI_LEVEL_3 0x0000000030000000ULL
#define STSI_R0_FC_MASK 0x00000000f0000000ULL
#define STSI_R0_FC_CURRENT 0x0000000000000000ULL
#define STSI_R0_FC_LEVEL_1 0x0000000010000000ULL
#define STSI_R0_FC_LEVEL_2 0x0000000020000000ULL
#define STSI_R0_FC_LEVEL_3 0x0000000030000000ULL
#define STSI_R0_RESERVED_MASK 0x000000000fffff00ULL
#define STSI_R0_SEL1_MASK 0x00000000000000ffULL
#define STSI_R1_RESERVED_MASK 0x00000000ffff0000ULL
#define STSI_R1_SEL2_MASK 0x000000000000ffffULL
/* Basic Machine Configuration */
struct sysib_111 {
uint32_t res1[8];
typedef struct SysIB_111 {
uint8_t res1[32];
uint8_t manuf[16];
uint8_t type[4];
uint8_t res2[12];
uint8_t model[16];
uint8_t sequence[16];
uint8_t plant[4];
uint8_t res3[156];
};
uint8_t res3[3996];
} SysIB_111;
QEMU_BUILD_BUG_ON(sizeof(SysIB_111) != 4096);
/* Basic Machine CPU */
struct sysib_121 {
uint32_t res1[80];
typedef struct SysIB_121 {
uint8_t res1[80];
uint8_t sequence[16];
uint8_t plant[4];
uint8_t res2[2];
uint16_t cpu_addr;
uint8_t res3[152];
};
uint8_t res3[3992];
} SysIB_121;
QEMU_BUILD_BUG_ON(sizeof(SysIB_121) != 4096);
/* Basic Machine CPUs */
struct sysib_122 {
typedef struct SysIB_122 {
uint8_t res1[32];
uint32_t capability;
uint16_t total_cpus;
uint16_t active_cpus;
uint16_t conf_cpus;
uint16_t standby_cpus;
uint16_t reserved_cpus;
uint16_t adjustments[2026];
};
} SysIB_122;
QEMU_BUILD_BUG_ON(sizeof(SysIB_122) != 4096);
/* LPAR CPU */
struct sysib_221 {
uint32_t res1[80];
typedef struct SysIB_221 {
uint8_t res1[80];
uint8_t sequence[16];
uint8_t plant[4];
uint16_t cpu_id;
uint16_t cpu_addr;
uint8_t res3[152];
};
uint8_t res3[3992];
} SysIB_221;
QEMU_BUILD_BUG_ON(sizeof(SysIB_221) != 4096);
/* LPAR CPUs */
struct sysib_222 {
uint32_t res1[32];
typedef struct SysIB_222 {
uint8_t res1[32];
uint16_t lpar_num;
uint8_t res2;
uint8_t lcpuc;
@ -520,11 +498,12 @@ struct sysib_222 {
uint8_t res3[16];
uint16_t dedicated_cpus;
uint16_t shared_cpus;
uint8_t res4[180];
};
uint8_t res4[4020];
} SysIB_222;
QEMU_BUILD_BUG_ON(sizeof(SysIB_222) != 4096);
/* VM CPUs */
struct sysib_322 {
typedef struct SysIB_322 {
uint8_t res1[31];
uint8_t count;
struct {
@ -543,7 +522,18 @@ struct sysib_322 {
} vm[8];
uint8_t res4[1504];
uint8_t ext_names[8][256];
};
} SysIB_322;
QEMU_BUILD_BUG_ON(sizeof(SysIB_322) != 4096);
typedef union SysIB {
SysIB_111 sysib_111;
SysIB_121 sysib_121;
SysIB_122 sysib_122;
SysIB_221 sysib_221;
SysIB_222 sysib_222;
SysIB_322 sysib_322;
} SysIB;
QEMU_BUILD_BUG_ON(sizeof(SysIB) != 4096);
/* MMU defines */
#define _ASCE_ORIGIN ~0xfffULL /* segment table origin */
@ -718,6 +708,10 @@ static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
return 0;
}
#endif /* CONFIG_USER_ONLY */
static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
{
return cpu->env.cpu_state;
}
/* cpu_models.c */
@ -752,7 +746,6 @@ void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen,
/* service interrupts are floating therefore we must not pass an cpustate */
void s390_sclp_extint(uint32_t parm);
/* mmu_helper.c */
int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
int len, bool is_write);
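
The STSI_R0_* renames in the cpu.h hunk above spell out how register 0 is decomposed for STSI. A standalone example of that decomposition, not part of the commit; the masks are copied from the hunk, the sample register values are made up, and the reserved-bit comment reflects only what the masks suggest:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define STSI_R0_FC_MASK        0x00000000f0000000ULL
    #define STSI_R0_FC_LEVEL_2     0x0000000020000000ULL
    #define STSI_R0_RESERVED_MASK  0x000000000fffff00ULL
    #define STSI_R0_SEL1_MASK      0x00000000000000ffULL
    #define STSI_R1_SEL2_MASK      0x000000000000ffffULL

    int main(void)
    {
        /* STSI 2.2.2: function code 2, selector 1 = 2, selector 2 = 2 */
        uint64_t r0 = STSI_R0_FC_LEVEL_2 | 2;
        uint64_t r1 = 2;

        assert((r0 & STSI_R0_RESERVED_MASK) == 0);   /* reserved bits clear */
        assert((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_LEVEL_2);

        printf("fc=%llu sel1=%llu sel2=%llu\n",
               (unsigned long long)((r0 & STSI_R0_FC_MASK) >> 28),
               (unsigned long long)(r0 & STSI_R0_SEL1_MASK),
               (unsigned long long)(r1 & STSI_R1_SEL2_MASK));
        return 0;
    }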


@ -156,8 +156,12 @@ static const S390FeatDef s390_features[] = {
FEAT_INIT("ptff-qpc", S390_FEAT_TYPE_PTFF, 3, "PTFF Query Physical Clock"),
FEAT_INIT("ptff-qui", S390_FEAT_TYPE_PTFF, 4, "PTFF Query UTC Information"),
FEAT_INIT("ptff-qtou", S390_FEAT_TYPE_PTFF, 5, "PTFF Query TOD Offset User"),
FEAT_INIT("ptff-qsie", S390_FEAT_TYPE_PTFF, 10, "PTFF Query Steering Information Extended"),
FEAT_INIT("ptff-qtoue", S390_FEAT_TYPE_PTFF, 13, "PTFF Query TOD Offset User Extended"),
FEAT_INIT("ptff-sto", S390_FEAT_TYPE_PTFF, 65, "PTFF Set TOD Offset"),
FEAT_INIT("ptff-stou", S390_FEAT_TYPE_PTFF, 69, "PTFF Set TOD Offset User"),
FEAT_INIT("ptff-stoe", S390_FEAT_TYPE_PTFF, 73, "PTFF Set TOD Offset Extended"),
FEAT_INIT("ptff-stoue", S390_FEAT_TYPE_PTFF, 77, "PTFF Set TOD Offset User Extended"),
FEAT_INIT("kmac-dea", S390_FEAT_TYPE_KMAC, 1, "KMAC DEA"),
FEAT_INIT("kmac-tdea-128", S390_FEAT_TYPE_KMAC, 2, "KMAC TDEA-128"),
@ -445,6 +449,7 @@ static S390FeatGroupDef s390_feature_groups[] = {
FEAT_GROUP_INIT("plo", PLO, "Perform-locked-operation facility"),
FEAT_GROUP_INIT("tods", TOD_CLOCK_STEERING, "Tod-clock-steering facility"),
FEAT_GROUP_INIT("gen13ptff", GEN13_PTFF, "PTFF enhancements introduced with z13"),
FEAT_GROUP_INIT("mepochptff", MULTIPLE_EPOCH_PTFF, "PTFF enhancements introduced with Multiple-epoch facility"),
FEAT_GROUP_INIT("msa", MSA, "Message-security-assist facility"),
FEAT_GROUP_INIT("msa1", MSA_EXT_1, "Message-security-assist-extension 1 facility"),
FEAT_GROUP_INIT("msa2", MSA_EXT_2, "Message-security-assist-extension 2 facility"),


@ -151,8 +151,12 @@ typedef enum {
S390_FEAT_PTFF_QPT,
S390_FEAT_PTFF_QUI,
S390_FEAT_PTFF_QTOU,
S390_FEAT_PTFF_QSIE,
S390_FEAT_PTFF_QTOUE,
S390_FEAT_PTFF_STO,
S390_FEAT_PTFF_STOU,
S390_FEAT_PTFF_STOE,
S390_FEAT_PTFF_STOUE,
/* KMAC */
S390_FEAT_KMAC_DEA,
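
The four PTFF subfunctions added above carry the query numbers 10, 13, 73 and 77 (see the FEAT_INIT lines in the cpu_features.c hunk earlier). Where those numbers land in a PTFF query bitmap, assuming the usual s390 MSB-first numbering (bit n in byte n/8 at mask 0x80 >> (n % 8)); that numbering is an assumption of this sketch, not something shown in the hunks:

    #include <stdio.h>

    static void show(const char *name, unsigned nr)
    {
        printf("%-11s fc %2u -> query byte %2u, bit mask 0x%02x\n",
               name, nr, nr / 8, 0x80 >> (nr % 8));
    }

    int main(void)
    {
        show("ptff-qsie", 10);
        show("ptff-qtoue", 13);
        show("ptff-stoe", 73);
        show("ptff-stoue", 77);
        return 0;
    }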


@ -23,6 +23,7 @@
#include "qapi/qmp/qbool.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/arch_init.h"
#include "hw/pci/pci.h"
#endif
#define CPUDEF_INIT(_type, _gen, _ec_ga, _mha_pow, _hmfai, _name, _desc) \
@ -1271,6 +1272,11 @@ static void register_types(void)
/* init all bitmaps from gnerated data initially */
s390_init_feat_bitmap(qemu_max_cpu_feat_init, qemu_max_cpu_feat);
#ifndef CONFIG_USER_ONLY
if (!pci_available) {
clear_bit(S390_FEAT_ZPCI, qemu_max_cpu_feat);
}
#endif
for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
s390_init_feat_bitmap(s390_cpu_defs[i].base_init,
s390_cpu_defs[i].base_feat);


@ -29,6 +29,7 @@
#include "exec/address-spaces.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#endif
/* #define DEBUG_S390 */
@ -237,6 +238,7 @@ static void do_svc_interrupt(CPUS390XState *env)
static void do_ext_interrupt(CPUS390XState *env)
{
QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
S390CPU *cpu = s390_env_get_cpu(env);
uint64_t mask, addr;
uint16_t cpu_addr;
@ -273,17 +275,14 @@ static void do_ext_interrupt(CPUS390XState *env)
lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
lowcore->cpu_addr = 0;
env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
} else if ((env->pending_int & INTERRUPT_EXT_SERVICE) &&
} else if (qemu_s390_flic_has_service(flic) &&
(env->cregs[0] & CR0_SERVICE_SC)) {
/*
* FIXME: floating IRQs should be considered by all CPUs and
* shuld not get cleared by CPU reset.
*/
uint32_t param;
param = qemu_s390_flic_dequeue_service(flic);
lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
lowcore->ext_params = cpu_to_be32(env->service_param);
lowcore->ext_params = cpu_to_be32(param);
lowcore->cpu_addr = 0;
env->service_param = 0;
env->pending_int &= ~INTERRUPT_EXT_SERVICE;
} else {
g_assert_not_reached();
}
@ -303,95 +302,46 @@ static void do_ext_interrupt(CPUS390XState *env)
static void do_io_interrupt(CPUS390XState *env)
{
S390CPU *cpu = s390_env_get_cpu(env);
QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
uint64_t mask, addr;
QEMUS390FlicIO *io;
LowCore *lowcore;
IOIntQueue *q;
uint8_t isc;
int disable = 1;
int found = 0;
if (!(env->psw.mask & PSW_MASK_IO)) {
cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
}
g_assert(env->psw.mask & PSW_MASK_IO);
io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
g_assert(io);
for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
uint64_t isc_bits;
lowcore = cpu_map_lowcore(env);
if (env->io_index[isc] < 0) {
continue;
}
if (env->io_index[isc] >= MAX_IO_QUEUE) {
cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
isc, env->io_index[isc]);
}
lowcore->subchannel_id = cpu_to_be16(io->id);
lowcore->subchannel_nr = cpu_to_be16(io->nr);
lowcore->io_int_parm = cpu_to_be32(io->parm);
lowcore->io_int_word = cpu_to_be32(io->word);
lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
mask = be64_to_cpu(lowcore->io_new_psw.mask);
addr = be64_to_cpu(lowcore->io_new_psw.addr);
q = &env->io_queue[env->io_index[isc]][isc];
isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
if (!(env->cregs[6] & isc_bits)) {
disable = 0;
continue;
}
if (!found) {
uint64_t mask, addr;
found = 1;
lowcore = cpu_map_lowcore(env);
lowcore->subchannel_id = cpu_to_be16(q->id);
lowcore->subchannel_nr = cpu_to_be16(q->nr);
lowcore->io_int_parm = cpu_to_be32(q->parm);
lowcore->io_int_word = cpu_to_be32(q->word);
lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
mask = be64_to_cpu(lowcore->io_new_psw.mask);
addr = be64_to_cpu(lowcore->io_new_psw.addr);
cpu_unmap_lowcore(lowcore);
env->io_index[isc]--;
DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
env->psw.mask, env->psw.addr);
load_psw(env, mask, addr);
}
if (env->io_index[isc] >= 0) {
disable = 0;
}
continue;
}
if (disable) {
env->pending_int &= ~INTERRUPT_IO;
}
cpu_unmap_lowcore(lowcore);
g_free(io);
DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__, env->psw.mask,
env->psw.addr);
load_psw(env, mask, addr);
}
static void do_mchk_interrupt(CPUS390XState *env)
{
S390CPU *cpu = s390_env_get_cpu(env);
QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
uint64_t mask, addr;
LowCore *lowcore;
MchkQueue *q;
int i;
if (!(env->psw.mask & PSW_MASK_MCHECK)) {
cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
}
/* for now we only support channel report machine checks (floating) */
g_assert(env->psw.mask & PSW_MASK_MCHECK);
g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);
if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
}
q = &env->mchk_queue[env->mchk_index];
if (q->type != 1) {
/* Don't know how to handle this... */
cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
}
if (!(env->cregs[14] & (1 << 28))) {
/* CRW machine checks disabled */
return;
}
qemu_s390_flic_dequeue_crw_mchk(flic);
lowcore = cpu_map_lowcore(env);
@ -418,11 +368,6 @@ static void do_mchk_interrupt(CPUS390XState *env)
cpu_unmap_lowcore(lowcore);
env->mchk_index--;
if (env->mchk_index == -1) {
env->pending_int &= ~INTERRUPT_MCHK;
}
DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
env->psw.mask, env->psw.addr);
@ -431,12 +376,15 @@ static void do_mchk_interrupt(CPUS390XState *env)
void s390_cpu_do_interrupt(CPUState *cs)
{
QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
bool stopped = false;
qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
__func__, cs->exception_index, env->psw.addr);
try_deliver:
/* handle machine checks */
if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
cs->exception_index = EXCP_MCHK;
@ -479,20 +427,30 @@ void s390_cpu_do_interrupt(CPUState *cs)
break;
case EXCP_STOP:
do_stop_interrupt(env);
stopped = true;
break;
}
/* WAIT PSW during interrupt injection or STOP interrupt */
if (cs->exception_index == EXCP_HLT) {
/* don't trigger a cpu_loop_exit(), use an interrupt instead */
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
if (cs->exception_index != -1 && !stopped) {
/* check if there are more pending interrupts to deliver */
cs->exception_index = -1;
goto try_deliver;
}
cs->exception_index = -1;
/* we might still have pending interrupts, but not deliverable */
if (!env->pending_int) {
if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
}
/* WAIT PSW during interrupt injection or STOP interrupt */
if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
/* don't trigger a cpu_loop_exit(), use an interrupt instead */
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
} else if (cs->halted) {
/* unhalt if we had a WAIT PSW somehwere in our injection chain */
s390_cpu_unhalt(cpu);
}
}
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
@ -510,6 +468,11 @@ bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
s390_cpu_do_interrupt(cs);
return true;
}
if (env->psw.mask & PSW_MASK_WAIT) {
/* Woken up because of a floating interrupt but it has already
* been delivered. Go back to sleep. */
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
}
}
return false;
}


@ -59,6 +59,12 @@
S390_FEAT_PTFF_QTOU, \
S390_FEAT_PTFF_STOU
#define S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF \
S390_FEAT_PTFF_QSIE, \
S390_FEAT_PTFF_QTOUE, \
S390_FEAT_PTFF_STOE, \
S390_FEAT_PTFF_STOUE
#define S390_FEAT_GROUP_MSA \
S390_FEAT_MSA, \
S390_FEAT_KMAC_DEA, \
@ -219,6 +225,9 @@ static uint16_t group_TOD_CLOCK_STEERING[] = {
static uint16_t group_GEN13_PTFF[] = {
S390_FEAT_GROUP_GEN13_PTFF,
};
static uint16_t group_MULTIPLE_EPOCH_PTFF[] = {
S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF,
};
static uint16_t group_MSA[] = {
S390_FEAT_GROUP_MSA,
};
@ -466,6 +475,7 @@ static uint16_t full_GEN14_GA1[] = {
S390_FEAT_CMM_NT,
S390_FEAT_HPMA2,
S390_FEAT_SIE_KSS,
S390_FEAT_GROUP_MULTIPLE_EPOCH_PTFF,
};
/* Default features (in order of release)
@ -572,8 +582,10 @@ static uint16_t qemu_LATEST[] = {
S390_FEAT_STFLE_49,
S390_FEAT_LOCAL_TLB_CLEARING,
S390_FEAT_INTERLOCKED_ACCESS_2,
S390_FEAT_MSA_EXT_4,
S390_FEAT_ADAPTER_EVENT_NOTIFICATION,
S390_FEAT_ADAPTER_INT_SUPPRESSION,
S390_FEAT_MSA_EXT_3,
S390_FEAT_MSA_EXT_4,
};
/* add all new definitions before this point */
@ -582,6 +594,8 @@ static uint16_t qemu_MAX[] = {
S390_FEAT_STFLE_53,
/* generates a dependency warning, leave it out for now */
S390_FEAT_MSA_EXT_5,
/* only with CONFIG_PCI */
S390_FEAT_ZPCI,
};
/****** END FEATURE DEFS ******/
@ -664,6 +678,7 @@ static FeatGroupDefSpec FeatGroupDef[] = {
FEAT_GROUP_INITIALIZER(PLO),
FEAT_GROUP_INITIALIZER(TOD_CLOCK_STEERING),
FEAT_GROUP_INITIALIZER(GEN13_PTFF),
FEAT_GROUP_INITIALIZER(MULTIPLE_EPOCH_PTFF),
FEAT_GROUP_INITIALIZER(MSA),
FEAT_GROUP_INITIALIZER(MSA_EXT_1),
FEAT_GROUP_INITIALIZER(MSA_EXT_2),


@ -170,6 +170,16 @@ DEF_HELPER_4(schm, void, env, i64, i64, i64)
DEF_HELPER_3(ssch, void, env, i64, i64)
DEF_HELPER_2(stcrw, void, env, i64)
DEF_HELPER_3(stsch, void, env, i64, i64)
DEF_HELPER_2(tpi, i32, env, i64)
DEF_HELPER_3(tsch, void, env, i64, i64)
DEF_HELPER_2(chsc, void, env, i64)
DEF_HELPER_2(clp, void, env, i32)
DEF_HELPER_3(pcilg, void, env, i32, i32)
DEF_HELPER_3(pcistg, void, env, i32, i32)
DEF_HELPER_4(stpcifc, void, env, i32, i64, i32)
DEF_HELPER_3(sic, void, env, i64, i64)
DEF_HELPER_3(rpcit, void, env, i32, i32)
DEF_HELPER_5(pcistb, void, env, i32, i32, i64, i32)
DEF_HELPER_4(mpcifc, void, env, i32, i64, i32)
#endif


@ -1063,8 +1063,22 @@
C(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0)
C(0xb239, STCRW, S, Z, 0, insn, 0, 0, stcrw, 0)
C(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0)
C(0xb236, TPI , S, Z, la2, 0, 0, 0, tpi, 0)
C(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0)
/* ??? Not listed in PoO ninth edition, but there's a linux driver that
uses it: "A CHSC subchannel is usually present on LPAR only." */
C(0xb25f, CHSC, RRE, Z, 0, insn, 0, 0, chsc, 0)
/* zPCI Instructions */
/* None of these instructions are documented in the PoP, so this is all
based upon target/s390x/kvm.c and Linux code and likely incomplete */
C(0xebd0, PCISTB, RSY_a, PCI, la2, 0, 0, 0, pcistb, 0)
C(0xebd1, SIC, RSY_a, AIS, r1, r3, 0, 0, sic, 0)
C(0xb9a0, CLP, RRF_c, PCI, 0, 0, 0, 0, clp, 0)
C(0xb9d0, PCISTG, RRE, PCI, 0, 0, 0, 0, pcistg, 0)
C(0xb9d2, PCILG, RRE, PCI, 0, 0, 0, 0, pcilg, 0)
C(0xb9d3, RPCIT, RRE, PCI, 0, 0, 0, 0, rpcit, 0)
C(0xe3d0, MPCIFC, RXY_a, PCI, la2, 0, 0, 0, mpcifc, 0)
C(0xe3d4, STPCIFC, RXY_a, PCI, la2, 0, 0, 0, stpcifc, 0)
#endif /* CONFIG_USER_ONLY */


@ -278,11 +278,6 @@ static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
cpu_reset(cs);
}
static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
{
return cpu->env.cpu_state;
}
/* arch_dump.c */
int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,


@ -15,6 +15,9 @@
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "hw/s390x/ioinst.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/s390_flic.h"
#endif
/* Ensure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
@ -55,17 +58,6 @@ void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen,
}
#if !defined(CONFIG_USER_ONLY)
static void cpu_inject_service(S390CPU *cpu, uint32_t param)
{
CPUS390XState *env = &cpu->env;
/* multiplexing is good enough for sclp - kvm does it internally as well*/
env->service_param |= param;
env->pending_int |= INTERRUPT_EXT_SERVICE;
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
void cpu_inject_clock_comparator(S390CPU *cpu)
{
CPUS390XState *env = &cpu->env;
@ -134,48 +126,6 @@ void cpu_inject_stop(S390CPU *cpu)
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
static void cpu_inject_io(S390CPU *cpu, uint16_t subchannel_id,
uint16_t subchannel_number,
uint32_t io_int_parm, uint32_t io_int_word)
{
CPUS390XState *env = &cpu->env;
int isc = IO_INT_WORD_ISC(io_int_word);
if (env->io_index[isc] == MAX_IO_QUEUE - 1) {
/* ugh - can't queue anymore. Let's drop. */
return;
}
env->io_index[isc]++;
assert(env->io_index[isc] < MAX_IO_QUEUE);
env->io_queue[env->io_index[isc]][isc].id = subchannel_id;
env->io_queue[env->io_index[isc]][isc].nr = subchannel_number;
env->io_queue[env->io_index[isc]][isc].parm = io_int_parm;
env->io_queue[env->io_index[isc]][isc].word = io_int_word;
env->pending_int |= INTERRUPT_IO;
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
static void cpu_inject_crw_mchk(S390CPU *cpu)
{
CPUS390XState *env = &cpu->env;
if (env->mchk_index == MAX_MCHK_QUEUE - 1) {
/* ugh - can't queue anymore. Let's drop. */
return;
}
env->mchk_index++;
assert(env->mchk_index < MAX_MCHK_QUEUE);
env->mchk_queue[env->mchk_index].type = 1;
env->pending_int |= INTERRUPT_MCHK;
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
/*
* All of the following interrupts are floating, i.e. not per-vcpu.
* We just need a dummy cpustate in order to be able to inject in the
@ -183,53 +133,50 @@ static void cpu_inject_crw_mchk(S390CPU *cpu)
*/
void s390_sclp_extint(uint32_t parm)
{
if (kvm_enabled()) {
kvm_s390_service_interrupt(parm);
} else {
S390CPU *dummy_cpu = s390_cpu_addr2state(0);
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = s390_get_flic_class(fs);
cpu_inject_service(dummy_cpu, parm);
}
fsc->inject_service(fs, parm);
}
void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
uint32_t io_int_parm, uint32_t io_int_word)
{
if (kvm_enabled()) {
kvm_s390_io_interrupt(subchannel_id, subchannel_nr, io_int_parm,
io_int_word);
} else {
S390CPU *dummy_cpu = s390_cpu_addr2state(0);
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = s390_get_flic_class(fs);
cpu_inject_io(dummy_cpu, subchannel_id, subchannel_nr, io_int_parm,
io_int_word);
}
fsc->inject_io(fs, subchannel_id, subchannel_nr, io_int_parm, io_int_word);
}
void s390_crw_mchk(void)
{
if (kvm_enabled()) {
kvm_s390_crw_mchk();
} else {
S390CPU *dummy_cpu = s390_cpu_addr2state(0);
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = s390_get_flic_class(fs);
cpu_inject_crw_mchk(dummy_cpu);
}
fsc->inject_crw_mchk(fs);
}
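/*
 * s390_get_flic_class() is introduced elsewhere in this series (in
 * hw/s390x/s390_flic.c, not part of this hunk); a minimal sketch of the
 * caching idea, assuming QEMU's usual QOM cast macros:
 *
 *   S390FLICStateClass *s390_get_flic_class(S390FLICState *fs)
 *   {
 *       static S390FLICStateClass *class;
 *
 *       if (!class) {
 *           class = S390_FLIC_COMMON_GET_CLASS(fs);
 *       }
 *       return class;
 *   }
 *
 * Only a single flic device exists per machine, so caching the class pointer
 * in a static is safe.
 */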
bool s390_cpu_has_mcck_int(S390CPU *cpu)
{
QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
CPUS390XState *env = &cpu->env;
if (!(env->psw.mask & PSW_MASK_MCHECK)) {
return false;
}
return env->pending_int & INTERRUPT_MCHK;
/* for now we only support channel report machine checks (floating) */
if (qemu_s390_flic_has_crw_mchk(flic) &&
(env->cregs[14] & CR14_CHANNEL_REPORT_SC)) {
return true;
}
return false;
}
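/*
 * CR14's channel-report subclass bit plays the same gating role for floating
 * machine checks that the CR6 subclass mask plays for I/O interrupts below:
 * a pending CRW is only deliverable while the guest has the subclass enabled.
 */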
bool s390_cpu_has_ext_int(S390CPU *cpu)
{
QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
CPUS390XState *env = &cpu->env;
if (!(env->psw.mask & PSW_MASK_EXT)) {
@ -261,7 +208,7 @@ bool s390_cpu_has_ext_int(S390CPU *cpu)
return true;
}
if ((env->pending_int & INTERRUPT_EXT_SERVICE) &&
if (qemu_s390_flic_has_service(flic) &&
(env->cregs[0] & CR0_SERVICE_SC)) {
return true;
}
@ -271,13 +218,14 @@ bool s390_cpu_has_ext_int(S390CPU *cpu)
bool s390_cpu_has_io_int(S390CPU *cpu)
{
QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
CPUS390XState *env = &cpu->env;
if (!(env->psw.mask & PSW_MASK_IO)) {
return false;
}
return env->pending_int & INTERRUPT_IO;
return qemu_s390_flic_has_io(flic, env->cregs[6]);
}
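/*
 * Background for the check above: CR6 carries the I/O-interruption subclass
 * mask, and the qemu flic keeps pending I/O interrupts queued per ISC (much
 * like the removed per-CPU env->io_queue did), so qemu_s390_flic_has_io() is
 * expected to report true only when some non-empty queue's subclass is
 * enabled in CR6.
 */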
bool s390_cpu_has_restart_int(S390CPU *cpu)

View File

@ -12,10 +12,6 @@
#include "cpu.h"
#include "kvm_s390x.h"
void kvm_s390_service_interrupt(uint32_t parm)
{
}
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
}
@ -30,15 +26,6 @@ void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
{
}
void kvm_s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
uint32_t io_int_parm, uint32_t io_int_word)
{
}
void kvm_s390_crw_mchk(void)
{
}
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
return -ENOSYS;

View File

@ -1034,7 +1034,7 @@ void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
inject_vcpu_irq_legacy(cs, irq);
}
static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq)
{
struct kvm_s390_interrupt kvmint = {};
int r;
@ -1052,33 +1052,6 @@ static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
}
}
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
static bool use_flic = true;
int r;
if (use_flic) {
r = kvm_s390_inject_flic(irq);
if (r == -ENOSYS) {
use_flic = false;
}
if (!r) {
return;
}
}
__kvm_s390_floating_interrupt(irq);
}
void kvm_s390_service_interrupt(uint32_t parm)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_INT_SERVICE,
.u.ext.ext_params = parm,
};
kvm_s390_floating_interrupt(&irq);
}
void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
{
struct kvm_s390_irq irq = {
@ -1690,10 +1663,10 @@ static int handle_tsch(S390CPU *cpu)
* If an I/O interrupt had been dequeued, we have to reinject it.
*/
if (run->s390_tsch.dequeued) {
kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
run->s390_tsch.subchannel_nr,
run->s390_tsch.io_int_parm,
run->s390_tsch.io_int_word);
s390_io_interrupt(run->s390_tsch.subchannel_id,
run->s390_tsch.subchannel_nr,
run->s390_tsch.io_int_parm,
run->s390_tsch.io_int_word);
}
ret = 0;
}
@ -1702,7 +1675,7 @@ static int handle_tsch(S390CPU *cpu)
static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
{
struct sysib_322 sysib;
SysIB_322 sysib;
int del;
if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
@ -1840,37 +1813,6 @@ bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
return true;
}
void kvm_s390_io_interrupt(uint16_t subchannel_id,
uint16_t subchannel_nr, uint32_t io_int_parm,
uint32_t io_int_word)
{
struct kvm_s390_irq irq = {
.u.io.subchannel_id = subchannel_id,
.u.io.subchannel_nr = subchannel_nr,
.u.io.io_int_parm = io_int_parm,
.u.io.io_int_word = io_int_word,
};
if (io_int_word & IO_INT_WORD_AI) {
irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
} else {
irq.type = KVM_S390_INT_IO(0, (subchannel_id & 0xff00) >> 8,
(subchannel_id & 0x0006),
subchannel_nr);
}
kvm_s390_floating_interrupt(&irq);
}
void kvm_s390_crw_mchk(void)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_MCHK,
.u.mchk.cr14 = CR14_CHANNEL_REPORT_SC,
.u.mchk.mcic = s390_build_validity_mcic() | MCIC_SC_CP,
};
kvm_s390_floating_interrupt(&irq);
}
void kvm_s390_enable_css_support(S390CPU *cpu)
{
int r;
@ -2279,6 +2221,14 @@ void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
return;
}
/* PTFF subfunctions might be indicated although kernel support missing */
if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) {
clear_bit(S390_FEAT_PTFF_QSIE, model->features);
clear_bit(S390_FEAT_PTFF_QTOUE, model->features);
clear_bit(S390_FEAT_PTFF_STOE, model->features);
clear_bit(S390_FEAT_PTFF_STOUE, model->features);
}
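/*
 * For reference (expanding the acronyms, as far as we can tell): QSIE, QTOUE,
 * STOE and STOUE are the PTFF query-steering-information-extended,
 * query-TOD-offset-user-extended, set-TOD-offset-extended and
 * set-TOD-offset-user-extended subfunctions; they are only meaningful
 * together with the Multiple-epoch facility, which is why they are cleared
 * here when that base feature is missing.
 */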
/* with cpu model support, CMM is only indicated if really available */
if (kvm_s390_cmma_available()) {
set_bit(S390_FEAT_CMM, model->features);

View File

@ -12,17 +12,12 @@
struct kvm_s390_irq;
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
void kvm_s390_service_interrupt(uint32_t parm);
void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq);
void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
int len, bool is_write);
void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code);
void kvm_s390_io_interrupt(uint16_t subchannel_id,
uint16_t subchannel_nr, uint32_t io_int_parm,
uint32_t io_int_word);
void kvm_s390_crw_mchk(void);
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu);
int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
@ -44,7 +39,4 @@ void kvm_s390_crypto_reset(void);
void kvm_s390_restart_interrupt(S390CPU *cpu);
void kvm_s390_stop_interrupt(S390CPU *cpu);
/* implemented outside of target/s390x/ */
int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
#endif /* KVM_S390X_H */

View File

@ -36,6 +36,10 @@
#include "hw/s390x/ebcdic.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/boards.h"
#endif
/* #define DEBUG_HELPER */
@ -194,132 +198,148 @@ void HELPER(spt)(CPUS390XState *env, uint64_t time)
}
/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
uint64_t r0, uint64_t r1)
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
{
const uintptr_t ra = GETPC();
const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
const MachineState *ms = MACHINE(qdev_get_machine());
uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
S390CPU *cpu = s390_env_get_cpu(env);
int cc = 0;
int sel1, sel2;
SysIB sysib = { 0 };
int i, cc = 0;
if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
/* valid function code, invalid reserved bits */
s390_program_interrupt(env, PGM_SPECIFICATION, 4, GETPC());
if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
/* invalid function code: no other checks are performed */
return 3;
}
sel1 = r0 & STSI_R0_SEL1_MASK;
sel2 = r1 & STSI_R1_SEL2_MASK;
if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
}
/* XXX: spec exception if sysib is not 4k-aligned */
if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
/* query the current level: no further checks are performed */
env->regs[0] = STSI_R0_FC_LEVEL_3;
return 0;
}
switch (r0 & STSI_LEVEL_MASK) {
case STSI_LEVEL_1:
if (a0 & ~TARGET_PAGE_MASK) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
}
/* count the cpus and split them into configured and reserved ones */
for (i = 0; i < ms->possible_cpus->len; i++) {
total_cpus++;
if (ms->possible_cpus->cpus[i].cpu) {
conf_cpus++;
} else {
reserved_cpus++;
}
}
/*
* In theory, we could report Level 1 / Level 2 as current. However,
* the Linux kernel will detect this as running under LPAR and assume
* that we have an sclp linemode console (which is always present on
* LPAR, but not the default for QEMU), and therefore not display boot
* messages, making it harder to boot a Linux kernel under TCG.
*
* For now we fake the same SMP configuration on all levels.
*
* TODO: We could later make the level configurable via the machine
* and change defaults (linemode console) based on machine type
* and accelerator.
*/
switch (r0 & STSI_R0_FC_MASK) {
case STSI_R0_FC_LEVEL_1:
if ((sel1 == 1) && (sel2 == 1)) {
/* Basic Machine Configuration */
struct sysib_111 sysib;
char type[5] = {};
memset(&sysib, 0, sizeof(sysib));
ebcdic_put(sysib.manuf, "QEMU ", 16);
ebcdic_put(sysib.sysib_111.manuf, "QEMU ", 16);
/* same as machine type number in STORE CPU ID, but in EBCDIC */
snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
ebcdic_put(sysib.type, type, 4);
ebcdic_put(sysib.sysib_111.type, type, 4);
/* model number (not stored in STORE CPU ID for z/Architecture) */
ebcdic_put(sysib.model, "QEMU ", 16);
ebcdic_put(sysib.sequence, "QEMU ", 16);
ebcdic_put(sysib.plant, "QEMU", 4);
cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
ebcdic_put(sysib.sysib_111.model, "QEMU ", 16);
ebcdic_put(sysib.sysib_111.sequence, "QEMU ", 16);
ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
} else if ((sel1 == 2) && (sel2 == 1)) {
/* Basic Machine CPU */
struct sysib_121 sysib;
memset(&sysib, 0, sizeof(sysib));
/* XXX make different for different CPUs? */
ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
ebcdic_put(sysib.plant, "QEMU", 4);
stw_p(&sysib.cpu_addr, env->core_id);
cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
} else if ((sel1 == 2) && (sel2 == 2)) {
/* Basic Machine CPUs */
struct sysib_122 sysib;
memset(&sysib, 0, sizeof(sysib));
stl_p(&sysib.capability, 0x443afc29);
/* XXX change when SMP comes */
stw_p(&sysib.total_cpus, 1);
stw_p(&sysib.active_cpus, 1);
stw_p(&sysib.standby_cpus, 0);
stw_p(&sysib.reserved_cpus, 0);
cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
} else {
cc = 3;
}
break;
case STSI_LEVEL_2:
{
if ((sel1 == 2) && (sel2 == 1)) {
/* LPAR CPU */
struct sysib_221 sysib;
memset(&sysib, 0, sizeof(sysib));
/* XXX make different for different CPUs? */
ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
ebcdic_put(sysib.plant, "QEMU", 4);
stw_p(&sysib.cpu_addr, env->core_id);
stw_p(&sysib.cpu_id, 0);
cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
} else if ((sel1 == 2) && (sel2 == 2)) {
/* LPAR CPUs */
struct sysib_222 sysib;
memset(&sysib, 0, sizeof(sysib));
stw_p(&sysib.lpar_num, 0);
sysib.lcpuc = 0;
/* XXX change when SMP comes */
stw_p(&sysib.total_cpus, 1);
stw_p(&sysib.conf_cpus, 1);
stw_p(&sysib.standby_cpus, 0);
stw_p(&sysib.reserved_cpus, 0);
ebcdic_put(sysib.name, "QEMU ", 8);
stl_p(&sysib.caf, 1000);
stw_p(&sysib.dedicated_cpus, 0);
stw_p(&sysib.shared_cpus, 0);
cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
} else {
cc = 3;
}
break;
case STSI_R0_FC_LEVEL_2:
if ((sel1 == 2) && (sel2 == 1)) {
/* LPAR CPU */
ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
} else if ((sel1 == 2) && (sel2 == 2)) {
/* LPAR CPUs */
sysib.sysib_222.lcpuc = 0x80; /* dedicated */
sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
ebcdic_put(sysib.sysib_222.name, "QEMU ", 8);
sysib.sysib_222.caf = cpu_to_be32(1000);
sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
} else {
cc = 3;
}
case STSI_LEVEL_3:
{
if ((sel1 == 2) && (sel2 == 2)) {
/* VM CPUs */
struct sysib_322 sysib;
break;
case STSI_R0_FC_LEVEL_3:
if ((sel1 == 2) && (sel2 == 2)) {
/* VM CPUs */
sysib.sysib_322.count = 1;
sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
/* the Linux kernel uses this to distinguish us from z/VM */
ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux ", 16);
sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */
memset(&sysib, 0, sizeof(sysib));
sysib.count = 1;
/* XXX change when SMP comes */
stw_p(&sysib.vm[0].total_cpus, 1);
stw_p(&sysib.vm[0].conf_cpus, 1);
stw_p(&sysib.vm[0].standby_cpus, 0);
stw_p(&sysib.vm[0].reserved_cpus, 0);
ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
stl_p(&sysib.vm[0].caf, 1000);
ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
/* If our VM has a name, use the real name */
if (qemu_name) {
memset(sysib.sysib_322.vm[0].name, 0x40,
sizeof(sysib.sysib_322.vm[0].name));
ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
MIN(sizeof(sysib.sysib_322.vm[0].name),
strlen(qemu_name)));
strncpy((char *)sysib.sysib_322.ext_names[0], qemu_name,
sizeof(sysib.sysib_322.ext_names[0]));
} else {
cc = 3;
ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
}
break;
/* add the uuid */
memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
sizeof(sysib.sysib_322.vm[0].uuid));
} else {
cc = 3;
}
case STSI_LEVEL_CURRENT:
env->regs[0] = STSI_LEVEL_3;
break;
default:
cc = 3;
break;
}
if (cc == 0) {
if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
s390_cpu_virt_mem_handle_exc(cpu, ra);
}
}
return cc;
}
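/*
 * The SysIB union written above is defined outside this hunk (presumably next
 * to the other STSI structures in target/s390x/cpu.h); a sketch of the assumed
 * shape shows why one zero-initialised object and a single sizeof(sysib) write
 * at the end are sufficient for every selector:
 *
 *   typedef union SysIB {
 *       SysIB_111 sysib_111;
 *       SysIB_121 sysib_121;
 *       SysIB_122 sysib_122;
 *       SysIB_221 sysib_221;
 *       SysIB_222 sysib_222;
 *       SysIB_322 sysib_322;
 *   } SysIB;
 *   QEMU_BUILD_BUG_ON(sizeof(SysIB) != 4096);
 */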
@ -429,6 +449,59 @@ void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
qemu_mutex_unlock_iothread();
}
uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
{
const uintptr_t ra = GETPC();
S390CPU *cpu = s390_env_get_cpu(env);
QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
QEMUS390FlicIO *io = NULL;
LowCore *lowcore;
if (addr & 0x3) {
s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
}
qemu_mutex_lock_iothread();
io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
if (!io) {
qemu_mutex_unlock_iothread();
return 0;
}
if (addr) {
struct {
uint16_t id;
uint16_t nr;
uint32_t parm;
} intc = {
.id = cpu_to_be16(io->id),
.nr = cpu_to_be16(io->nr),
.parm = cpu_to_be32(io->parm),
};
if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
/* writing failed, reinject and properly clean up */
s390_io_interrupt(io->id, io->nr, io->parm, io->word);
qemu_mutex_unlock_iothread();
g_free(io);
s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
} else {
/* no protection applies */
lowcore = cpu_map_lowcore(env);
lowcore->subchannel_id = cpu_to_be16(io->id);
lowcore->subchannel_nr = cpu_to_be16(io->nr);
lowcore->io_int_parm = cpu_to_be32(io->parm);
lowcore->io_int_word = cpu_to_be32(io->word);
cpu_unmap_lowcore(lowcore);
}
g_free(io);
qemu_mutex_unlock_iothread();
return 1;
}
void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
@ -560,3 +633,91 @@ uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
return count_bytes >= max_bytes ? 0 : 3;
}
#ifndef CONFIG_USER_ONLY
/*
* Note: we ignore any return code of the functions called for the pci
* instructions, as the only time they return !0 is when the stub is
* called, and in that case we didn't even offer the zpci facility.
* The only exception is SIC, where program checks need to be handled
* by the caller.
*/
void HELPER(clp)(CPUS390XState *env, uint32_t r2)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
clp_service_call(cpu, r2, GETPC());
qemu_mutex_unlock_iothread();
}
void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
pcilg_service_call(cpu, r1, r2, GETPC());
qemu_mutex_unlock_iothread();
}
void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
pcistg_service_call(cpu, r1, r2, GETPC());
qemu_mutex_unlock_iothread();
}
void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
uint32_t ar)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
qemu_mutex_unlock_iothread();
}
void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
int r;
qemu_mutex_lock_iothread();
r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff);
qemu_mutex_unlock_iothread();
/* css_do_sic() may actually return a PGM_xxx value to inject */
if (r) {
s390_program_interrupt(env, -r, 4, GETPC());
}
}
void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
rpcit_service_call(cpu, r1, r2, GETPC());
qemu_mutex_unlock_iothread();
}
void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
uint64_t gaddr, uint32_t ar)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
qemu_mutex_unlock_iothread();
}
void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
uint32_t ar)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
qemu_mutex_unlock_iothread();
}
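/*
 * All of the pci helpers above follow the same pattern: take the big QEMU
 * lock (qemu_mutex_lock_iothread()), call the service routine declared in
 * hw/s390x/s390-pci-inst.h, and drop the lock again, since the emulation
 * touches device state shared with the main loop.
 */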
#endif

View File

@ -4199,6 +4199,14 @@ static ExitStatus op_stcrw(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
static ExitStatus op_tpi(DisasContext *s, DisasOps *o)
{
check_privileged(s);
gen_helper_tpi(cc_op, cpu_env, o->addr1);
set_cc_static(s);
return NO_EXIT;
}
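/*
 * Note on the pattern above: gen_helper_tpi() writes the helper's return
 * value into cc_op, and set_cc_static() marks that value as the final
 * condition code, so the guest observes cc 1 when an interruption code was
 * stored and cc 0 when nothing was pending.  The pci translators below use
 * the same helper-sets-cc convention.
 */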
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
@ -4777,6 +4785,106 @@ static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
static ExitStatus op_clp(DisasContext *s, DisasOps *o)
{
TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
check_privileged(s);
gen_helper_clp(cpu_env, r2);
tcg_temp_free_i32(r2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_pcilg(DisasContext *s, DisasOps *o)
{
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
check_privileged(s);
gen_helper_pcilg(cpu_env, r1, r2);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_pcistg(DisasContext *s, DisasOps *o)
{
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
check_privileged(s);
gen_helper_pcistg(cpu_env, r1, r2);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_stpcifc(DisasContext *s, DisasOps *o)
{
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
check_privileged(s);
gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
tcg_temp_free_i32(ar);
tcg_temp_free_i32(r1);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_sic(DisasContext *s, DisasOps *o)
{
check_privileged(s);
gen_helper_sic(cpu_env, o->in1, o->in2);
return NO_EXIT;
}
static ExitStatus op_rpcit(DisasContext *s, DisasOps *o)
{
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
check_privileged(s);
gen_helper_rpcit(cpu_env, r1, r2);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_pcistb(DisasContext *s, DisasOps *o)
{
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
check_privileged(s);
gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
tcg_temp_free_i32(ar);
tcg_temp_free_i32(r1);
tcg_temp_free_i32(r3);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_mpcifc(DisasContext *s, DisasOps *o)
{
TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
check_privileged(s);
gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
tcg_temp_free_i32(ar);
tcg_temp_free_i32(r1);
set_cc_static(s);
return NO_EXIT;
}
#endif
/* ====================================================================== */
/* The "Cc OUTput" generators. Given the generated output (and in some cases
the original inputs), update the various cc data structures in order to
@ -5708,6 +5816,8 @@ enum DisasInsnEnum {
#define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
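/*
 * FAC_PCI and FAC_AIS feed the facility column of the new C() rows in
 * insn-data.def, so the pci instructions are only accepted when the active
 * CPU model advertises the matching S390_FEAT_* bit; on a model without zpci
 * support the decoder is expected to raise an operation exception instead,
 * roughly along the lines of:
 *
 *   if (!s390_has_feat(insn->fac)) {
 *       gen_program_exception(s, PGM_OPERATION);
 *   }
 */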
static const DisasInsn insn_info[] = {
#include "insn-data.def"