From 8f364ec918c177cb5e72602f263fe0061066eea0 Mon Sep 17 00:00:00 2001
From: Konstantin Nazarov
Date: Tue, 20 Jul 2021 21:09:48 +0000
Subject: [PATCH] Add latest qemu-virgl patchset

---
 Patches/qemu-v04.diff | 63281 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 63281 insertions(+)
 create mode 100644 Patches/qemu-v04.diff

diff --git a/Patches/qemu-v04.diff b/Patches/qemu-v04.diff
new file mode 100644
index 0000000..994c5ab
--- /dev/null
+++ b/Patches/qemu-v04.diff
@@ -0,0 +1,63281 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 3026b979b7..7cec39a891 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -297,6 +297,8 @@ M: Richard Henderson
+ M: David Hildenbrand
+ S: Maintained
+ F: target/s390x/
++F: target/s390x/tcg
++F: target/s390x/cpu_models_*.[ch]
+ F: hw/s390x/
+ F: disas/s390.c
+ F: tests/tcg/s390x/
+@@ -393,9 +395,7 @@ M: Halil Pasic
+ M: Cornelia Huck
+ M: Christian Borntraeger
+ S: Supported
+-F: target/s390x/kvm.c
+-F: target/s390x/kvm_s390x.h
+-F: target/s390x/kvm-stub.c
++F: target/s390x/kvm/
+ F: target/s390x/ioinst.[ch]
+ F: target/s390x/machine.c
+ F: target/s390x/sigp.c
+@@ -432,6 +432,11 @@ F: accel/accel-*.c
+ F: accel/Makefile.objs
+ F: accel/stubs/Makefile.objs
+ 
++Apple Silicon HVF CPUs
++M: Alexander Graf
++S: Maintained
++F: target/arm/hvf/
++
+ X86 HVF CPUs
+ M: Cameron Esfahani
+ M: Roman Bolshakov
+@@ -1304,7 +1309,7 @@ S: Maintained
+ F: hw/ppc/prep.c
+ F: hw/ppc/prep_systemio.c
+ F: hw/ppc/rs6000_mc.c
+-F: hw/pci-host/prep.[hc]
++F: hw/pci-host/raven.c
+ F: hw/isa/i82378.c
+ F: hw/isa/pc87312.c
+ F: hw/dma/i82374.c
+@@ -1823,7 +1828,8 @@ F: include/hw/sd/sd*
+ F: hw/sd/core.c
+ F: hw/sd/sd*
+ F: hw/sd/ssi-sd.c
+-F: tests/qtest/sd*
++F: tests/qtest/fuzz-sdcard-test.c
++F: tests/qtest/sdhci-test.c
+ 
+ USB
+ M: Gerd Hoffmann
+diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
+index d1691be989..63ec8a6f25 100644
+--- a/accel/hvf/hvf-accel-ops.c
++++ b/accel/hvf/hvf-accel-ops.c
+@@ -60,6 +60,10 @@
+ 
+ HVFState *hvf_state;
+ 
++#ifdef __aarch64__
++#define HV_VM_DEFAULT NULL
++#endif
++
+ /* Memory slots */
+ 
+ hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
+@@ -365,17 +369,20 @@ static int hvf_init_vcpu(CPUState *cpu)
+     cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
+ 
+     /* init cpu signals */
+-    sigset_t set;
+     struct sigaction sigact;
+ 
+     memset(&sigact, 0, sizeof(sigact));
+     sigact.sa_handler = dummy_signal;
+     sigaction(SIG_IPI, &sigact, NULL);
+ 
+-    pthread_sigmask(SIG_BLOCK, NULL, &set);
+-    sigdelset(&set, SIG_IPI);
++    pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
++    sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
+ 
++#ifdef __aarch64__
++    r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
++#else
+     r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
++#endif
+     cpu->vcpu_dirty = 1;
+     assert_hvf_ok(r);
+ 
+@@ -446,11 +453,17 @@ static void hvf_start_vcpu_thread(CPUState *cpu)
+                        cpu, QEMU_THREAD_JOINABLE);
+ }
+ 
++__attribute__((weak)) void hvf_kick_vcpu_thread(CPUState *cpu)
++{
++    cpus_kick_thread(cpu);
++}
++
+ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
+ {
+     AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
+ 
+     ops->create_vcpu_thread = hvf_start_vcpu_thread;
++    ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
+ 
+     ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
+     ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
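The hvf hunk above relies on weak linking to let an architecture backend replace the generic vCPU kick: hvf-accel-ops.c installs a weak default for hvf_kick_vcpu_thread(), and a target-specific object (e.g. the Apple Silicon backend) may provide a strong definition that wins at link time. The standalone sketch below shows just the mechanism, with illustrative names rather than the QEMU API:

/* weak_kick.c — compile and run as-is to see the weak default fire;
 * linking in another object file that defines a non-weak
 * kick_vcpu_thread() would silently replace it, with no function
 * pointer or registration code needed. */
#include <stdio.h>

__attribute__((weak)) void kick_vcpu_thread(void)
{
    /* Generic fallback, analogous to cpus_kick_thread(cpu) above. */
    puts("generic kick: signal the vCPU thread");
}

int main(void)
{
    kick_vcpu_thread();
    return 0;
}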
+diff --git a/audio/coreaudio.c b/audio/coreaudio.c
+index d8a21d3e50..eb29be71d1 100644
+--- a/audio/coreaudio.c
++++ b/audio/coreaudio.c
+@@ -356,7 +356,10 @@ static OSStatus audioDeviceIOProc(
+ static OSStatus init_out_device(coreaudioVoiceOut *core)
+ {
+     OSStatus status;
++    AudioDeviceID deviceID;
+     AudioValueRange frameRange;
++    UInt32 audioDevicePropertyBufferFrameSize;
++    AudioDeviceIOProcID ioprocid;
+ 
+     AudioStreamBasicDescription streamBasicDescription = {
+         .mBitsPerChannel = core->hw.info.bits,
+@@ -369,20 +372,19 @@ static OSStatus init_out_device(coreaudioVoiceOut *core)
+         .mSampleRate = core->hw.info.freq
+     };
+ 
+-    status = coreaudio_get_voice(&core->outputDeviceID);
++    status = coreaudio_get_voice(&deviceID);
+     if (status != kAudioHardwareNoError) {
+         coreaudio_playback_logerr (status,
+                                    "Could not get default output Device\n");
+         return status;
+     }
+-    if (core->outputDeviceID == kAudioDeviceUnknown) {
++    if (deviceID == kAudioDeviceUnknown) {
+         dolog ("Could not initialize playback - Unknown Audiodevice\n");
+         return status;
+     }
+ 
+     /* get minimum and maximum buffer frame sizes */
+-    status = coreaudio_get_framesizerange(core->outputDeviceID,
+-                                          &frameRange);
++    status = coreaudio_get_framesizerange(deviceID, &frameRange);
+     if (status == kAudioHardwareBadObjectError) {
+         return 0;
+     }
+@@ -393,31 +395,31 @@ static OSStatus init_out_device(coreaudioVoiceOut *core)
+     }
+ 
+     if (frameRange.mMinimum > core->frameSizeSetting) {
+-        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMinimum;
++        audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMinimum;
+         dolog ("warning: Upsizing Buffer Frames to %f\n", frameRange.mMinimum);
+     } else if (frameRange.mMaximum < core->frameSizeSetting) {
+-        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMaximum;
++        audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMaximum;
+         dolog ("warning: Downsizing Buffer Frames to %f\n", frameRange.mMaximum);
+     } else {
+-        core->audioDevicePropertyBufferFrameSize = core->frameSizeSetting;
++        audioDevicePropertyBufferFrameSize = core->frameSizeSetting;
+     }
+ 
+     /* set Buffer Frame Size */
+-    status = coreaudio_set_framesize(core->outputDeviceID,
+-                                     &core->audioDevicePropertyBufferFrameSize);
++    status = coreaudio_set_framesize(deviceID,
++                                     &audioDevicePropertyBufferFrameSize);
+     if (status == kAudioHardwareBadObjectError) {
+         return 0;
+     }
+     if (status != kAudioHardwareNoError) {
+         coreaudio_playback_logerr (status,
+                                    "Could not set device buffer frame size %" PRIu32 "\n",
+-                                   (uint32_t)core->audioDevicePropertyBufferFrameSize);
++                                   (uint32_t)audioDevicePropertyBufferFrameSize);
+         return status;
+     }
+ 
+     /* get Buffer Frame Size */
+-    status = coreaudio_get_framesize(core->outputDeviceID,
+-                                     &core->audioDevicePropertyBufferFrameSize);
++    status = coreaudio_get_framesize(deviceID,
++                                     &audioDevicePropertyBufferFrameSize);
+     if (status == kAudioHardwareBadObjectError) {
+         return 0;
+     }
+@@ -426,11 +428,9 @@ static OSStatus init_out_device(coreaudioVoiceOut *core)
+                                    "Could not get device buffer frame size\n");
+         return status;
+     }
+-    core->hw.samples = core->bufferCount * core->audioDevicePropertyBufferFrameSize;
+ 
+     /* set Samplerate */
+-    status = coreaudio_set_streamformat(core->outputDeviceID,
+-                                        &streamBasicDescription);
++    status = coreaudio_set_streamformat(deviceID, &streamBasicDescription);
+     if (status == kAudioHardwareBadObjectError) {
+         return 0;
+     }
+@@ -438,7 +438,6 @@ static OSStatus init_out_device(coreaudioVoiceOut *core)
+         coreaudio_playback_logerr (status,
+                                    "Could not set samplerate %lf\n",
+                                    streamBasicDescription.mSampleRate);
+-        core->outputDeviceID = kAudioDeviceUnknown;
+         return status;
+     }
+ 
+@@ -452,20 +451,24 @@ static OSStatus init_out_device(coreaudioVoiceOut *core)
+      * Therefore, the specified callback must be designed to avoid a deadlock
+      * with the callers of AudioObjectGetPropertyData.
+      */
+-    core->ioprocid = NULL;
+-    status = AudioDeviceCreateIOProcID(core->outputDeviceID,
++    ioprocid = NULL;
++    status = AudioDeviceCreateIOProcID(deviceID,
+                                        audioDeviceIOProc,
+                                        &core->hw,
+-                                       &core->ioprocid);
++                                       &ioprocid);
+     if (status == kAudioHardwareBadDeviceError) {
+         return 0;
+     }
+-    if (status != kAudioHardwareNoError || core->ioprocid == NULL) {
++    if (status != kAudioHardwareNoError || ioprocid == NULL) {
+         coreaudio_playback_logerr (status, "Could not set IOProc\n");
+-        core->outputDeviceID = kAudioDeviceUnknown;
+         return status;
+     }
+ 
++    core->outputDeviceID = deviceID;
++    core->audioDevicePropertyBufferFrameSize = audioDevicePropertyBufferFrameSize;
++    core->hw.samples = core->bufferCount * core->audioDevicePropertyBufferFrameSize;
++    core->ioprocid = ioprocid;
++
+     return 0;
+ }
+ 
+@@ -551,7 +554,7 @@ static OSStatus handle_voice_change(
+     }
+ 
+     status = init_out_device(core);
+-    if (!status) {
++    if (core->outputDeviceID) {
+         update_device_playback_state(core);
+     }
+ 
+diff --git a/block/file-posix.c b/block/file-posix.c
+index cb9bffe047..6fbde4484d 100644
+--- a/block/file-posix.c
++++ b/block/file-posix.c
+@@ -134,6 +134,14 @@
+ #define RAW_LOCK_PERM_BASE             100
+ #define RAW_LOCK_SHARED_BASE           200
+ 
++/*
++ * qemu_lock_fd_test always returns 0 when fd is not open for writing and
++ * exclusive is true on macOS 11.3.1.
++ */
++#ifdef __APPLE__
++#define RAW_LOCK_WRITES
++#endif
++
+ typedef struct BDRVRawState {
+     int fd;
+     bool use_lock;
+@@ -652,7 +660,11 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
+                                false);
+ 
+     s->open_flags = open_flags;
++#ifdef RAW_LOCK_WRITES
++    raw_parse_flags(bdrv_flags, &s->open_flags, s->use_lock);
++#else
+     raw_parse_flags(bdrv_flags, &s->open_flags, false);
++#endif
+ 
+     s->fd = -1;
+     fd = qemu_open(filename, s->open_flags, errp);
+@@ -1018,6 +1030,11 @@ static int raw_reconfigure_getfd(BlockDriverState *bs, int flags,
+     bool has_writers = perm &
+         (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_RESIZE);
+     int fcntl_flags = O_APPEND | O_NONBLOCK;
++#ifdef RAW_LOCK_WRITES
++    if (s->use_lock) {
++        has_writers = true;
++    }
++#endif
+ #ifdef O_NOATIME
+     fcntl_flags |= O_NOATIME;
+ #endif
+diff --git a/block/nbd.c b/block/nbd.c
+index 601fccc5ba..f6ff1c4fb4 100644
+--- a/block/nbd.c
++++ b/block/nbd.c
+@@ -371,6 +371,9 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
+         return -ECONNREFUSED;
+     }
+ 
++    yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
++                           bs);
++
+     ret = nbd_handle_updated_info(s->bs, NULL);
+     if (ret < 0) {
+         /*
+@@ -381,6 +384,8 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
+ 
+         nbd_send_request(s->ioc, &request);
+ 
++        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
++                                 nbd_yank, bs);
+         object_unref(OBJECT(s->ioc));
+         s->ioc = NULL;
+ 
+@@ -390,9 +395,6 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
+     qio_channel_set_blocking(s->ioc, false, NULL);
+     qio_channel_attach_aio_context(s->ioc, bdrv_get_aio_context(bs));
+ 
+-    yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
+-                           bs);
+-
+     /* successfully connected */
+     s->state = NBD_CLIENT_CONNECTED;
+     qemu_co_queue_restart_all(&s->free_sema);
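The RAW_LOCK_WRITES hunks above work around qemu_lock_fd_test() reporting no conflict on macOS 11.3.1 unless the descriptor is open for writing. The standalone probe below is a hedged sketch of that test shape, not QEMU code; it uses plain POSIX F_GETLK (QEMU itself prefers open-file-description locks where the platform provides them):

/* lock_probe.c — ask whether an exclusive (write) lock *would* conflict,
 * through a descriptor that is only open for reading. The patch's claim
 * is that on the affected macOS releases this reports F_UNLCK regardless
 * of other holders, hence the forced writable descriptor. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    if (argc != 2) {
        fprintf(stderr, "usage: %s FILE\n", argv[0]);
        return 1;
    }
    int fd = open(argv[1], O_RDONLY);   /* note: read-only descriptor */
    if (fd < 0) {
        perror("open");
        return 1;
    }
    struct flock fl = {
        .l_type = F_WRLCK,              /* "exclusive" query */
        .l_whence = SEEK_SET,
    };
    if (fcntl(fd, F_GETLK, &fl) < 0) {
        perror("fcntl");
        return 1;
    }
    printf("conflict: %s\n", fl.l_type == F_UNLCK ? "none reported" : "yes");
    close(fd);
    return 0;
}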
+diff --git a/configure b/configure
+index 85db248ac1..f711778086 100755
+--- a/configure
++++ b/configure
+@@ -377,6 +377,7 @@ u2f="auto"
+ libusb="auto"
+ usb_redir="auto"
+ opengl="$default_feature"
++egl="no"
+ cpuid_h="no"
+ avx2_opt="$default_feature"
+ capstone="auto"
+@@ -3380,18 +3381,7 @@ if $pkg_config gbm; then
+ fi
+ 
+ if test "$opengl" != "no" ; then
+-  epoxy=no
+   if $pkg_config epoxy; then
+-    cat > $TMPC << EOF
+-#include
+-int main(void) { return 0; }
+-EOF
+-    if compile_prog "" "" ; then
+-      epoxy=yes
+-    fi
+-  fi
+-
+-  if test "$epoxy" = "yes" ; then
+     opengl_cflags="$($pkg_config --cflags epoxy)"
+     opengl_libs="$($pkg_config --libs epoxy)"
+     opengl=yes
+@@ -3405,6 +3395,16 @@ EOF
+   fi
+ fi
+ 
++if test "$opengl" = "yes"; then
++  cat > $TMPC << EOF
++#include
++int main(void) { return 0; }
++EOF
++  if compile_prog "" "" ; then
++    egl=yes
++  fi
++fi
++
+ ##########################################
+ # libnuma probe
+ 
+@@ -3606,21 +3606,8 @@ fi
+ ##########################################
+ # For 'ust' backend, test if ust headers are present
+ if have_backend "ust"; then
+-  cat > $TMPC << EOF
+-#include
+-int main(void) { return 0; }
+-EOF
+-  if compile_prog "" "-Wl,--no-as-needed -ldl" ; then
+-    if $pkg_config lttng-ust --exists; then
+-      lttng_ust_libs=$($pkg_config --libs lttng-ust)
+-    else
+-      lttng_ust_libs="-llttng-ust -ldl"
+-    fi
+-    if $pkg_config liburcu-bp --exists; then
+-      urcu_bp_libs=$($pkg_config --libs liburcu-bp)
+-    else
+-      urcu_bp_libs="-lurcu-bp"
+-    fi
++  if $pkg_config lttng-ust --exists; then
++    lttng_ust_libs=$($pkg_config --libs lttng-ust)
+   else
+     error_exit "Trace backend 'ust' missing lttng-ust header files"
+   fi
+@@ -4671,6 +4658,9 @@ if test "$opengl" = "yes" ; then
+   echo "CONFIG_OPENGL=y" >> $config_host_mak
+   echo "OPENGL_CFLAGS=$opengl_cflags" >> $config_host_mak
+   echo "OPENGL_LIBS=$opengl_libs" >> $config_host_mak
++  if test "$egl" = "yes" ; then
++    echo "CONFIG_EGL=y" >> $config_host_mak
++  fi
+ fi
+ 
+ if test "$gbm" = "yes" ; then
+@@ -4773,7 +4763,6 @@ fi
+ if have_backend "ust"; then
+   echo "CONFIG_TRACE_UST=y" >> $config_host_mak
+   echo "LTTNG_UST_LIBS=$lttng_ust_libs" >> $config_host_mak
+-  echo "URCU_BP_LIBS=$urcu_bp_libs" >> $config_host_mak
+ fi
+ if have_backend "dtrace"; then
+   echo "CONFIG_TRACE_DTRACE=y" >> $config_host_mak
+diff --git a/contrib/vhost-user-gpu/virgl.c b/contrib/vhost-user-gpu/virgl.c
+index 3e45e1bd33..33930b1847 100644
+--- a/contrib/vhost-user-gpu/virgl.c
++++ b/contrib/vhost-user-gpu/virgl.c
+@@ -323,7 +323,7 @@ virgl_cmd_set_scanout(VuGpu *g,
+                       struct virtio_gpu_ctrl_command *cmd)
+ {
+     struct virtio_gpu_set_scanout ss;
+-    struct virgl_renderer_resource_info info;
++    struct virgl_renderer_texture_info info;
+     int ret;
+ 
+     VUGPU_FILL_CMD(ss);
+@@ -338,7 +338,7 @@ virgl_cmd_set_scanout(VuGpu *g,
+     memset(&info, 0, sizeof(info));
+ 
+     if (ss.resource_id && ss.r.width && ss.r.height) {
+-        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
++        ret = virgl_renderer_borrow_texture_for_scanout(ss.resource_id, &info);
+         if (ret == -1) {
+             g_critical("%s: illegal resource specified %d\n",
+                        __func__, ss.resource_id);
+diff --git a/docs/tools/qemu-img.rst b/docs/tools/qemu-img.rst
+index cfe1147879..1d8470eada 100644
+--- a/docs/tools/qemu-img.rst
++++ b/docs/tools/qemu-img.rst
+@@ -593,13 +593,16 @@ Command description:
+   the ``start``, ``length``, ``offset`` fields;
+   it will also include other more specific information:
+ 
+-  - whether the sectors contain actual data or not (boolean field ``data``;
+-    if false, the sectors are either unallocated or stored as optimized
+-    all-zero clusters);
+-  - whether the data is known to read as zero (boolean field ``zero``);
+-  - in order to make the output shorter, the target file is expressed as
+-    a ``depth``; for example, a depth of 2 refers to the backing file
+-    of the backing file of *FILENAME*.
++  - boolean field ``data``: true if the sectors contain actual data,
++    false if the sectors are either unallocated or stored as optimized
++    all-zero clusters
++  - boolean field ``zero``: true if the data is known to read as zero
++  - boolean field ``present``: true if the data belongs to the backing
++    chain, false if rebasing the backing chain onto a deeper file
++    would pick up data from the deeper file;
++  - integer field ``depth``: the depth within the backing chain at
++    which the data was resolved; for example, a depth of 2 refers to
++    the backing file of the backing file of *FILENAME*.
+ 
+   In JSON format, the ``offset`` field is optional; it is absent in
+   cases where ``human`` format would omit the entry or exit with an error.
+diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
+index 49df56cd14..f88d548164 100644
+--- a/hw/display/vhost-user-gpu.c
++++ b/hw/display/vhost-user-gpu.c
+@@ -249,7 +249,7 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
+         }
+ 
+         con = g->parent_obj.scanout[m->scanout_id].con;
+-        if (!console_has_gl(con)) {
++        if (!console_has_gl()) {
+             error_report("console doesn't support GL!");
+             vhost_user_gpu_unblock(g);
+             break;
+diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c
+index c8da4806e0..765eb08921 100644
+--- a/hw/display/virtio-gpu-base.c
++++ b/hw/display/virtio-gpu-base.c
+@@ -79,6 +79,7 @@ static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
+ 
+     g->req_state[idx].x = info->xoff;
+     g->req_state[idx].y = info->yoff;
++    g->req_state[idx].refresh_rate = info->refresh_rate;
+     g->req_state[idx].width = info->width;
+     g->req_state[idx].height = info->height;
+     g->req_state[idx].width_mm = info->width_mm;
+diff --git a/hw/display/virtio-gpu-gl.c b/hw/display/virtio-gpu-gl.c
+index 7ab93bf8c8..ef0d0b75aa 100644
+--- a/hw/display/virtio-gpu-gl.c
++++ b/hw/display/virtio-gpu-gl.c
+@@ -90,17 +90,12 @@ static void virtio_gpu_gl_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+ 
+ static void virtio_gpu_gl_reset(VirtIODevice *vdev)
+ {
+-    VirtIOGPU *g = VIRTIO_GPU(vdev);
+     VirtIOGPUGL *gl = VIRTIO_GPU_GL(vdev);
+ 
+     virtio_gpu_reset(vdev);
+ 
+     if (gl->renderer_inited) {
+-        if (g->parent_obj.renderer_blocked) {
+-            gl->renderer_reset = true;
+-        } else {
+-            virtio_gpu_virgl_reset(g);
+-        }
++        gl->renderer_reset = true;
+     }
+ }
+ 
+diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
+index 092c6dc380..99a93720b0 100644
+--- a/hw/display/virtio-gpu-virgl.c
++++ b/hw/display/virtio-gpu-virgl.c
+@@ -140,12 +140,39 @@ static void virgl_cmd_resource_flush(VirtIOGPU *g,
+     }
+ }
+ 
++static GLuint virgl_borrow_texture_for_scanout(uint32_t id, bool *y_0_top,
++                                               uint32_t *width,
++                                               uint32_t *height)
++{
++    struct virgl_renderer_texture_info info;
++    int ret;
++
++    memset(&info, 0, sizeof(info));
++
++    ret = virgl_renderer_borrow_texture_for_scanout(id, &info);
++    if (ret == -1) {
++        return 0;
++    }
++
++    if (y_0_top) {
++        *y_0_top = info.flags & 1 /* FIXME: Y_0_TOP */;
++    }
++
++    if (width) {
++        *width = info.width;
++    }
++
++    if (height) {
++        *height = info.height;
++    }
++
++    return info.tex_id;
++}
++
+ static void virgl_cmd_set_scanout(VirtIOGPU *g,
+                                   struct virtio_gpu_ctrl_command *cmd)
+ {
+     struct virtio_gpu_set_scanout ss;
+-    struct virgl_renderer_resource_info info;
+-    int ret;
+ 
+     VIRTIO_GPU_FILL_CMD(ss);
+     trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
+@@ -159,24 +186,13 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g,
+     }
+     g->parent_obj.enable = 1;
+ 
+-    memset(&info, 0, sizeof(info));
+-
+     if (ss.resource_id && ss.r.width && ss.r.height) {
+-        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
+-        if (ret == -1) {
+-            qemu_log_mask(LOG_GUEST_ERROR,
+-                          "%s: illegal resource specified %d\n",
+-                          __func__, ss.resource_id);
+-            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+-            return;
+-        }
+         qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
+                             ss.r.width, ss.r.height);
+         virgl_renderer_force_ctx_0();
+         dpy_gl_scanout_texture(
+-            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
+-            info.flags & 1 /* FIXME: Y_0_TOP */,
+-            info.width, info.height,
++            g->parent_obj.scanout[ss.scanout_id].con, ss.resource_id,
++            virgl_borrow_texture_for_scanout,
+             ss.r.x, ss.r.y, ss.r.width, ss.r.height);
+     } else {
+         dpy_gfx_replace_surface(
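The virtio-gpu-virgl hunks above replace the eager virgl_renderer_resource_get_info() lookup at SET_SCANOUT time with a borrow callback the display core invokes only when it actually composes a frame. A minimal sketch of that inversion, with invented names standing in for the series' API:

/* borrow_sketch.c — store the resource id plus a callback instead of a
 * resolved texture; the texture name, size and orientation are fetched
 * lazily at draw time, so the renderer can swap the backing texture
 * between frames. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t (*borrow_texture_fn)(uint32_t id, bool *y_0_top,
                                      uint32_t *width, uint32_t *height);

struct scanout {
    uint32_t resource_id;
    borrow_texture_fn borrow;   /* resolved late, at compose time */
};

static uint32_t fake_borrow(uint32_t id, bool *y_0_top,
                            uint32_t *w, uint32_t *h)
{
    /* A real implementation would query the renderer, e.g. the series'
     * virgl_renderer_borrow_texture_for_scanout(). */
    if (y_0_top) *y_0_top = false;
    if (w) *w = 640;
    if (h) *h = 480;
    return id + 1000;           /* pretend GL texture name */
}

static void compose_frame(struct scanout *s)
{
    bool flip;
    uint32_t w, h;
    uint32_t tex = s->borrow(s->resource_id, &flip, &w, &h);
    printf("draw tex %u (%ux%u, y0top=%d)\n", tex, w, h, flip);
}

int main(void)
{
    struct scanout s = { .resource_id = 5, .borrow = fake_borrow };
    compose_frame(&s);          /* texture is looked up only here */
    return 0;
}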
+diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
+index 6b7f643951..274cd46f89 100644
+--- a/hw/display/virtio-gpu.c
++++ b/hw/display/virtio-gpu.c
+@@ -217,6 +217,7 @@ virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
+         .height_mm = b->req_state[scanout].height_mm,
+         .prefx = b->req_state[scanout].width,
+         .prefy = b->req_state[scanout].height,
++        .refresh_rate = b->req_state[scanout].refresh_rate,
+     };
+ 
+     edid->size = cpu_to_le32(sizeof(edid->edid));
+@@ -520,7 +521,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
+     for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
+         scanout = &g->parent_obj.scanout[i];
+         if (scanout->resource_id == res->resource_id &&
+-            console_has_gl(scanout->con)) {
++            console_has_gl()) {
+             dpy_gl_update(scanout->con, 0, 0, scanout->width,
+                           scanout->height);
+             return;
+@@ -632,7 +633,7 @@ static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
+     g->parent_obj.enable = 1;
+ 
+     if (res->blob) {
+-        if (console_has_gl(scanout->con)) {
++        if (console_has_gl()) {
+             if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb)) {
+                 virtio_gpu_update_scanout(g, scanout_id, res, r);
+                 return;
+@@ -645,7 +646,7 @@ static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
+     }
+ 
+     /* create a surface for this scanout */
+-    if ((res->blob && !console_has_gl(scanout->con)) ||
++    if ((res->blob && !console_has_gl()) ||
+         !scanout->ds ||
+         surface_data(scanout->ds) != data + fb->offset ||
+         scanout->width != r->width ||
+diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
+index 838260b6ad..a53341ef67 100644
+--- a/hw/display/xenfb.c
++++ b/hw/display/xenfb.c
+@@ -777,16 +777,24 @@ static void xenfb_update(void *opaque)
+     xenfb->up_fullscreen = 0;
+ }
+ 
+-static void xenfb_update_interval(void *opaque, uint64_t interval)
++static void xenfb_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
+ {
+     struct XenFB *xenfb = opaque;
++    uint32_t refresh_rate;
+ 
+     if (xenfb->feature_update) {
+ #ifdef XENFB_TYPE_REFRESH_PERIOD
+         if (xenfb_queue_full(xenfb)) {
+             return;
+         }
+-        xenfb_send_refresh_period(xenfb, interval);
++
++        refresh_rate = info->refresh_rate;
++        if (!refresh_rate) {
++            refresh_rate = 75;
++        }
++
++        /* T = 1 / f = 1 [s*Hz] / f = 1000*1000 [ms*mHz] / f */
++        xenfb_send_refresh_period(xenfb, 1000 * 1000 / refresh_rate);
+ #endif
+     }
+ }
+@@ -983,5 +991,5 @@ struct XenDevOps xen_framebuffer_ops = {
+ static const GraphicHwOps xenfb_ops = {
+     .invalidate = xenfb_invalidate,
+     .gfx_update = xenfb_update,
+-    .update_interval = xenfb_update_interval,
++    .ui_info = xenfb_ui_info,
+ };
+diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c
+index 929cfa3a68..efe5054182 100644
+--- a/hw/intc/s390_flic_kvm.c
++++ b/hw/intc/s390_flic_kvm.c
+@@ -11,7 +11,7 @@
+  */
+ 
+ #include "qemu/osdep.h"
+-#include "kvm_s390x.h"
++#include "kvm/kvm_s390x.h"
+ #include
+ #include "qemu/error-report.h"
+ #include "qemu/module.h"
+diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c
+index 252c0a2664..45b954e46c 100644
+--- a/hw/net/dp8393x.c
++++ b/hw/net/dp8393x.c
+@@ -85,6 +85,7 @@ static const char *reg_names[] = {
+ #define SONIC_MPT    0x2e
+ #define SONIC_MDT    0x2f
+ #define SONIC_DCR2   0x3f
++#define SONIC_REG_COUNT  0x40
+ 
+ #define SONIC_CR_HTX     0x0001
+ #define SONIC_CR_TXP     0x0002
+@@ -157,12 +158,11 @@ struct dp8393xState {
+     MemoryRegion mmio;
+ 
+     /* Registers */
+-    uint8_t cam[16][6];
+-    uint16_t regs[0x40];
++    uint16_t cam[16][3];
++    uint16_t regs[SONIC_REG_COUNT];
+ 
+     /* Temporaries */
+     uint8_t tx_buffer[0x10000];
+-    uint16_t data[12];
+     int loopback_packet;
+ 
+     /* Memory access */
+@@ -219,34 +219,48 @@ static uint32_t dp8393x_wt(dp8393xState *s)
+     return s->regs[SONIC_WT1] << 16 | s->regs[SONIC_WT0];
+ }
+ 
+-static uint16_t dp8393x_get(dp8393xState *s, int width, int offset)
++static uint16_t dp8393x_get(dp8393xState *s, hwaddr addr, int offset)
+ {
++    const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
+     uint16_t val;
+ 
+-    if (s->big_endian) {
+-        val = be16_to_cpu(s->data[offset * width + width - 1]);
++    if (s->regs[SONIC_DCR] & SONIC_DCR_DW) {
++        addr += offset << 2;
++        if (s->big_endian) {
++            val = address_space_ldl_be(&s->as, addr, attrs, NULL);
++        } else {
++            val = address_space_ldl_le(&s->as, addr, attrs, NULL);
++        }
+     } else {
+-        val = le16_to_cpu(s->data[offset * width]);
++        addr += offset << 1;
++        if (s->big_endian) {
++            val = address_space_lduw_be(&s->as, addr, attrs, NULL);
++        } else {
++            val = address_space_lduw_le(&s->as, addr, attrs, NULL);
++        }
+     }
++
+     return val;
+ }
+ 
+-static void dp8393x_put(dp8393xState *s, int width, int offset,
+-                        uint16_t val)
++static void dp8393x_put(dp8393xState *s,
++                        hwaddr addr, int offset, uint16_t val)
+ {
+-    if (s->big_endian) {
+-        if (width == 2) {
+-            s->data[offset * 2] = 0;
+-            s->data[offset * 2 + 1] = cpu_to_be16(val);
++    const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
++
++    if (s->regs[SONIC_DCR] & SONIC_DCR_DW) {
++        addr += offset << 2;
++        if (s->big_endian) {
++            address_space_stl_be(&s->as, addr, val, attrs, NULL);
+         } else {
+-            s->data[offset] = cpu_to_be16(val);
++            address_space_stl_le(&s->as, addr, val, attrs, NULL);
+         }
+     } else {
+-        if (width == 2) {
+-            s->data[offset * 2] = cpu_to_le16(val);
+-            s->data[offset * 2 + 1] = 0;
++        addr += offset << 1;
++        if (s->big_endian) {
++            address_space_stw_be(&s->as, addr, val, attrs, NULL);
+         } else {
+-            s->data[offset] = cpu_to_le16(val);
++            address_space_stw_le(&s->as, addr, val, attrs, NULL);
+         }
+     }
+ }
+@@ -270,34 +284,28 @@ static void dp8393x_update_irq(dp8393xState *s)
+ static void dp8393x_do_load_cam(dp8393xState *s)
+ {
+     int width, size;
+-    uint16_t index = 0;
++    uint16_t index;
+ 
+     width = (s->regs[SONIC_DCR] & SONIC_DCR_DW) ? 2 : 1;
+     size = sizeof(uint16_t) * 4 * width;
+ 
+     while (s->regs[SONIC_CDC] & 0x1f) {
+         /* Fill current entry */
+-        address_space_read(&s->as, dp8393x_cdp(s),
+-                           MEMTXATTRS_UNSPECIFIED, s->data, size);
+-        s->cam[index][0] = dp8393x_get(s, width, 1) & 0xff;
+-        s->cam[index][1] = dp8393x_get(s, width, 1) >> 8;
+-        s->cam[index][2] = dp8393x_get(s, width, 2) & 0xff;
+-        s->cam[index][3] = dp8393x_get(s, width, 2) >> 8;
+-        s->cam[index][4] = dp8393x_get(s, width, 3) & 0xff;
+-        s->cam[index][5] = dp8393x_get(s, width, 3) >> 8;
+-        trace_dp8393x_load_cam(index, s->cam[index][0], s->cam[index][1],
+-                               s->cam[index][2], s->cam[index][3],
+-                               s->cam[index][4], s->cam[index][5]);
++        index = dp8393x_get(s, dp8393x_cdp(s), 0) & 0xf;
++        s->cam[index][0] = dp8393x_get(s, dp8393x_cdp(s), 1);
++        s->cam[index][1] = dp8393x_get(s, dp8393x_cdp(s), 2);
++        s->cam[index][2] = dp8393x_get(s, dp8393x_cdp(s), 3);
++        trace_dp8393x_load_cam(index,
++                               s->cam[index][0] >> 8, s->cam[index][0] & 0xff,
++                               s->cam[index][1] >> 8, s->cam[index][1] & 0xff,
++                               s->cam[index][2] >> 8, s->cam[index][2] & 0xff);
+         /* Move to next entry */
+         s->regs[SONIC_CDC]--;
+         s->regs[SONIC_CDP] += size;
+-        index++;
+     }
+ 
+     /* Read CAM enable */
+-    address_space_read(&s->as, dp8393x_cdp(s),
+-                       MEMTXATTRS_UNSPECIFIED, s->data, size);
+-    s->regs[SONIC_CE] = dp8393x_get(s, width, 0);
++    s->regs[SONIC_CE] = dp8393x_get(s, dp8393x_cdp(s), 0);
+     trace_dp8393x_load_cam_done(s->regs[SONIC_CE]);
+ 
+     /* Done */
+@@ -313,14 +321,12 @@ static void dp8393x_do_read_rra(dp8393xState *s)
+     /* Read memory */
+     width = (s->regs[SONIC_DCR] & SONIC_DCR_DW) ? 2 : 1;
+     size = sizeof(uint16_t) * 4 * width;
+-    address_space_read(&s->as, dp8393x_rrp(s),
+-                       MEMTXATTRS_UNSPECIFIED, s->data, size);
+ 
+     /* Update SONIC registers */
+-    s->regs[SONIC_CRBA0] = dp8393x_get(s, width, 0);
+-    s->regs[SONIC_CRBA1] = dp8393x_get(s, width, 1);
+-    s->regs[SONIC_RBWC0] = dp8393x_get(s, width, 2);
+-    s->regs[SONIC_RBWC1] = dp8393x_get(s, width, 3);
++    s->regs[SONIC_CRBA0] = dp8393x_get(s, dp8393x_rrp(s), 0);
++    s->regs[SONIC_CRBA1] = dp8393x_get(s, dp8393x_rrp(s), 1);
++    s->regs[SONIC_RBWC0] = dp8393x_get(s, dp8393x_rrp(s), 2);
++    s->regs[SONIC_RBWC1] = dp8393x_get(s, dp8393x_rrp(s), 3);
+     trace_dp8393x_read_rra_regs(s->regs[SONIC_CRBA0], s->regs[SONIC_CRBA1],
+                                 s->regs[SONIC_RBWC0], s->regs[SONIC_RBWC1]);
+ 
+@@ -416,28 +422,22 @@ static void dp8393x_do_receiver_disable(dp8393xState *s)
+ static void dp8393x_do_transmit_packets(dp8393xState *s)
+ {
+     NetClientState *nc = qemu_get_queue(s->nic);
+-    int width, size;
+     int tx_len, len;
+     uint16_t i;
+ 
+-    width = (s->regs[SONIC_DCR] & SONIC_DCR_DW) ? 2 : 1;
+-
+     while (1) {
+         /* Read memory */
+-        size = sizeof(uint16_t) * 6 * width;
+         s->regs[SONIC_TTDA] = s->regs[SONIC_CTDA];
+         trace_dp8393x_transmit_packet(dp8393x_ttda(s));
+-        address_space_read(&s->as, dp8393x_ttda(s) + sizeof(uint16_t) * width,
+-                           MEMTXATTRS_UNSPECIFIED, s->data, size);
+         tx_len = 0;
+ 
+         /* Update registers */
+-        s->regs[SONIC_TCR] = dp8393x_get(s, width, 0) & 0xf000;
+-        s->regs[SONIC_TPS] = dp8393x_get(s, width, 1);
+-        s->regs[SONIC_TFC] = dp8393x_get(s, width, 2);
+-        s->regs[SONIC_TSA0] = dp8393x_get(s, width, 3);
+-        s->regs[SONIC_TSA1] = dp8393x_get(s, width, 4);
+-        s->regs[SONIC_TFS] = dp8393x_get(s, width, 5);
++        s->regs[SONIC_TCR] = dp8393x_get(s, dp8393x_ttda(s), 1) & 0xf000;
++        s->regs[SONIC_TPS] = dp8393x_get(s, dp8393x_ttda(s), 2);
++        s->regs[SONIC_TFC] = dp8393x_get(s, dp8393x_ttda(s), 3);
++        s->regs[SONIC_TSA0] = dp8393x_get(s, dp8393x_ttda(s), 4);
++        s->regs[SONIC_TSA1] = dp8393x_get(s, dp8393x_ttda(s), 5);
++        s->regs[SONIC_TFS] = dp8393x_get(s, dp8393x_ttda(s), 6);
+ 
+         /* Handle programmable interrupt */
+         if (s->regs[SONIC_TCR] & SONIC_TCR_PINT) {
+@@ -459,15 +459,12 @@ static void dp8393x_do_transmit_packets(dp8393xState *s)
+             i++;
+             if (i != s->regs[SONIC_TFC]) {
+                 /* Read next fragment details */
+-                size = sizeof(uint16_t) * 3 * width;
+-                address_space_read(&s->as,
+-                                   dp8393x_ttda(s)
+-                                   + sizeof(uint16_t) * width * (4 + 3 * i),
+-                                   MEMTXATTRS_UNSPECIFIED, s->data,
+-                                   size);
+-                s->regs[SONIC_TSA0] = dp8393x_get(s, width, 0);
+-                s->regs[SONIC_TSA1] = dp8393x_get(s, width, 1);
+-                s->regs[SONIC_TFS] = dp8393x_get(s, width, 2);
++                s->regs[SONIC_TSA0] = dp8393x_get(s, dp8393x_ttda(s),
++                                                  4 + 3 * i);
++                s->regs[SONIC_TSA1] = dp8393x_get(s, dp8393x_ttda(s),
++                                                  5 + 3 * i);
++                s->regs[SONIC_TFS] = dp8393x_get(s, dp8393x_ttda(s),
++                                                  6 + 3 * i);
+             }
+         }
+ 
+@@ -500,22 +497,12 @@ static void dp8393x_do_transmit_packets(dp8393xState *s)
+             s->regs[SONIC_TCR] |= SONIC_TCR_PTX;
+ 
+             /* Write status */
+-            dp8393x_put(s, width, 0,
+-                        s->regs[SONIC_TCR] & 0x0fff); /* status */
+-            size = sizeof(uint16_t) * width;
+-            address_space_write(&s->as, dp8393x_ttda(s),
+-                                MEMTXATTRS_UNSPECIFIED, s->data, size);
++            dp8393x_put(s, dp8393x_ttda(s), 0, s->regs[SONIC_TCR] & 0x0fff);
+ 
+             if (!(s->regs[SONIC_CR] & SONIC_CR_HTX)) {
+                 /* Read footer of packet */
+-                size = sizeof(uint16_t) * width;
+-                address_space_read(&s->as,
+-                                   dp8393x_ttda(s)
+-                                   + sizeof(uint16_t) * width
+-                                   * (4 + 3 * s->regs[SONIC_TFC]),
+-                                   MEMTXATTRS_UNSPECIFIED, s->data,
+-                                   size);
+-                s->regs[SONIC_CTDA] = dp8393x_get(s, width, 0);
++                s->regs[SONIC_CTDA] = dp8393x_get(s, dp8393x_ttda(s),
++                                                  4 + 3 * s->regs[SONIC_TFC]);
+                 if (s->regs[SONIC_CTDA] & SONIC_DESC_EOL) {
+                     /* EOL detected */
+                     break;
+@@ -591,8 +578,7 @@ static uint64_t dp8393x_read(void *opaque, hwaddr addr, unsigned int size)
+     case SONIC_CAP1:
+     case SONIC_CAP0:
+         if (s->regs[SONIC_CR] & SONIC_CR_RST) {
+-            val = s->cam[s->regs[SONIC_CEP] & 0xf][2 * (SONIC_CAP0 - reg) + 1] << 8;
+-            val |= s->cam[s->regs[SONIC_CEP] & 0xf][2 * (SONIC_CAP0 - reg)];
++            val = s->cam[s->regs[SONIC_CEP] & 0xf][SONIC_CAP0 - reg];
+         }
+         break;
+     /* All other registers have no special contraints */
+@@ -602,15 +588,14 @@ static uint64_t dp8393x_read(void *opaque, hwaddr addr, unsigned int size)
+ 
+     trace_dp8393x_read(reg, reg_names[reg], val, size);
+ 
+-    return s->big_endian ?
+-    return s->big_endian ? val << 16 : val;
++    return val;
+ }
+ 
+-static void dp8393x_write(void *opaque, hwaddr addr, uint64_t data,
++static void dp8393x_write(void *opaque, hwaddr addr, uint64_t val,
+                           unsigned int size)
+ {
+     dp8393xState *s = opaque;
+     int reg = addr >> s->it_shift;
+-    uint32_t val = s->big_endian ? data >> 16 : data;
+ 
+     trace_dp8393x_write(reg, reg_names[reg], val, size);
+ 
+@@ -691,11 +676,16 @@ static void dp8393x_write(void *opaque, hwaddr addr, uint64_t data,
+     }
+ }
+ 
++/*
++ * Since .impl.max_access_size is effectively controlled by the it_shift
++ * property, leave it unspecified for now to allow the memory API to
++ * correctly zero extend the 16-bit register values to the access size up to and
++ * including it_shift.
++ */
+ static const MemoryRegionOps dp8393x_ops = {
+     .read = dp8393x_read,
+     .write = dp8393x_write,
+-    .impl.min_access_size = 4,
+-    .impl.max_access_size = 4,
++    .impl.min_access_size = 2,
+     .endianness = DEVICE_NATIVE_ENDIAN,
+ };
+ 
+@@ -764,7 +754,7 @@ static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
+     dp8393xState *s = qemu_get_nic_opaque(nc);
+     int packet_type;
+     uint32_t available, address;
+-    int width, rx_len, padded_len;
++    int rx_len, padded_len;
+     uint32_t checksum;
+     int size;
+ 
+@@ -777,10 +767,8 @@ static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
+ 
+     rx_len = pkt_size + sizeof(checksum);
+     if (s->regs[SONIC_DCR] & SONIC_DCR_DW) {
+-        width = 2;
+         padded_len = ((rx_len - 1) | 3) + 1;
+     } else {
+-        width = 1;
+         padded_len = ((rx_len - 1) | 1) + 1;
+     }
+ 
+@@ -801,11 +789,7 @@ static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
+     /* Check for EOL */
+     if (s->regs[SONIC_LLFA] & SONIC_DESC_EOL) {
+         /* Are we still in resource exhaustion? */
+-        size = sizeof(uint16_t) * 1 * width;
+-        address = dp8393x_crda(s) + sizeof(uint16_t) * 5 * width;
+-        address_space_read(&s->as, address, MEMTXATTRS_UNSPECIFIED,
+-                           s->data, size);
+-        s->regs[SONIC_LLFA] = dp8393x_get(s, width, 0);
++        s->regs[SONIC_LLFA] = dp8393x_get(s, dp8393x_crda(s), 5);
+         if (s->regs[SONIC_LLFA] & SONIC_DESC_EOL) {
+             /* Still EOL ; stop reception */
+             return -1;
+@@ -813,11 +797,7 @@ static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
+         /* Link has been updated by host */
+ 
+         /* Clear in_use */
+-        size = sizeof(uint16_t) * width;
+-        address = dp8393x_crda(s) + sizeof(uint16_t) * 6 * width;
+-        dp8393x_put(s, width, 0, 0);
+-        address_space_rw(&s->as, address, MEMTXATTRS_UNSPECIFIED,
+-                         (uint8_t *)s->data, size, 1);
++        dp8393x_put(s, dp8393x_crda(s), 6, 0x0000);
+ 
+         /* Move to next descriptor */
+         s->regs[SONIC_CRDA] = s->regs[SONIC_LLFA];
+@@ -846,8 +826,8 @@ static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
+     /* Pad short packets to keep pointers aligned */
+     if (rx_len < padded_len) {
+         size = padded_len - rx_len;
+-        address_space_rw(&s->as, address, MEMTXATTRS_UNSPECIFIED,
+-                         (uint8_t *)"\xFF\xFF\xFF", size, 1);
++        address_space_write(&s->as, address, MEMTXATTRS_UNSPECIFIED,
++                            "\xFF\xFF\xFF", size);
+         address += size;
+     }
+ 
+@@ -871,32 +851,20 @@ static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
+ 
+     /* Write status to memory */
+     trace_dp8393x_receive_write_status(dp8393x_crda(s));
+-    dp8393x_put(s, width, 0, s->regs[SONIC_RCR]); /* status */
+-    dp8393x_put(s, width, 1, rx_len); /* byte count */
+-    dp8393x_put(s, width, 2, s->regs[SONIC_TRBA0]); /* pkt_ptr0 */
+-    dp8393x_put(s, width, 3, s->regs[SONIC_TRBA1]); /* pkt_ptr1 */
+-    dp8393x_put(s, width, 4,
+-    dp8393x_put(s, width, 4, s->regs[SONIC_RSC]); /* seq_no */
+-    size = sizeof(uint16_t) * 5 * width;
+-    address_space_write(&s->as, dp8393x_crda(s),
+-                        MEMTXATTRS_UNSPECIFIED,
+-                        s->data, size);
++    dp8393x_put(s, dp8393x_crda(s), 0, s->regs[SONIC_RCR]); /* status */
++    dp8393x_put(s, dp8393x_crda(s), 1, rx_len); /* byte count */
++    dp8393x_put(s, dp8393x_crda(s), 2, s->regs[SONIC_TRBA0]); /* pkt_ptr0 */
++    dp8393x_put(s, dp8393x_crda(s), 3, s->regs[SONIC_TRBA1]); /* pkt_ptr1 */
++    dp8393x_put(s, dp8393x_crda(s), 4, s->regs[SONIC_RSC]); /* seq_no */
+ 
+     /* Check link field */
+-    size = sizeof(uint16_t) * width;
+-    address_space_read(&s->as,
+-                       dp8393x_crda(s) + sizeof(uint16_t) * 5 * width,
+-                       MEMTXATTRS_UNSPECIFIED, s->data, size);
+-    s->regs[SONIC_LLFA] = dp8393x_get(s, width, 0);
++    s->regs[SONIC_LLFA] = dp8393x_get(s, dp8393x_crda(s), 5);
+     if (s->regs[SONIC_LLFA] & SONIC_DESC_EOL) {
+         /* EOL detected */
+         s->regs[SONIC_ISR] |= SONIC_ISR_RDE;
+     } else {
+         /* Clear in_use */
+-        size = sizeof(uint16_t) * width;
+-        address = dp8393x_crda(s) + sizeof(uint16_t) * 6 * width;
+-        dp8393x_put(s, width, 0, 0);
+-        address_space_write(&s->as, address, MEMTXATTRS_UNSPECIFIED,
+-                            s->data, size);
++        dp8393x_put(s, dp8393x_crda(s), 6, 0x0000);
+ 
+         /* Move to next descriptor */
+         s->regs[SONIC_CRDA] = s->regs[SONIC_LLFA];
+@@ -972,7 +940,7 @@ static void dp8393x_realize(DeviceState *dev, Error **errp)
+ 
+     address_space_init(&s->as, s->dma_mr, "dp8393x");
+     memory_region_init_io(&s->mmio, OBJECT(dev), &dp8393x_ops, s,
+-                          "dp8393x-regs", 0x40 << s->it_shift);
++                          "dp8393x-regs", SONIC_REG_COUNT << s->it_shift);
+ 
+     s->nic = qemu_new_nic(&net_dp83932_info, &s->conf,
+                           object_get_typename(OBJECT(dev)), dev->id, s);
+@@ -983,11 +951,11 @@ static void dp8393x_realize(DeviceState *dev, Error **errp)
+ 
+ static const VMStateDescription vmstate_dp8393x = {
+     .name = "dp8393x",
+-    .version_id = 0,
+-    .minimum_version_id = 0,
++    .version_id = 1,
++    .minimum_version_id = 1,
+     .fields = (VMStateField []) {
+-        VMSTATE_BUFFER_UNSAFE(cam, dp8393xState, 0, 16 * 6),
+-        VMSTATE_UINT16_ARRAY(regs, dp8393xState, 0x40),
++        VMSTATE_UINT16_2DARRAY(cam, dp8393xState, 16, 3),
++        VMSTATE_UINT16_ARRAY(regs, dp8393xState, SONIC_REG_COUNT),
+         VMSTATE_END_OF_LIST()
+     }
+ };
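The reworked dp8393x_get()/dp8393x_put() fold the old width bookkeeping into a single address computation: descriptor field n sits at base + (n << 2) when DCR.DW selects 32-bit data width, and at base + (n << 1) in 16-bit mode. A small worked example of that rule, assuming a descriptor at guest address 0x1000 (the helper mirrors the hunk's logic; it is not the QEMU function itself):

/* sonic_stride.c — field-to-address mapping used by the new accessors. */
#include <stdint.h>
#include <stdio.h>

static uint64_t field_addr(uint64_t base, int field, int dcr_dw)
{
    return base + ((uint64_t)field << (dcr_dw ? 2 : 1));
}

int main(void)
{
    /* e.g. the in_use word (field 6) of a receive descriptor at 0x1000 */
    printf("16-bit mode: 0x%x\n",
           (unsigned)field_addr(0x1000, 6, 0));   /* 0x100c */
    printf("32-bit mode: 0x%x\n",
           (unsigned)field_addr(0x1000, 6, 1));   /* 0x1018 */
    return 0;
}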
+diff --git a/hw/pci-host/Kconfig b/hw/pci-host/Kconfig
+index 79c20bf28b..84494400b8 100644
+--- a/hw/pci-host/Kconfig
++++ b/hw/pci-host/Kconfig
+@@ -6,7 +6,7 @@ config XEN_IGD_PASSTHROUGH
+     default y
+     depends on XEN && PCI_I440FX
+ 
+-config PREP_PCI
++config RAVEN_PCI
+     bool
+     select PCI
+     select OR_IRQ
+diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build
+index 1698d3a192..4c4f39c15c 100644
+--- a/hw/pci-host/meson.build
++++ b/hw/pci-host/meson.build
+@@ -13,7 +13,7 @@ pci_ss.add(when: 'CONFIG_REMOTE_PCIHOST', if_true: files('remote.c'))
+ pci_ss.add(when: 'CONFIG_SH_PCI', if_true: files('sh_pci.c'))
+ 
+ # PPC devices
+-pci_ss.add(when: 'CONFIG_PREP_PCI', if_true: files('prep.c'))
++pci_ss.add(when: 'CONFIG_RAVEN_PCI', if_true: files('raven.c'))
+ pci_ss.add(when: 'CONFIG_GRACKLE_PCI', if_true: files('grackle.c'))
+ # NewWorld PowerMac
+ pci_ss.add(when: 'CONFIG_UNIN_PCI', if_true: files('uninorth.c'))
+diff --git a/hw/pci-host/mv64361.c b/hw/pci-host/mv64361.c
+index 20510d8680..92b0f5d047 100644
+--- a/hw/pci-host/mv64361.c
++++ b/hw/pci-host/mv64361.c
+@@ -687,7 +687,6 @@ static void mv64361_write(void *opaque, hwaddr addr, uint64_t val,
+     case MV64340_PCI_1_IO_BASE_ADDR:
+         s->pci[1].io_base = val & 0x30fffffULL;
+         warn_swap_bit(val);
+-        break;
+         if (!(s->cpu_conf & BIT(27))) {
+             s->pci[1].remap[4] = (val & 0xffffULL) << 16;
+         }
+diff --git a/hw/pci-host/prep.c b/hw/pci-host/prep.c
+deleted file mode 100644
+index 9fef74fc56..0000000000
+--- a/hw/pci-host/prep.c
++++ /dev/null
+@@ -1,442 +0,0 @@
+-/*
+- * QEMU PREP PCI host
+- *
+- * Copyright (c) 2006 Fabrice Bellard
+- * Copyright (c) 2011-2013 Andreas Färber
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a copy
+- * of this software and associated documentation files (the "Software"), to deal
+- * in the Software without restriction, including without limitation the rights
+- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+- * copies of the Software, and to permit persons to whom the Software is
+- * furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+- * THE SOFTWARE.
+- */
+-
+-#include "qemu/osdep.h"
+-#include "qemu-common.h"
+-#include "qemu/datadir.h"
+-#include "qemu/units.h"
+-#include "qemu/log.h"
+-#include "qapi/error.h"
+-#include "hw/pci/pci.h"
+-#include "hw/pci/pci_bus.h"
+-#include "hw/pci/pci_host.h"
+-#include "hw/qdev-properties.h"
+-#include "migration/vmstate.h"
+-#include "hw/intc/i8259.h"
+-#include "hw/irq.h"
+-#include "hw/loader.h"
+-#include "hw/or-irq.h"
+-#include "elf.h"
+-#include "qom/object.h"
+-
+-#define TYPE_RAVEN_PCI_DEVICE "raven"
+-#define TYPE_RAVEN_PCI_HOST_BRIDGE "raven-pcihost"
+-
+-OBJECT_DECLARE_SIMPLE_TYPE(RavenPCIState, RAVEN_PCI_DEVICE)
+-
+-struct RavenPCIState {
+-    PCIDevice dev;
+-
+-    uint32_t elf_machine;
+-    char *bios_name;
+-    MemoryRegion bios;
+-};
+-
+-typedef struct PRePPCIState PREPPCIState;
+-DECLARE_INSTANCE_CHECKER(PREPPCIState, RAVEN_PCI_HOST_BRIDGE,
+-                         TYPE_RAVEN_PCI_HOST_BRIDGE)
+-
+-struct PRePPCIState {
+-    PCIHostState parent_obj;
+-
+-    qemu_or_irq *or_irq;
+-    qemu_irq pci_irqs[PCI_NUM_PINS];
+-    PCIBus pci_bus;
+-    AddressSpace pci_io_as;
+-    MemoryRegion pci_io;
+-    MemoryRegion pci_io_non_contiguous;
+-    MemoryRegion pci_memory;
+-    MemoryRegion pci_intack;
+-    MemoryRegion bm;
+-    MemoryRegion bm_ram_alias;
+-    MemoryRegion bm_pci_memory_alias;
+-    AddressSpace bm_as;
+-    RavenPCIState pci_dev;
+-
+-    int contiguous_map;
+-    bool is_legacy_prep;
+-};
+-
+-#define BIOS_SIZE (1 * MiB)
+-
+-static inline uint32_t raven_pci_io_config(hwaddr addr)
+-{
+-    int i;
+-
+-    for (i = 0; i < 11; i++) {
+-        if ((addr & (1 << (11 + i))) != 0) {
+-            break;
+-        }
+-    }
+-    return (addr & 0x7ff) | (i << 11);
+-}
+-
+-static void raven_pci_io_write(void *opaque, hwaddr addr,
+-                               uint64_t val, unsigned int size)
+-{
+-    PREPPCIState *s = opaque;
+-    PCIHostState *phb = PCI_HOST_BRIDGE(s);
+-    pci_data_write(phb->bus, raven_pci_io_config(addr), val, size);
+-}
+-
+-static uint64_t raven_pci_io_read(void *opaque, hwaddr addr,
+-                                  unsigned int size)
+-{
+-    PREPPCIState *s = opaque;
+-    PCIHostState *phb = PCI_HOST_BRIDGE(s);
+-    return pci_data_read(phb->bus, raven_pci_io_config(addr), size);
+-}
+-
+-static const MemoryRegionOps raven_pci_io_ops = {
+-    .read = raven_pci_io_read,
+-    .write = raven_pci_io_write,
+-    .endianness = DEVICE_LITTLE_ENDIAN,
+-};
+-
+-static uint64_t raven_intack_read(void *opaque, hwaddr addr,
+-                                  unsigned int size)
+-{
+-    return pic_read_irq(isa_pic);
+-}
+-
+-static void raven_intack_write(void *opaque, hwaddr addr,
+-                               uint64_t data, unsigned size)
+-{
+-    qemu_log_mask(LOG_UNIMP, "%s not implemented\n", __func__);
+-}
+-
+-static const MemoryRegionOps raven_intack_ops = {
+-    .read = raven_intack_read,
+-    .write = raven_intack_write,
+-    .valid = {
+-        .max_access_size = 1,
+-    },
+-};
+-
+-static inline hwaddr raven_io_address(PREPPCIState *s,
+-                                      hwaddr addr)
+-{
+-    if (s->contiguous_map == 0) {
+-        /* 64 KB contiguous space for IOs */
+-        addr &= 0xFFFF;
+-    } else {
+-        /* 8 MB non-contiguous space for IOs */
+-        addr = (addr & 0x1F) | ((addr & 0x007FFF000) >> 7);
+-    }
+-
+-    /* FIXME: handle endianness switch */
+-
+-    return addr;
+-}
+-
+-static uint64_t raven_io_read(void *opaque, hwaddr addr,
+-                              unsigned int size)
+-{
+-    PREPPCIState *s = opaque;
+-    uint8_t buf[4];
+-
+-    addr = raven_io_address(s, addr);
+-    address_space_read(&s->pci_io_as, addr + 0x80000000,
+-                       MEMTXATTRS_UNSPECIFIED, buf, size);
+-
+-    if (size == 1) {
+-        return buf[0];
+-    } else if (size == 2) {
+-        return lduw_le_p(buf);
+-    } else if (size == 4) {
+-        return ldl_le_p(buf);
+-    } else {
+-        g_assert_not_reached();
+-    }
+-}
+-
+-static void raven_io_write(void *opaque, hwaddr addr,
+-                           uint64_t val, unsigned int size)
+-{
+-    PREPPCIState *s = opaque;
+-    uint8_t buf[4];
+-
+-    addr = raven_io_address(s, addr);
+-
+-    if (size == 1) {
+-        buf[0] = val;
+-    } else if (size == 2) {
+-        stw_le_p(buf, val);
+-    } else if (size == 4) {
+-        stl_le_p(buf, val);
+-    } else {
+-        g_assert_not_reached();
+-    }
+-
+-    address_space_write(&s->pci_io_as, addr + 0x80000000,
+-                        MEMTXATTRS_UNSPECIFIED, buf, size);
+-}
+-
+-static const MemoryRegionOps raven_io_ops = {
+-    .read = raven_io_read,
+-    .write = raven_io_write,
+-    .endianness = DEVICE_LITTLE_ENDIAN,
+-    .impl.max_access_size = 4,
+-    .valid.unaligned = true,
+-};
+-
+-static int raven_map_irq(PCIDevice *pci_dev, int irq_num)
+-{
+-    return (irq_num + (pci_dev->devfn >> 3)) & 1;
+-}
+-
+-static void raven_set_irq(void *opaque, int irq_num, int level)
+-{
+-    PREPPCIState *s = opaque;
+-
+-    qemu_set_irq(s->pci_irqs[irq_num], level);
+-}
+-
+-static AddressSpace *raven_pcihost_set_iommu(PCIBus *bus, void *opaque,
+-                                             int devfn)
+-{
+-    PREPPCIState *s = opaque;
+-
+-    return &s->bm_as;
+-}
+-
+-static void raven_change_gpio(void *opaque, int n, int level)
+-{
+-    PREPPCIState *s = opaque;
+-
+-    s->contiguous_map = level;
+-}
+-
+-static void raven_pcihost_realizefn(DeviceState *d, Error **errp)
+-{
+-    SysBusDevice *dev = SYS_BUS_DEVICE(d);
+-    PCIHostState *h = PCI_HOST_BRIDGE(dev);
+-    PREPPCIState *s = RAVEN_PCI_HOST_BRIDGE(dev);
+-    MemoryRegion *address_space_mem = get_system_memory();
+-    int i;
+-
+-    if (s->is_legacy_prep) {
+-        for (i = 0; i < PCI_NUM_PINS; i++) {
+-            sysbus_init_irq(dev, &s->pci_irqs[i]);
+-        }
+-    } else {
+-        /* According to PReP specification section 6.1.6 "System Interrupt
+-         * Assignments", all PCI interrupts are routed via IRQ 15 */
+-        s->or_irq = OR_IRQ(object_new(TYPE_OR_IRQ));
+-        object_property_set_int(OBJECT(s->or_irq), "num-lines", PCI_NUM_PINS,
+-                                &error_fatal);
+-        qdev_realize(DEVICE(s->or_irq), NULL, &error_fatal);
+-        sysbus_init_irq(dev, &s->or_irq->out_irq);
+-
+-        for (i = 0; i < PCI_NUM_PINS; i++) {
+-            s->pci_irqs[i] = qdev_get_gpio_in(DEVICE(s->or_irq), i);
+-        }
+-    }
+-
+-    qdev_init_gpio_in(d, raven_change_gpio, 1);
+-
+-    pci_bus_irqs(&s->pci_bus, raven_set_irq, raven_map_irq, s, PCI_NUM_PINS);
+-
+-    memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_le_ops, s,
+-                          "pci-conf-idx", 4);
+-    memory_region_add_subregion(&s->pci_io, 0xcf8, &h->conf_mem);
+-
+-    memory_region_init_io(&h->data_mem, OBJECT(h), &pci_host_data_le_ops, s,
+-                          "pci-conf-data", 4);
+-    memory_region_add_subregion(&s->pci_io, 0xcfc, &h->data_mem);
+-
+-    memory_region_init_io(&h->mmcfg, OBJECT(s), &raven_pci_io_ops, s,
+-                          "pciio", 0x00400000);
+-    memory_region_add_subregion(address_space_mem, 0x80800000, &h->mmcfg);
+-
+-    memory_region_init_io(&s->pci_intack, OBJECT(s), &raven_intack_ops, s,
+-                          "pci-intack", 1);
+-    memory_region_add_subregion(address_space_mem, 0xbffffff0, &s->pci_intack);
+-
+-    /* TODO Remove once realize propagates to child devices. */
+-    qdev_realize(DEVICE(&s->pci_dev), BUS(&s->pci_bus), errp);
+-}
+-
+-static void raven_pcihost_initfn(Object *obj)
+-{
+-    PCIHostState *h = PCI_HOST_BRIDGE(obj);
+-    PREPPCIState *s = RAVEN_PCI_HOST_BRIDGE(obj);
+-    MemoryRegion *address_space_mem = get_system_memory();
+-    DeviceState *pci_dev;
+-
+-    memory_region_init(&s->pci_io, obj, "pci-io", 0x3f800000);
+-    memory_region_init_io(&s->pci_io_non_contiguous, obj, &raven_io_ops, s,
+-                          "pci-io-non-contiguous", 0x00800000);
+-    memory_region_init(&s->pci_memory, obj, "pci-memory", 0x3f000000);
+-    address_space_init(&s->pci_io_as, &s->pci_io, "raven-io");
+-
+-    /* CPU address space */
+-    memory_region_add_subregion(address_space_mem, 0x80000000, &s->pci_io);
+-    memory_region_add_subregion_overlap(address_space_mem, 0x80000000,
+-                                        &s->pci_io_non_contiguous, 1);
+-    memory_region_add_subregion(address_space_mem, 0xc0000000, &s->pci_memory);
+-    pci_root_bus_new_inplace(&s->pci_bus, sizeof(s->pci_bus), DEVICE(obj), NULL,
+-                             &s->pci_memory, &s->pci_io, 0, TYPE_PCI_BUS);
+-
+-    /* Bus master address space */
+-    memory_region_init(&s->bm, obj, "bm-raven", 4 * GiB);
+-    memory_region_init_alias(&s->bm_pci_memory_alias, obj, "bm-pci-memory",
+-                             &s->pci_memory, 0,
+-                             memory_region_size(&s->pci_memory));
+-    memory_region_init_alias(&s->bm_ram_alias, obj, "bm-system",
+-                             get_system_memory(), 0, 0x80000000);
+-    memory_region_add_subregion(&s->bm, 0 , &s->bm_pci_memory_alias);
+-    memory_region_add_subregion(&s->bm, 0x80000000, &s->bm_ram_alias);
+-    address_space_init(&s->bm_as, &s->bm, "raven-bm");
+-    pci_setup_iommu(&s->pci_bus, raven_pcihost_set_iommu, s);
+-
+-    h->bus = &s->pci_bus;
+-
+-    object_initialize(&s->pci_dev, sizeof(s->pci_dev), TYPE_RAVEN_PCI_DEVICE);
+-    pci_dev = DEVICE(&s->pci_dev);
+-    object_property_set_int(OBJECT(&s->pci_dev), "addr", PCI_DEVFN(0, 0),
+-                            NULL);
+-    qdev_prop_set_bit(pci_dev, "multifunction", false);
+-}
+-
+-static void raven_realize(PCIDevice *d, Error **errp)
+-{
+-    RavenPCIState *s = RAVEN_PCI_DEVICE(d);
+-    char *filename;
+-    int bios_size = -1;
+-
+-    d->config[0x0C] = 0x08; // cache_line_size
+-    d->config[0x0D] = 0x10; // latency_timer
+-    d->config[0x34] = 0x00; // capabilities_pointer
+-
+-    memory_region_init_rom_nomigrate(&s->bios, OBJECT(s), "bios", BIOS_SIZE,
+-                                     &error_fatal);
+-    memory_region_add_subregion(get_system_memory(), (uint32_t)(-BIOS_SIZE),
+-                                &s->bios);
+-    if (s->bios_name) {
+-        filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, s->bios_name);
+-        if (filename) {
+-            if (s->elf_machine != EM_NONE) {
+-                bios_size = load_elf(filename, NULL, NULL, NULL, NULL,
+-                                     NULL, NULL, NULL, 1, s->elf_machine,
+-                                     0, 0);
+-            }
+-            if (bios_size < 0) {
+-                bios_size = get_image_size(filename);
+-                if (bios_size > 0 && bios_size <= BIOS_SIZE) {
+-                    hwaddr bios_addr;
+-                    bios_size = (bios_size + 0xfff) & ~0xfff;
+-                    bios_addr = (uint32_t)(-BIOS_SIZE);
+-                    bios_size = load_image_targphys(filename, bios_addr,
+-                                                    bios_size);
+-                }
+-            }
+-        }
+-        g_free(filename);
+-        if (bios_size < 0 || bios_size > BIOS_SIZE) {
+-            memory_region_del_subregion(get_system_memory(), &s->bios);
+-            error_setg(errp, "Could not load bios image '%s'", s->bios_name);
+-            return;
+-        }
+-    }
+-
+-    vmstate_register_ram_global(&s->bios);
+-}
+-
+-static const VMStateDescription vmstate_raven = {
+-    .name = "raven",
+-    .version_id = 0,
+-    .minimum_version_id = 0,
+-    .fields = (VMStateField[]) {
+-        VMSTATE_PCI_DEVICE(dev, RavenPCIState),
+-        VMSTATE_END_OF_LIST()
+-    },
+-};
+-
+-static void raven_class_init(ObjectClass *klass, void *data)
+-{
+-    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+-    DeviceClass *dc = DEVICE_CLASS(klass);
+-
+-    k->realize = raven_realize;
+-    k->vendor_id = PCI_VENDOR_ID_MOTOROLA;
+-    k->device_id = PCI_DEVICE_ID_MOTOROLA_RAVEN;
+-    k->revision = 0x00;
+-    k->class_id = PCI_CLASS_BRIDGE_HOST;
+-    dc->desc = "PReP Host Bridge - Motorola Raven";
+-    dc->vmsd = &vmstate_raven;
+-    /*
+-     * Reason: PCI-facing part of the host bridge, not usable without
+-     * the host-facing part, which can't be device_add'ed, yet.
+-     */
+-    dc->user_creatable = false;
+-}
+-
+-static const TypeInfo raven_info = {
+-    .name = TYPE_RAVEN_PCI_DEVICE,
+-    .parent = TYPE_PCI_DEVICE,
+-    .instance_size = sizeof(RavenPCIState),
+-    .class_init = raven_class_init,
+-    .interfaces = (InterfaceInfo[]) {
+-        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+-        { },
+-    },
+-};
+-
+-static Property raven_pcihost_properties[] = {
+-    DEFINE_PROP_UINT32("elf-machine", PREPPCIState, pci_dev.elf_machine,
+-                       EM_NONE),
+-    DEFINE_PROP_STRING("bios-name", PREPPCIState, pci_dev.bios_name),
+-    /* Temporary workaround until legacy prep machine is removed */
+-    DEFINE_PROP_BOOL("is-legacy-prep", PREPPCIState, is_legacy_prep,
+-                     false),
+-    DEFINE_PROP_END_OF_LIST()
+-};
+-
+-static void raven_pcihost_class_init(ObjectClass *klass, void *data)
+-{
+-    DeviceClass *dc = DEVICE_CLASS(klass);
+-
+-    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+-    dc->realize = raven_pcihost_realizefn;
+-    device_class_set_props(dc, raven_pcihost_properties);
+-    dc->fw_name = "pci";
+-}
+-
+-static const TypeInfo raven_pcihost_info = {
+-    .name = TYPE_RAVEN_PCI_HOST_BRIDGE,
+-    .parent = TYPE_PCI_HOST_BRIDGE,
+-    .instance_size = sizeof(PREPPCIState),
+-    .instance_init = raven_pcihost_initfn,
+-    .class_init = raven_pcihost_class_init,
+-};
+-
+-static void raven_register_types(void)
+-{
+-    type_register_static(&raven_pcihost_info);
+-    type_register_static(&raven_info);
+-}
+-
+-type_init(raven_register_types)
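Both the deleted prep.c above and the new raven.c below decode PReP config-space addresses with raven_pci_io_config(), which folds a one-hot IDSEL bit in addr[21:11] back into the conventional "device number << 11" encoding while keeping the low 11 bits (function and register). A worked example — the helper's body is copied from the patch, while main() is purely illustrative:

/* raven_idsel.c — IDSEL bit 13 set means i == 2, i.e. device 2. */
#include <stdint.h>
#include <stdio.h>

static uint32_t raven_pci_io_config(uint32_t addr)
{
    int i;

    for (i = 0; i < 11; i++) {
        if ((addr & (1 << (11 + i))) != 0) {
            break;
        }
    }
    return (addr & 0x7ff) | (i << 11);
}

int main(void)
{
    uint32_t addr = (1 << 13) | 0x04;   /* device 2, register 0x04 */
    printf("0x%x -> 0x%x\n", addr, raven_pci_io_config(addr)); /* 0x1004 */
    return 0;
}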
+diff --git a/hw/pci-host/raven.c b/hw/pci-host/raven.c
+new file mode 100644
+index 0000000000..3be27f0a14
+--- /dev/null
++++ b/hw/pci-host/raven.c
+@@ -0,0 +1,445 @@
++/*
++ * QEMU PREP PCI host
++ *
++ * Copyright (c) 2006 Fabrice Bellard
++ * Copyright (c) 2011-2013 Andreas Färber
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to deal
++ * in the Software without restriction, including without limitation the rights
++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++ * copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++ * THE SOFTWARE.
++ */
++
++#include "qemu/osdep.h"
++#include "qemu-common.h"
++#include "qemu/datadir.h"
++#include "qemu/units.h"
++#include "qemu/log.h"
++#include "qapi/error.h"
++#include "hw/pci/pci.h"
++#include "hw/pci/pci_bus.h"
++#include "hw/pci/pci_host.h"
++#include "hw/qdev-properties.h"
++#include "migration/vmstate.h"
++#include "hw/intc/i8259.h"
++#include "hw/irq.h"
++#include "hw/loader.h"
++#include "hw/or-irq.h"
++#include "elf.h"
++#include "qom/object.h"
++
++#define TYPE_RAVEN_PCI_DEVICE "raven"
++#define TYPE_RAVEN_PCI_HOST_BRIDGE "raven-pcihost"
++
++OBJECT_DECLARE_SIMPLE_TYPE(RavenPCIState, RAVEN_PCI_DEVICE)
++
++struct RavenPCIState {
++    PCIDevice dev;
++
++    uint32_t elf_machine;
++    char *bios_name;
++    MemoryRegion bios;
++};
++
++typedef struct PRePPCIState PREPPCIState;
++DECLARE_INSTANCE_CHECKER(PREPPCIState, RAVEN_PCI_HOST_BRIDGE,
++                         TYPE_RAVEN_PCI_HOST_BRIDGE)
++
++struct PRePPCIState {
++    PCIHostState parent_obj;
++
++    qemu_or_irq *or_irq;
++    qemu_irq pci_irqs[PCI_NUM_PINS];
++    PCIBus pci_bus;
++    AddressSpace pci_io_as;
++    MemoryRegion pci_io;
++    MemoryRegion pci_io_non_contiguous;
++    MemoryRegion pci_memory;
++    MemoryRegion pci_intack;
++    MemoryRegion bm;
++    MemoryRegion bm_ram_alias;
++    MemoryRegion bm_pci_memory_alias;
++    AddressSpace bm_as;
++    RavenPCIState pci_dev;
++
++    int contiguous_map;
++    bool is_legacy_prep;
++};
++
++#define BIOS_SIZE (1 * MiB)
++
++#define PCI_IO_BASE_ADDR    0x80000000  /* Physical address on main bus */
++
++static inline uint32_t raven_pci_io_config(hwaddr addr)
++{
++    int i;
++
++    for (i = 0; i < 11; i++) {
++        if ((addr & (1 << (11 + i))) != 0) {
++            break;
++        }
++    }
++    return (addr & 0x7ff) | (i << 11);
++}
++
++static void raven_pci_io_write(void *opaque, hwaddr addr,
++                               uint64_t val, unsigned int size)
++{
++    PREPPCIState *s = opaque;
++    PCIHostState *phb = PCI_HOST_BRIDGE(s);
++    pci_data_write(phb->bus, raven_pci_io_config(addr), val, size);
++}
++
++static uint64_t raven_pci_io_read(void *opaque, hwaddr addr,
++                                  unsigned int size)
++{
++    PREPPCIState *s = opaque;
++    PCIHostState *phb = PCI_HOST_BRIDGE(s);
++    return pci_data_read(phb->bus, raven_pci_io_config(addr), size);
++}
++
++static const MemoryRegionOps raven_pci_io_ops = {
++    .read = raven_pci_io_read,
++    .write = raven_pci_io_write,
++    .endianness = DEVICE_LITTLE_ENDIAN,
++};
++
++static uint64_t raven_intack_read(void *opaque, hwaddr addr,
++                                  unsigned int size)
++{
++    return pic_read_irq(isa_pic);
++}
++
++static void raven_intack_write(void *opaque, hwaddr addr,
++                               uint64_t data, unsigned size)
++{
++    qemu_log_mask(LOG_UNIMP, "%s not implemented\n", __func__);
++}
++
++static const MemoryRegionOps raven_intack_ops = {
++    .read = raven_intack_read,
++    .write = raven_intack_write,
++    .valid = {
++        .max_access_size = 1,
++    },
++};
++
++static inline hwaddr raven_io_address(PREPPCIState *s,
++                                      hwaddr addr)
++{
++    if (s->contiguous_map == 0) {
++        /* 64 KB contiguous space for IOs */
++        addr &= 0xFFFF;
++    } else {
++        /* 8 MB non-contiguous space for IOs */
++        addr = (addr & 0x1F) | ((addr & 0x007FFF000) >> 7);
++    }
++
++    /* FIXME: handle endianness switch */
++
++    return addr;
++}
++
++static uint64_t raven_io_read(void *opaque, hwaddr addr,
++                              unsigned int size)
++{
++    PREPPCIState *s = opaque;
++    uint8_t buf[4];
++
++    addr = raven_io_address(s, addr);
++    address_space_read(&s->pci_io_as, addr + PCI_IO_BASE_ADDR,
++                       MEMTXATTRS_UNSPECIFIED, buf, size);
++
++    if (size == 1) {
++        return buf[0];
++    } else if (size == 2) {
++        return lduw_le_p(buf);
++    } else if (size == 4) {
++        return ldl_le_p(buf);
++    } else {
++        g_assert_not_reached();
++    }
++}
++
++static void raven_io_write(void *opaque, hwaddr addr,
++                           uint64_t val, unsigned int size)
++{
++    PREPPCIState *s = opaque;
++    uint8_t buf[4];
++
++    addr = raven_io_address(s, addr);
++
++    if (size == 1) {
++        buf[0] = val;
++    } else if (size == 2) {
++        stw_le_p(buf, val);
++    } else if (size == 4) {
++        stl_le_p(buf, val);
++    } else {
++        g_assert_not_reached();
++    }
++
++    address_space_write(&s->pci_io_as, addr + PCI_IO_BASE_ADDR,
++                        MEMTXATTRS_UNSPECIFIED, buf, size);
++}
++
++static const MemoryRegionOps raven_io_ops = {
++    .read = raven_io_read,
++    .write = raven_io_write,
++    .endianness = DEVICE_LITTLE_ENDIAN,
++    .impl.max_access_size = 4,
++    .valid.unaligned = true,
++};
++
++static int raven_map_irq(PCIDevice *pci_dev, int irq_num)
++{
++    return (irq_num + (pci_dev->devfn >> 3)) & 1;
++}
++
++static void raven_set_irq(void *opaque, int irq_num, int level)
++{
++    PREPPCIState *s = opaque;
++
++    qemu_set_irq(s->pci_irqs[irq_num], level);
++}
++
++static AddressSpace *raven_pcihost_set_iommu(PCIBus *bus, void *opaque,
++                                             int devfn)
++{
++    PREPPCIState *s = opaque;
++
++    return &s->bm_as;
++}
++
++static void raven_change_gpio(void *opaque, int n, int level)
++{
++    PREPPCIState *s = opaque;
++
++    s->contiguous_map = level;
++}
++
++static void raven_pcihost_realizefn(DeviceState *d, Error **errp)
++{
++    SysBusDevice *dev = SYS_BUS_DEVICE(d);
++    PCIHostState *h = PCI_HOST_BRIDGE(dev);
++    PREPPCIState *s = RAVEN_PCI_HOST_BRIDGE(dev);
++    MemoryRegion *address_space_mem = get_system_memory();
++    int i;
++
++    if (s->is_legacy_prep) {
++        for (i = 0; i < PCI_NUM_PINS; i++) {
++            sysbus_init_irq(dev, &s->pci_irqs[i]);
++        }
++    } else {
++        /* According to PReP specification section 6.1.6 "System Interrupt
++         * Assignments", all PCI interrupts are routed via IRQ 15 */
++        s->or_irq = OR_IRQ(object_new(TYPE_OR_IRQ));
++        object_property_set_int(OBJECT(s->or_irq), "num-lines", PCI_NUM_PINS,
++                                &error_fatal);
++        qdev_realize(DEVICE(s->or_irq), NULL, &error_fatal);
++        sysbus_init_irq(dev, &s->or_irq->out_irq);
++
++        for (i = 0; i < PCI_NUM_PINS; i++) {
++            s->pci_irqs[i] = qdev_get_gpio_in(DEVICE(s->or_irq), i);
++        }
++    }
++
++    qdev_init_gpio_in(d, raven_change_gpio, 1);
++
++    pci_bus_irqs(&s->pci_bus, raven_set_irq, raven_map_irq, s, PCI_NUM_PINS);
++
++    memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_le_ops, s,
++                          "pci-conf-idx", 4);
++    memory_region_add_subregion(&s->pci_io, 0xcf8, &h->conf_mem);
++
++    memory_region_init_io(&h->data_mem, OBJECT(h), &pci_host_data_le_ops, s,
++                          "pci-conf-data", 4);
++    memory_region_add_subregion(&s->pci_io, 0xcfc, &h->data_mem);
++
++    memory_region_init_io(&h->mmcfg, OBJECT(s), &raven_pci_io_ops, s,
++                          "pciio", 0x00400000);
++    memory_region_add_subregion(address_space_mem, 0x80800000, &h->mmcfg);
++
memory_region_init_io(&s->pci_intack, OBJECT(s), &raven_intack_ops, s, ++ "pci-intack", 1); ++ memory_region_add_subregion(address_space_mem, 0xbffffff0, &s->pci_intack); ++ ++ /* TODO Remove once realize propagates to child devices. */ ++ qdev_realize(DEVICE(&s->pci_dev), BUS(&s->pci_bus), errp); ++} ++ ++static void raven_pcihost_initfn(Object *obj) ++{ ++ PCIHostState *h = PCI_HOST_BRIDGE(obj); ++ PREPPCIState *s = RAVEN_PCI_HOST_BRIDGE(obj); ++ MemoryRegion *address_space_mem = get_system_memory(); ++ DeviceState *pci_dev; ++ ++ memory_region_init(&s->pci_io, obj, "pci-io", 0x3f800000); ++ memory_region_init_io(&s->pci_io_non_contiguous, obj, &raven_io_ops, s, ++ "pci-io-non-contiguous", 0x00800000); ++ memory_region_init(&s->pci_memory, obj, "pci-memory", 0x3f000000); ++ address_space_init(&s->pci_io_as, &s->pci_io, "raven-io"); ++ ++ /* CPU address space */ ++ memory_region_add_subregion(address_space_mem, PCI_IO_BASE_ADDR, ++ &s->pci_io); ++ memory_region_add_subregion_overlap(address_space_mem, PCI_IO_BASE_ADDR, ++ &s->pci_io_non_contiguous, 1); ++ memory_region_add_subregion(address_space_mem, 0xc0000000, &s->pci_memory); ++ pci_root_bus_new_inplace(&s->pci_bus, sizeof(s->pci_bus), DEVICE(obj), NULL, ++ &s->pci_memory, &s->pci_io, 0, TYPE_PCI_BUS); ++ ++ /* Bus master address space */ ++ memory_region_init(&s->bm, obj, "bm-raven", 4 * GiB); ++ memory_region_init_alias(&s->bm_pci_memory_alias, obj, "bm-pci-memory", ++ &s->pci_memory, 0, ++ memory_region_size(&s->pci_memory)); ++ memory_region_init_alias(&s->bm_ram_alias, obj, "bm-system", ++ get_system_memory(), 0, 0x80000000); ++ memory_region_add_subregion(&s->bm, 0 , &s->bm_pci_memory_alias); ++ memory_region_add_subregion(&s->bm, 0x80000000, &s->bm_ram_alias); ++ address_space_init(&s->bm_as, &s->bm, "raven-bm"); ++ pci_setup_iommu(&s->pci_bus, raven_pcihost_set_iommu, s); ++ ++ h->bus = &s->pci_bus; ++ ++ object_initialize(&s->pci_dev, sizeof(s->pci_dev), TYPE_RAVEN_PCI_DEVICE); ++ pci_dev = DEVICE(&s->pci_dev); ++ object_property_set_int(OBJECT(&s->pci_dev), "addr", PCI_DEVFN(0, 0), ++ NULL); ++ qdev_prop_set_bit(pci_dev, "multifunction", false); ++} ++ ++static void raven_realize(PCIDevice *d, Error **errp) ++{ ++ RavenPCIState *s = RAVEN_PCI_DEVICE(d); ++ char *filename; ++ int bios_size = -1; ++ ++ d->config[0x0C] = 0x08; // cache_line_size ++ d->config[0x0D] = 0x10; // latency_timer ++ d->config[0x34] = 0x00; // capabilities_pointer ++ ++ memory_region_init_rom_nomigrate(&s->bios, OBJECT(s), "bios", BIOS_SIZE, ++ &error_fatal); ++ memory_region_add_subregion(get_system_memory(), (uint32_t)(-BIOS_SIZE), ++ &s->bios); ++ if (s->bios_name) { ++ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, s->bios_name); ++ if (filename) { ++ if (s->elf_machine != EM_NONE) { ++ bios_size = load_elf(filename, NULL, NULL, NULL, NULL, ++ NULL, NULL, NULL, 1, s->elf_machine, ++ 0, 0); ++ } ++ if (bios_size < 0) { ++ bios_size = get_image_size(filename); ++ if (bios_size > 0 && bios_size <= BIOS_SIZE) { ++ hwaddr bios_addr; ++ bios_size = (bios_size + 0xfff) & ~0xfff; ++ bios_addr = (uint32_t)(-BIOS_SIZE); ++ bios_size = load_image_targphys(filename, bios_addr, ++ bios_size); ++ } ++ } ++ } ++ g_free(filename); ++ if (bios_size < 0 || bios_size > BIOS_SIZE) { ++ memory_region_del_subregion(get_system_memory(), &s->bios); ++ error_setg(errp, "Could not load bios image '%s'", s->bios_name); ++ return; ++ } ++ } ++ ++ vmstate_register_ram_global(&s->bios); ++} ++ ++static const VMStateDescription vmstate_raven = { ++ .name = "raven", ++ .version_id 
= 0, ++ .minimum_version_id = 0, ++ .fields = (VMStateField[]) { ++ VMSTATE_PCI_DEVICE(dev, RavenPCIState), ++ VMSTATE_END_OF_LIST() ++ }, ++}; ++ ++static void raven_class_init(ObjectClass *klass, void *data) ++{ ++ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); ++ DeviceClass *dc = DEVICE_CLASS(klass); ++ ++ k->realize = raven_realize; ++ k->vendor_id = PCI_VENDOR_ID_MOTOROLA; ++ k->device_id = PCI_DEVICE_ID_MOTOROLA_RAVEN; ++ k->revision = 0x00; ++ k->class_id = PCI_CLASS_BRIDGE_HOST; ++ dc->desc = "PReP Host Bridge - Motorola Raven"; ++ dc->vmsd = &vmstate_raven; ++ /* ++ * Reason: PCI-facing part of the host bridge, not usable without ++ * the host-facing part, which can't be device_add'ed, yet. ++ */ ++ dc->user_creatable = false; ++} ++ ++static const TypeInfo raven_info = { ++ .name = TYPE_RAVEN_PCI_DEVICE, ++ .parent = TYPE_PCI_DEVICE, ++ .instance_size = sizeof(RavenPCIState), ++ .class_init = raven_class_init, ++ .interfaces = (InterfaceInfo[]) { ++ { INTERFACE_CONVENTIONAL_PCI_DEVICE }, ++ { }, ++ }, ++}; ++ ++static Property raven_pcihost_properties[] = { ++ DEFINE_PROP_UINT32("elf-machine", PREPPCIState, pci_dev.elf_machine, ++ EM_NONE), ++ DEFINE_PROP_STRING("bios-name", PREPPCIState, pci_dev.bios_name), ++ /* Temporary workaround until legacy prep machine is removed */ ++ DEFINE_PROP_BOOL("is-legacy-prep", PREPPCIState, is_legacy_prep, ++ false), ++ DEFINE_PROP_END_OF_LIST() ++}; ++ ++static void raven_pcihost_class_init(ObjectClass *klass, void *data) ++{ ++ DeviceClass *dc = DEVICE_CLASS(klass); ++ ++ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); ++ dc->realize = raven_pcihost_realizefn; ++ device_class_set_props(dc, raven_pcihost_properties); ++ dc->fw_name = "pci"; ++} ++ ++static const TypeInfo raven_pcihost_info = { ++ .name = TYPE_RAVEN_PCI_HOST_BRIDGE, ++ .parent = TYPE_PCI_HOST_BRIDGE, ++ .instance_size = sizeof(PREPPCIState), ++ .instance_init = raven_pcihost_initfn, ++ .class_init = raven_pcihost_class_init, ++}; ++ ++static void raven_register_types(void) ++{ ++ type_register_static(&raven_pcihost_info); ++ type_register_static(&raven_info); ++} ++ ++type_init(raven_register_types) +diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig +index 7fcafec60a..322a7eb031 100644 +--- a/hw/ppc/Kconfig ++++ b/hw/ppc/Kconfig +@@ -85,7 +85,7 @@ config PREP + imply PCI_DEVICES + imply TEST_DEVICES + select CS4231A +- select PREP_PCI ++ select RAVEN_PCI + select I82378 + select LSI_SCSI_PCI + select M48T59 +diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c +index 9a6ae867e4..9fad1854b1 100644 +--- a/hw/ppc/pegasos2.c ++++ b/hw/ppc/pegasos2.c +@@ -443,10 +443,17 @@ static target_ulong vhyp_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp) + return POWERPC_CPU(current_cpu)->env.spr[SPR_SDR1]; + } + ++static bool pegasos2_setprop(MachineState *ms, const char *path, ++ const char *propname, void *val, int vallen) ++{ ++ return true; ++} ++ + static void pegasos2_machine_class_init(ObjectClass *oc, void *data) + { + MachineClass *mc = MACHINE_CLASS(oc); + PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc); ++ VofMachineIfClass *vmc = VOF_MACHINE_CLASS(oc); + + mc->desc = "Genesi/bPlan Pegasos II"; + mc->init = pegasos2_init; +@@ -462,6 +469,8 @@ static void pegasos2_machine_class_init(ObjectClass *oc, void *data) + vhc->cpu_exec_enter = vhyp_nop; + vhc->cpu_exec_exit = vhyp_nop; + vhc->encode_hpt_for_kvm_pr = vhyp_encode_hpt_for_kvm_pr; ++ ++ vmc->setprop = pegasos2_setprop; + } + + static const TypeInfo pegasos2_machine_info = { +@@ -471,6 +480,7 @@ static const TypeInfo 
pegasos2_machine_info = { + .instance_size = sizeof(Pegasos2MachineState), + .interfaces = (InterfaceInfo[]) { + { TYPE_PPC_VIRTUAL_HYPERVISOR }, ++ { TYPE_VOF_MACHINE_IF }, + { } + }, + }; +diff --git a/hw/s390x/meson.build b/hw/s390x/meson.build +index 327e9c93af..28484256ec 100644 +--- a/hw/s390x/meson.build ++++ b/hw/s390x/meson.build +@@ -16,7 +16,6 @@ s390x_ss.add(files( + 'sclp.c', + 'sclpcpu.c', + 'sclpquiesce.c', +- 'tod-qemu.c', + 'tod.c', + )) + s390x_ss.add(when: 'CONFIG_KVM', if_true: files( +@@ -25,6 +24,9 @@ s390x_ss.add(when: 'CONFIG_KVM', if_true: files( + 's390-stattrib-kvm.c', + 'pv.c', + )) ++s390x_ss.add(when: 'CONFIG_TCG', if_true: files( ++ 'tod-tcg.c', ++)) + s390x_ss.add(when: 'CONFIG_S390_CCW_VIRTIO', if_true: files('s390-virtio-ccw.c')) + s390x_ss.add(when: 'CONFIG_TERMINAL3270', if_true: files('3270-ccw.c')) + s390x_ss.add(when: 'CONFIG_VFIO', if_true: files('s390-pci-vfio.c')) +diff --git a/hw/s390x/s390-stattrib-kvm.c b/hw/s390x/s390-stattrib-kvm.c +index f0b11a74e4..24cd01382e 100644 +--- a/hw/s390x/s390-stattrib-kvm.c ++++ b/hw/s390x/s390-stattrib-kvm.c +@@ -16,7 +16,7 @@ + #include "qemu/error-report.h" + #include "sysemu/kvm.h" + #include "exec/ram_addr.h" +-#include "kvm_s390x.h" ++#include "kvm/kvm_s390x.h" + + Object *kvm_s390_stattrib_create(void) + { +diff --git a/hw/s390x/tod-kvm.c b/hw/s390x/tod-kvm.c +index 0b94477486..ec855811ae 100644 +--- a/hw/s390x/tod-kvm.c ++++ b/hw/s390x/tod-kvm.c +@@ -13,7 +13,7 @@ + #include "qemu/module.h" + #include "sysemu/runstate.h" + #include "hw/s390x/tod.h" +-#include "kvm_s390x.h" ++#include "kvm/kvm_s390x.h" + + static void kvm_s390_get_tod_raw(S390TOD *tod, Error **errp) + { +diff --git a/hw/s390x/tod-qemu.c b/hw/s390x/tod-qemu.c +deleted file mode 100644 +index e91b9590f5..0000000000 +--- a/hw/s390x/tod-qemu.c ++++ /dev/null +@@ -1,89 +0,0 @@ +-/* +- * TOD (Time Of Day) clock - QEMU implementation +- * +- * Copyright 2018 Red Hat, Inc. +- * Author(s): David Hildenbrand +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. +- */ +- +-#include "qemu/osdep.h" +-#include "qemu-common.h" +-#include "qapi/error.h" +-#include "hw/s390x/tod.h" +-#include "qemu/timer.h" +-#include "qemu/cutils.h" +-#include "qemu/module.h" +-#include "cpu.h" +-#include "tcg_s390x.h" +- +-static void qemu_s390_tod_get(const S390TODState *td, S390TOD *tod, +- Error **errp) +-{ +- *tod = td->base; +- +- tod->low += time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); +- if (tod->low < td->base.low) { +- tod->high++; +- } +-} +- +-static void qemu_s390_tod_set(S390TODState *td, const S390TOD *tod, +- Error **errp) +-{ +- CPUState *cpu; +- +- td->base = *tod; +- +- td->base.low -= time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); +- if (td->base.low > tod->low) { +- td->base.high--; +- } +- +- /* +- * The TOD has been changed and we have to recalculate the CKC values +- * for all CPUs. We do this asynchronously, as "SET CLOCK should be +- * issued only while all other activity on all CPUs .. has been +- * suspended". 
+- */ +- CPU_FOREACH(cpu) { +- async_run_on_cpu(cpu, tcg_s390_tod_updated, RUN_ON_CPU_NULL); +- } +-} +- +-static void qemu_s390_tod_class_init(ObjectClass *oc, void *data) +-{ +- S390TODClass *tdc = S390_TOD_CLASS(oc); +- +- tdc->get = qemu_s390_tod_get; +- tdc->set = qemu_s390_tod_set; +-} +- +-static void qemu_s390_tod_init(Object *obj) +-{ +- S390TODState *td = S390_TOD(obj); +- struct tm tm; +- +- qemu_get_timedate(&tm, 0); +- td->base.high = 0; +- td->base.low = TOD_UNIX_EPOCH + (time2tod(mktimegm(&tm)) * 1000000000ULL); +- if (td->base.low < TOD_UNIX_EPOCH) { +- td->base.high += 1; +- } +-} +- +-static TypeInfo qemu_s390_tod_info = { +- .name = TYPE_QEMU_S390_TOD, +- .parent = TYPE_S390_TOD, +- .instance_size = sizeof(S390TODState), +- .instance_init = qemu_s390_tod_init, +- .class_init = qemu_s390_tod_class_init, +- .class_size = sizeof(S390TODClass), +-}; +- +-static void register_types(void) +-{ +- type_register_static(&qemu_s390_tod_info); +-} +-type_init(register_types); +diff --git a/hw/s390x/tod-tcg.c b/hw/s390x/tod-tcg.c +new file mode 100644 +index 0000000000..9bb94ff72b +--- /dev/null ++++ b/hw/s390x/tod-tcg.c +@@ -0,0 +1,89 @@ ++/* ++ * TOD (Time Of Day) clock - TCG implementation ++ * ++ * Copyright 2018 Red Hat, Inc. ++ * Author(s): David Hildenbrand ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. ++ */ ++ ++#include "qemu/osdep.h" ++#include "qemu-common.h" ++#include "qapi/error.h" ++#include "hw/s390x/tod.h" ++#include "qemu/timer.h" ++#include "qemu/cutils.h" ++#include "qemu/module.h" ++#include "cpu.h" ++#include "tcg/tcg_s390x.h" ++ ++static void qemu_s390_tod_get(const S390TODState *td, S390TOD *tod, ++ Error **errp) ++{ ++ *tod = td->base; ++ ++ tod->low += time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); ++ if (tod->low < td->base.low) { ++ tod->high++; ++ } ++} ++ ++static void qemu_s390_tod_set(S390TODState *td, const S390TOD *tod, ++ Error **errp) ++{ ++ CPUState *cpu; ++ ++ td->base = *tod; ++ ++ td->base.low -= time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); ++ if (td->base.low > tod->low) { ++ td->base.high--; ++ } ++ ++ /* ++ * The TOD has been changed and we have to recalculate the CKC values ++ * for all CPUs. We do this asynchronously, as "SET CLOCK should be ++ * issued only while all other activity on all CPUs .. has been ++ * suspended". 
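++     *
++     * (Editor's illustration, not in the original patch: as
++     * qemu_s390_tod_get() above shows, the guest-visible TOD is
++     * effectively
++     *     base + time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)),
++     * so rewriting the base here shifts every CPU's clock-comparator
++     * deadline, which is why tcg_s390_tod_updated() must run on each
++     * CPU below.)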
++ */ ++ CPU_FOREACH(cpu) { ++ async_run_on_cpu(cpu, tcg_s390_tod_updated, RUN_ON_CPU_NULL); ++ } ++} ++ ++static void qemu_s390_tod_class_init(ObjectClass *oc, void *data) ++{ ++ S390TODClass *tdc = S390_TOD_CLASS(oc); ++ ++ tdc->get = qemu_s390_tod_get; ++ tdc->set = qemu_s390_tod_set; ++} ++ ++static void qemu_s390_tod_init(Object *obj) ++{ ++ S390TODState *td = S390_TOD(obj); ++ struct tm tm; ++ ++ qemu_get_timedate(&tm, 0); ++ td->base.high = 0; ++ td->base.low = TOD_UNIX_EPOCH + (time2tod(mktimegm(&tm)) * 1000000000ULL); ++ if (td->base.low < TOD_UNIX_EPOCH) { ++ td->base.high += 1; ++ } ++} ++ ++static TypeInfo qemu_s390_tod_info = { ++ .name = TYPE_QEMU_S390_TOD, ++ .parent = TYPE_S390_TOD, ++ .instance_size = sizeof(S390TODState), ++ .instance_init = qemu_s390_tod_init, ++ .class_init = qemu_s390_tod_class_init, ++ .class_size = sizeof(S390TODClass), ++}; ++ ++static void register_types(void) ++{ ++ type_register_static(&qemu_s390_tod_info); ++} ++type_init(register_types); +diff --git a/hw/s390x/tod.c b/hw/s390x/tod.c +index 3c2979175e..fd5a36bf24 100644 +--- a/hw/s390x/tod.c ++++ b/hw/s390x/tod.c +@@ -14,6 +14,8 @@ + #include "qemu/error-report.h" + #include "qemu/module.h" + #include "sysemu/kvm.h" ++#include "sysemu/tcg.h" ++#include "sysemu/qtest.h" + #include "migration/qemu-file-types.h" + #include "migration/register.h" + +@@ -23,8 +25,13 @@ void s390_init_tod(void) + + if (kvm_enabled()) { + obj = object_new(TYPE_KVM_S390_TOD); +- } else { ++ } else if (tcg_enabled()) { + obj = object_new(TYPE_QEMU_S390_TOD); ++ } else if (qtest_enabled()) { ++ return; ++ } else { ++ error_report("current accelerator not handled in s390_init_tod!"); ++ abort(); + } + object_property_add_child(qdev_get_machine(), TYPE_S390_TOD, obj); + object_unref(obj); +diff --git a/hw/sd/sd.c b/hw/sd/sd.c +index 282d39a704..1f964e022b 100644 +--- a/hw/sd/sd.c ++++ b/hw/sd/sd.c +@@ -937,6 +937,19 @@ static void sd_lock_command(SDState *sd) + sd->card_status &= ~CARD_IS_LOCKED; + } + ++static bool address_in_range(SDState *sd, const char *desc, ++ uint64_t addr, uint32_t length) ++{ ++ if (addr + length > sd->size) { ++ qemu_log_mask(LOG_GUEST_ERROR, ++ "%s offset %"PRIu64" > card %"PRIu64" [%%%u]\n", ++ desc, addr, sd->size, length); ++ sd->card_status |= ADDRESS_ERROR; ++ return false; ++ } ++ return true; ++} ++ + static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) + { + uint32_t rca = 0x0000; +@@ -1218,8 +1231,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) + switch (sd->state) { + case sd_transfer_state: + +- if (addr + sd->blk_len > sd->size) { +- sd->card_status |= ADDRESS_ERROR; ++ if (!address_in_range(sd, "READ_BLOCK", addr, sd->blk_len)) { + return sd_r1; + } + +@@ -1264,8 +1276,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) + switch (sd->state) { + case sd_transfer_state: + +- if (addr + sd->blk_len > sd->size) { +- sd->card_status |= ADDRESS_ERROR; ++ if (!address_in_range(sd, "WRITE_BLOCK", addr, sd->blk_len)) { + return sd_r1; + } + +@@ -1325,8 +1336,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) + + switch (sd->state) { + case sd_transfer_state: +- if (addr >= sd->size) { +- sd->card_status |= ADDRESS_ERROR; ++ if (!address_in_range(sd, "SET_WRITE_PROT", addr, 1)) { + return sd_r1b; + } + +@@ -1348,8 +1358,7 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) + + switch (sd->state) { + case sd_transfer_state: +- if (addr >= sd->size) { +- sd->card_status |= ADDRESS_ERROR; ++ if 
(!address_in_range(sd, "CLR_WRITE_PROT", addr, 1)) { + return sd_r1b; + } + +@@ -1371,6 +1380,11 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) + + switch (sd->state) { + case sd_transfer_state: ++ if (!address_in_range(sd, "SEND_WRITE_PROT", ++ req.arg, sd->blk_len)) { ++ return sd_r1; ++ } ++ + sd->state = sd_sendingdata_state; + *(uint32_t *) sd->data = sd_wpbits(sd, req.arg); + sd->data_start = addr; +@@ -1504,7 +1518,8 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) + return sd_illegal; + } + +- qemu_log_mask(LOG_GUEST_ERROR, "SD: CMD%i in a wrong state\n", req.cmd); ++ qemu_log_mask(LOG_GUEST_ERROR, "SD: CMD%i in a wrong state: %s\n", ++ req.cmd, sd_state_name(sd->state)); + return sd_illegal; + } + +@@ -1825,8 +1840,8 @@ void sd_write_byte(SDState *sd, uint8_t value) + case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */ + if (sd->data_offset == 0) { + /* Start of the block - let's check the address is valid */ +- if (sd->data_start + sd->blk_len > sd->size) { +- sd->card_status |= ADDRESS_ERROR; ++ if (!address_in_range(sd, "WRITE_MULTIPLE_BLOCK", ++ sd->data_start, sd->blk_len)) { + break; + } + if (sd->size <= SDSC_MAX_CAPACITY) { +@@ -1998,8 +2013,8 @@ uint8_t sd_read_byte(SDState *sd) + + case 18: /* CMD18: READ_MULTIPLE_BLOCK */ + if (sd->data_offset == 0) { +- if (sd->data_start + io_len > sd->size) { +- sd->card_status |= ADDRESS_ERROR; ++ if (!address_in_range(sd, "READ_MULTIPLE_BLOCK", ++ sd->data_start, io_len)) { + return 0x00; + } + BLK_READ_BLOCK(sd->data_start, io_len); +diff --git a/hw/vfio/ap.c b/hw/vfio/ap.c +index 4b32aca1a0..e0dd561e85 100644 +--- a/hw/vfio/ap.c ++++ b/hw/vfio/ap.c +@@ -21,7 +21,7 @@ + #include "qemu/module.h" + #include "qemu/option.h" + #include "qemu/config-file.h" +-#include "kvm_s390x.h" ++#include "kvm/kvm_s390x.h" + #include "migration/vmstate.h" + #include "hw/qdev-properties.h" + #include "hw/s390x/ap-bridge.h" +diff --git a/include/hw/s390x/tod.h b/include/hw/s390x/tod.h +index ff3195a4bf..0935e85089 100644 +--- a/include/hw/s390x/tod.h ++++ b/include/hw/s390x/tod.h +@@ -12,7 +12,7 @@ + #define HW_S390_TOD_H + + #include "hw/qdev-core.h" +-#include "target/s390x/s390-tod.h" ++#include "tcg/s390-tod.h" + #include "qom/object.h" + + typedef struct S390TOD { +diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h +index bcf54d970f..d5a1c69f99 100644 +--- a/include/hw/virtio/virtio-gpu.h ++++ b/include/hw/virtio/virtio-gpu.h +@@ -81,6 +81,7 @@ struct virtio_gpu_scanout { + struct virtio_gpu_requested_state { + uint16_t width_mm, height_mm; + uint32_t width, height; ++ uint32_t refresh_rate; + int x, y; + }; + +diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h +index 8b66a4e7d0..6d4eef8065 100644 +--- a/include/sysemu/hvf_int.h ++++ b/include/sysemu/hvf_int.h +@@ -11,7 +11,11 @@ + #ifndef HVF_INT_H + #define HVF_INT_H + ++#ifdef __aarch64__ ++#include ++#else + #include ++#endif + + /* hvf_slot flags */ + #define HVF_SLOT_LOG (1 << 0) +@@ -44,7 +48,10 @@ struct HVFState { + extern HVFState *hvf_state; + + struct hvf_vcpu_state { +- int fd; ++ uint64_t fd; ++ void *exit; ++ bool vtimer_masked; ++ sigset_t unblock_ipi_mask; + }; + + void assert_hvf_ok(hv_return_t ret); +@@ -54,5 +61,6 @@ int hvf_vcpu_exec(CPUState *); + hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t); + int hvf_put_registers(CPUState *); + int hvf_get_registers(CPUState *); ++void hvf_kick_vcpu_thread(CPUState *cpu); + + #endif +diff --git a/include/ui/cocoa.h b/include/ui/cocoa.h +new file mode 100644 
+index 0000000000..12ccc97e6f +--- /dev/null ++++ b/include/ui/cocoa.h +@@ -0,0 +1,127 @@ ++/* ++ * QEMU Cocoa CG display driver ++ * ++ * Copyright (c) 2008 Mike Kronenberg ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++ * THE SOFTWARE. ++ */ ++ ++#ifndef UI_COCOA_H ++#define UI_COCOA_H ++ ++#import ++ ++#include "ui/clipboard.h" ++#include "ui/console.h" ++#include "ui/kbd-state.h" ++#include "qemu/thread.h" ++ ++//#define DEBUG ++ ++#ifdef DEBUG ++#define COCOA_DEBUG(...) { (void) fprintf (stdout, __VA_ARGS__); } ++#else ++#define COCOA_DEBUG(...) ((void) 0) ++#endif ++ ++typedef void (^CodeBlock)(void); ++typedef bool (^BoolCodeBlock)(void); ++ ++typedef struct { ++ DisplayChangeListener dcl; ++ DisplaySurface *surface; ++ QemuMutex draw_mutex; ++ int mouse_x; ++ int mouse_y; ++ int mouse_on; ++ CGImageRef cursor_cgimage; ++ int cursor_show; ++ bool swap_option_command; ++ bool inited; ++} QEMUScreen; ++ ++typedef struct { ++ QemuClipboardInfo *info; ++ QemuEvent event; ++} QemuCocoaClipboard; ++ ++@interface QemuCocoaView : NSView ++{ ++ NSTextField *pauseLabel; ++ NSTrackingArea *trackingArea; ++ QEMUScreen *screen; ++ int screen_width; ++ int screen_height; ++ QKbdState *kbd; ++ BOOL isMouseGrabbed; ++ BOOL isAbsoluteEnabled; ++ CFMachPortRef eventsTap; ++} ++- (id)initWithFrame:(NSRect)frameRect ++ screen:(QEMUScreen *)given_screen; ++- (void) frameUpdated; ++- (NSSize) computeUnzoomedSize; ++- (NSSize) fixZoomedFullScreenSize:(NSSize)proposedSize; ++- (void) resizeWindow; ++- (void) updateUIInfo; ++- (void) updateScreenWidth:(int)w height:(int)h; ++- (void) grabMouse; ++- (void) ungrabMouse; ++- (void) setFullGrab:(id)sender; ++- (bool) handleEvent:(NSEvent *)event; ++- (void) setAbsoluteEnabled:(BOOL)tIsAbsoluteEnabled; ++/* The state surrounding mouse grabbing is potentially confusing. ++ * isAbsoluteEnabled tracks qemu_input_is_absolute() [ie "is the emulated ++ * pointing device an absolute-position one?"], but is only updated on ++ * next refresh. ++ * isMouseGrabbed tracks whether GUI events are directed to the guest; ++ * it controls whether special keys like Cmd get sent to the guest, ++ * and whether we capture the mouse when in non-absolute mode. 
++ */ ++- (BOOL) isMouseGrabbed; ++- (BOOL) isAbsoluteEnabled; ++- (void) setNeedsDisplayForCursorX:(int)x ++ y:(int)y ++ width:(int)width ++ height:(int)height ++ screenHeight:(int)screen_height; ++- (void)displayPause; ++- (void)removePause; ++@end ++ ++@interface QemuCocoaAppController : NSObject ++ ++{ ++ QemuSemaphore *started_sem; ++ NSArray * supportedImageFileTypes; ++ QemuCocoaView *cocoaView; ++} ++- (id) initWithStartedSem:(QemuSemaphore *)given_started_sem ++ screen:(QEMUScreen *)screen; ++- (QemuCocoaView *)cocoaView; ++@end ++ ++@interface QemuCocoaPasteboardTypeOwner : NSObject ++{ ++ QemuCocoaClipboard *cb; ++} ++- (id) initWith:(QemuCocoaClipboard *)aCb; ++@end ++ ++#endif +diff --git a/include/ui/console.h b/include/ui/console.h +index b30b63976a..d8d7649b14 100644 +--- a/include/ui/console.h ++++ b/include/ui/console.h +@@ -128,6 +128,7 @@ typedef struct QemuUIInfo { + int yoff; + uint32_t width; + uint32_t height; ++ uint32_t refresh_rate; + } QemuUIInfo; + + /* cursor data format is 32bit RGBA */ +@@ -206,41 +207,6 @@ typedef struct DisplayChangeListenerOps { + QEMUCursor *cursor); + + /* required if GL */ +- QEMUGLContext (*dpy_gl_ctx_create)(DisplayChangeListener *dcl, +- QEMUGLParams *params); +- /* required if GL */ +- void (*dpy_gl_ctx_destroy)(DisplayChangeListener *dcl, +- QEMUGLContext ctx); +- /* required if GL */ +- int (*dpy_gl_ctx_make_current)(DisplayChangeListener *dcl, +- QEMUGLContext ctx); +- +- /* required if GL */ +- void (*dpy_gl_scanout_disable)(DisplayChangeListener *dcl); +- /* required if GL */ +- void (*dpy_gl_scanout_texture)(DisplayChangeListener *dcl, +- uint32_t backing_id, +- bool backing_y_0_top, +- uint32_t backing_width, +- uint32_t backing_height, +- uint32_t x, uint32_t y, +- uint32_t w, uint32_t h); +- /* optional (default to true if has dpy_gl_scanout_dmabuf) */ +- bool (*dpy_has_dmabuf)(DisplayChangeListener *dcl); +- /* optional */ +- void (*dpy_gl_scanout_dmabuf)(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf); +- /* optional */ +- void (*dpy_gl_cursor_dmabuf)(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf, bool have_hot, +- uint32_t hot_x, uint32_t hot_y); +- /* optional */ +- void (*dpy_gl_cursor_position)(DisplayChangeListener *dcl, +- uint32_t pos_x, uint32_t pos_y); +- /* optional */ +- void (*dpy_gl_release_dmabuf)(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf); +- /* required if GL */ + void (*dpy_gl_update)(DisplayChangeListener *dcl, + uint32_t x, uint32_t y, uint32_t w, uint32_t h); + +@@ -255,6 +221,36 @@ struct DisplayChangeListener { + QLIST_ENTRY(DisplayChangeListener) next; + }; + ++typedef uint32_t (* DisplayGLTextureBorrower)(uint32_t id, bool *y_0_top, ++ uint32_t *width, uint32_t *height); ++ ++typedef struct DisplayGLOps { ++ QEMUGLContext (*dpy_gl_ctx_create)(void *dg, QEMUGLParams *params); ++ void (*dpy_gl_ctx_destroy)(void *dg, QEMUGLContext ctx); ++ int (*dpy_gl_ctx_make_current)(void *dg, QEMUGLContext ctx); ++ ++ bool (*dpy_gl_scanout_get_enabled)(void *dg); ++ void (*dpy_gl_scanout_disable)(void *dg); ++ void (*dpy_gl_scanout_texture)(void *dg, ++ uint32_t backing_id, ++ DisplayGLTextureBorrower backing_borrow, ++ uint32_t x, uint32_t y, ++ uint32_t w, uint32_t h); ++ ++ /* optional (default to true if has dpy_gl_scanout_dmabuf) */ ++ bool (*dpy_has_dmabuf)(void *dg); ++ /* optional */ ++ void (*dpy_gl_scanout_dmabuf)(void *dg, QemuDmaBuf *dmabuf); ++ /* optional */ ++ void (*dpy_gl_cursor_dmabuf)(void *dg, ++ QemuDmaBuf *dmabuf, bool have_hot, ++ uint32_t hot_x, uint32_t hot_y); ++ /* 
optional */ ++ void (*dpy_gl_cursor_position)(void *dg, uint32_t pos_x, uint32_t pos_y); ++ /* optional */ ++ void (*dpy_gl_release_dmabuf)(void *dg, QemuDmaBuf *dmabuf); ++} DisplayGLOps; ++ + DisplayState *init_displaystate(void); + DisplaySurface *qemu_create_displaysurface_from(int width, int height, + pixman_format_code_t format, +@@ -277,6 +273,7 @@ static inline int is_placeholder(DisplaySurface *surface) + return surface->flags & QEMU_PLACEHOLDER_FLAG; + } + ++void register_displayglops(const DisplayGLOps *dg_ops); + void register_displaychangelistener(DisplayChangeListener *dcl); + void update_displaychangelistener(DisplayChangeListener *dcl, + uint64_t interval); +@@ -300,9 +297,8 @@ bool dpy_gfx_check_format(QemuConsole *con, + pixman_format_code_t format); + + void dpy_gl_scanout_disable(QemuConsole *con); +-void dpy_gl_scanout_texture(QemuConsole *con, +- uint32_t backing_id, bool backing_y_0_top, +- uint32_t backing_width, uint32_t backing_height, ++void dpy_gl_scanout_texture(QemuConsole *con, uint32_t backing_id, ++ DisplayGLTextureBorrower backing_borrow, + uint32_t x, uint32_t y, uint32_t w, uint32_t h); + void dpy_gl_scanout_dmabuf(QemuConsole *con, + QemuDmaBuf *dmabuf); +@@ -320,7 +316,8 @@ QEMUGLContext dpy_gl_ctx_create(QemuConsole *con, + void dpy_gl_ctx_destroy(QemuConsole *con, QEMUGLContext ctx); + int dpy_gl_ctx_make_current(QemuConsole *con, QEMUGLContext ctx); + +-bool console_has_gl(QemuConsole *con); ++void console_set_displayglcontext(QemuConsole *con, void *dg); ++bool console_has_gl(void); + + static inline int surface_stride(DisplaySurface *s) + { +@@ -380,7 +377,6 @@ typedef struct GraphicHwOps { + void (*gfx_update)(void *opaque); + bool gfx_update_async; /* if true, calls graphic_hw_update_done() */ + void (*text_update)(void *opaque, console_ch_t *text); +- void (*update_interval)(void *opaque, uint64_t interval); + int (*ui_info)(void *opaque, uint32_t head, QemuUIInfo *info); + void (*gl_block)(void *opaque, bool block); + void (*gl_flushed)(void *opaque); +diff --git a/include/ui/egl-context.h b/include/ui/egl-context.h +index 9374fe41e3..450ac78ad3 100644 +--- a/include/ui/egl-context.h ++++ b/include/ui/egl-context.h +@@ -4,10 +4,8 @@ + #include "ui/console.h" + #include "ui/egl-helpers.h" + +-QEMUGLContext qemu_egl_create_context(DisplayChangeListener *dcl, +- QEMUGLParams *params); +-void qemu_egl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx); +-int qemu_egl_make_context_current(DisplayChangeListener *dcl, +- QEMUGLContext ctx); ++QEMUGLContext qemu_egl_create_context(void *dg, QEMUGLParams *params); ++void qemu_egl_destroy_context(void *dg, QEMUGLContext ctx); ++int qemu_egl_make_context_current(void *dg, QEMUGLContext ctx); + + #endif /* EGL_CONTEXT_H */ +diff --git a/include/ui/egl-helpers.h b/include/ui/egl-helpers.h +index f1bf8f97fc..e36cc00605 100644 +--- a/include/ui/egl-helpers.h ++++ b/include/ui/egl-helpers.h +@@ -48,7 +48,9 @@ void egl_dmabuf_release_texture(QemuDmaBuf *dmabuf); + + #endif + +-EGLSurface qemu_egl_init_surface_x11(EGLContext ectx, EGLNativeWindowType win); ++EGLSurface qemu_egl_init_surface(EGLContext ectx, EGLNativeWindowType win); ++ ++int qemu_egl_init_dpy_cocoa(DisplayGLMode mode); + + #if defined(CONFIG_X11) || defined(CONFIG_GBM) + +diff --git a/include/ui/gtk.h b/include/ui/gtk.h +index 9516670ebc..6bb5c49427 100644 +--- a/include/ui/gtk.h ++++ b/include/ui/gtk.h +@@ -21,7 +21,7 @@ + #include "ui/clipboard.h" + #include "ui/console.h" + #include "ui/kbd-state.h" +-#if defined(CONFIG_OPENGL) 
++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + #include "ui/egl-helpers.h" + #include "ui/egl-context.h" + #endif +@@ -39,7 +39,7 @@ typedef struct VirtualGfxConsole { + cairo_surface_t *surface; + double scale_x; + double scale_y; +-#if defined(CONFIG_OPENGL) ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + QemuGLShader *gls; + EGLContext ectx; + EGLSurface esurface; +@@ -151,7 +151,7 @@ extern bool gtk_use_gl_area; + + /* ui/gtk.c */ + void gd_update_windowsize(VirtualConsole *vc); +-int gd_monitor_update_interval(GtkWidget *widget); ++void gd_update_monitor_refresh_rate(VirtualConsole *vc, GtkWidget *widget); + + /* ui/gtk-egl.c */ + void gd_egl_init(VirtualConsole *vc); +@@ -161,30 +161,24 @@ void gd_egl_update(DisplayChangeListener *dcl, + void gd_egl_refresh(DisplayChangeListener *dcl); + void gd_egl_switch(DisplayChangeListener *dcl, + DisplaySurface *surface); +-QEMUGLContext gd_egl_create_context(DisplayChangeListener *dcl, +- QEMUGLParams *params); +-void gd_egl_scanout_disable(DisplayChangeListener *dcl); +-void gd_egl_scanout_texture(DisplayChangeListener *dcl, ++QEMUGLContext gd_egl_create_context(void *dg, QEMUGLParams *params); ++bool gd_egl_scanout_get_enabled(void *dg); ++void gd_egl_scanout_disable(void *dg); ++void gd_egl_scanout_texture(void *dg, + uint32_t backing_id, +- bool backing_y_0_top, +- uint32_t backing_width, +- uint32_t backing_height, ++ DisplayGLTextureBorrower backing_borrow, + uint32_t x, uint32_t y, + uint32_t w, uint32_t h); +-void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf); +-void gd_egl_cursor_dmabuf(DisplayChangeListener *dcl, ++void gd_egl_scanout_dmabuf(void *dg, QemuDmaBuf *dmabuf); ++void gd_egl_cursor_dmabuf(void *dg, + QemuDmaBuf *dmabuf, bool have_hot, + uint32_t hot_x, uint32_t hot_y); +-void gd_egl_cursor_position(DisplayChangeListener *dcl, +- uint32_t pos_x, uint32_t pos_y); +-void gd_egl_release_dmabuf(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf); ++void gd_egl_cursor_position(void *dg, uint32_t pos_x, uint32_t pos_y); ++void gd_egl_release_dmabuf(void *dg, QemuDmaBuf *dmabuf); + void gd_egl_scanout_flush(DisplayChangeListener *dcl, + uint32_t x, uint32_t y, uint32_t w, uint32_t h); + void gtk_egl_init(DisplayGLMode mode); +-int gd_egl_make_current(DisplayChangeListener *dcl, +- QEMUGLContext ctx); ++int gd_egl_make_current(void *dg, QEMUGLContext ctx); + + /* ui/gtk-gl-area.c */ + void gd_gl_area_init(VirtualConsole *vc); +@@ -194,25 +188,20 @@ void gd_gl_area_update(DisplayChangeListener *dcl, + void gd_gl_area_refresh(DisplayChangeListener *dcl); + void gd_gl_area_switch(DisplayChangeListener *dcl, + DisplaySurface *surface); +-QEMUGLContext gd_gl_area_create_context(DisplayChangeListener *dcl, +- QEMUGLParams *params); +-void gd_gl_area_destroy_context(DisplayChangeListener *dcl, +- QEMUGLContext ctx); +-void gd_gl_area_scanout_dmabuf(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf); +-void gd_gl_area_scanout_texture(DisplayChangeListener *dcl, ++QEMUGLContext gd_gl_area_create_context(void *dg, QEMUGLParams *params); ++void gd_gl_area_destroy_context(void *dg, QEMUGLContext ctx); ++bool gd_gl_area_scanout_get_enabled(void *dg); ++void gd_gl_area_scanout_dmabuf(void *dg, QemuDmaBuf *dmabuf); ++void gd_gl_area_scanout_texture(void *dg, + uint32_t backing_id, +- bool backing_y_0_top, +- uint32_t backing_width, +- uint32_t backing_height, ++ DisplayGLTextureBorrower backing_borrow, + uint32_t x, uint32_t y, + uint32_t w, uint32_t h); +-void 
gd_gl_area_scanout_disable(DisplayChangeListener *dcl); ++void gd_gl_area_scanout_disable(void *dg); + void gd_gl_area_scanout_flush(DisplayChangeListener *dcl, + uint32_t x, uint32_t y, uint32_t w, uint32_t h); + void gtk_gl_area_init(void); +-int gd_gl_area_make_current(DisplayChangeListener *dcl, +- QEMUGLContext ctx); ++int gd_gl_area_make_current(void *dg, QEMUGLContext ctx); + + /* gtk-clipboard.c */ + void gd_clipboard_init(GtkDisplayState *gd); +diff --git a/include/ui/sdl2.h b/include/ui/sdl2.h +index f85c117a78..3001916bbc 100644 +--- a/include/ui/sdl2.h ++++ b/include/ui/sdl2.h +@@ -11,7 +11,7 @@ + #endif + + #include "ui/kbd-state.h" +-#ifdef CONFIG_OPENGL ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + # include "ui/egl-helpers.h" + #endif + +@@ -32,7 +32,7 @@ struct sdl2_console { + int ignore_hotkeys; + SDL_GLContext winctx; + QKbdState *kbd; +-#ifdef CONFIG_OPENGL ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + QemuGLShader *gls; + egl_fb guest_fb; + egl_fb win_fb; +@@ -65,18 +65,15 @@ void sdl2_gl_switch(DisplayChangeListener *dcl, + void sdl2_gl_refresh(DisplayChangeListener *dcl); + void sdl2_gl_redraw(struct sdl2_console *scon); + +-QEMUGLContext sdl2_gl_create_context(DisplayChangeListener *dcl, +- QEMUGLParams *params); +-void sdl2_gl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx); +-int sdl2_gl_make_context_current(DisplayChangeListener *dcl, +- QEMUGLContext ctx); ++QEMUGLContext sdl2_gl_create_context(void *dg, QEMUGLParams *params); ++void sdl2_gl_destroy_context(void *dg, QEMUGLContext ctx); ++int sdl2_gl_make_context_current(void *dg, QEMUGLContext ctx); + +-void sdl2_gl_scanout_disable(DisplayChangeListener *dcl); +-void sdl2_gl_scanout_texture(DisplayChangeListener *dcl, ++bool sdl2_gl_scanout_get_enabled(void *dg); ++void sdl2_gl_scanout_disable(void *dg); ++void sdl2_gl_scanout_texture(void *dg, + uint32_t backing_id, +- bool backing_y_0_top, +- uint32_t backing_width, +- uint32_t backing_height, ++ DisplayGLTextureBorrower backing_borrow, + uint32_t x, uint32_t y, + uint32_t w, uint32_t h); + void sdl2_gl_scanout_flush(DisplayChangeListener *dcl, +diff --git a/include/ui/spice-display.h b/include/ui/spice-display.h +index ed298d58f0..894234e874 100644 +--- a/include/ui/spice-display.h ++++ b/include/ui/spice-display.h +@@ -27,7 +27,7 @@ + #include "ui/qemu-pixman.h" + #include "ui/console.h" + +-#if defined(CONFIG_OPENGL) && defined(CONFIG_GBM) ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) && defined(CONFIG_GBM) + # if SPICE_SERVER_VERSION >= 0x000d01 /* release 0.13.1 */ + # define HAVE_SPICE_GL 1 + # include "ui/egl-helpers.h" +diff --git a/meson.build b/meson.build +index dd7f9ed6a8..6d4d26ab94 100644 +--- a/meson.build ++++ b/meson.build +@@ -77,6 +77,13 @@ else + endif + + accelerator_targets = { 'CONFIG_KVM': kvm_targets } ++ ++if cpu in ['aarch64'] ++ accelerator_targets += { ++ 'CONFIG_HVF': ['aarch64-softmmu'] ++ } ++endif ++ + if cpu in ['x86', 'x86_64', 'arm', 'aarch64'] + # i368 emulator provides xenpv machine type for multiple architectures + accelerator_targets += { +@@ -173,6 +180,7 @@ socket = [] + version_res = [] + coref = [] + iokit = [] ++vmnet = not_found + emulator_link_args = [] + nvmm =not_found + hvf = not_found +@@ -187,6 +195,7 @@ if targetos == 'windows' + elif targetos == 'darwin' + coref = dependency('appleframeworks', modules: 'CoreFoundation') + iokit = dependency('appleframeworks', modules: 'IOKit', required: false) ++ vmnet = dependency('appleframeworks', modules: 'vmnet') + elif targetos 
== 'sunos' + socket = [cc.find_library('socket'), + cc.find_library('nsl'), +@@ -319,10 +328,6 @@ lttng = not_found + if 'CONFIG_TRACE_UST' in config_host + lttng = declare_dependency(link_args: config_host['LTTNG_UST_LIBS'].split()) + endif +-urcubp = not_found +-if 'CONFIG_TRACE_UST' in config_host +- urcubp = declare_dependency(link_args: config_host['URCU_BP_LIBS'].split()) +-endif + pixman = not_found + if have_system or have_tools + pixman = dependency('pixman-1', required: have_system, version:'>=0.21.8', +@@ -381,7 +386,8 @@ if not get_option('attr').disabled() + endif + endif + +-cocoa = dependency('appleframeworks', modules: 'Cocoa', required: get_option('cocoa')) ++cocoa = dependency('appleframeworks', modules: ['Cocoa', 'CoreVideo'], ++ required: get_option('cocoa')) + if cocoa.found() and get_option('sdl').enabled() + error('Cocoa and SDL cannot be enabled at the same time') + endif +@@ -1260,6 +1266,7 @@ config_host_data.set('CONFIG_FUSE', fuse.found()) + config_host_data.set('CONFIG_FUSE_LSEEK', fuse_lseek.found()) + config_host_data.set('CONFIG_X11', x11.found()) + config_host_data.set('CONFIG_CFI', get_option('cfi')) ++config_host_data.set('CONFIG_VMNET', vmnet.found()) + config_host_data.set('QEMU_VERSION', '"@0@"'.format(meson.project_version())) + config_host_data.set('QEMU_VERSION_MAJOR', meson.project_version().split('.')[0]) + config_host_data.set('QEMU_VERSION_MINOR', meson.project_version().split('.')[1]) +@@ -2116,6 +2123,7 @@ if have_system or have_user + 'accel/tcg', + 'hw/core', + 'target/arm', ++ 'target/arm/hvf', + 'target/hppa', + 'target/i386', + 'target/i386/kvm', +@@ -2123,6 +2131,7 @@ if have_system or have_user + 'target/ppc', + 'target/riscv', + 'target/s390x', ++ 'target/s390x/kvm', + 'target/sparc', + ] + endif +diff --git a/monitor/misc.c b/monitor/misc.c +index b28874d6dc..ffe7966870 100644 +--- a/monitor/misc.c ++++ b/monitor/misc.c +@@ -1804,7 +1804,7 @@ void info_trace_events_completion(ReadLineState *rs, int nb_args, const char *st + TraceEventIter iter; + TraceEvent *ev; + char *pattern = g_strdup_printf("%s*", str); +- trace_event_iter_init(&iter, pattern); ++ trace_event_iter_init_pattern(&iter, pattern); + while ((ev = trace_event_iter_next(&iter)) != NULL) { + readline_add_completion(rs, trace_event_get_name(ev)); + } +@@ -1822,7 +1822,7 @@ void trace_event_completion(ReadLineState *rs, int nb_args, const char *str) + TraceEventIter iter; + TraceEvent *ev; + char *pattern = g_strdup_printf("%s*", str); +- trace_event_iter_init(&iter, pattern); ++ trace_event_iter_init_pattern(&iter, pattern); + while ((ev = trace_event_iter_next(&iter)) != NULL) { + readline_add_completion(rs, trace_event_get_name(ev)); + } +diff --git a/net/clients.h b/net/clients.h +index 92f9b59aed..2c2af67f82 100644 +--- a/net/clients.h ++++ b/net/clients.h +@@ -63,4 +63,9 @@ int net_init_vhost_user(const Netdev *netdev, const char *name, + + int net_init_vhost_vdpa(const Netdev *netdev, const char *name, + NetClientState *peer, Error **errp); ++ ++#ifdef CONFIG_VMNET ++int net_init_vmnet_macos(const Netdev *netdev, const char *name, ++ NetClientState *peer, Error **errp); ++#endif + #endif /* QEMU_NET_CLIENTS_H */ +diff --git a/net/meson.build b/net/meson.build +index 1076b0a7ab..ba6a5b7fa0 100644 +--- a/net/meson.build ++++ b/net/meson.build +@@ -37,5 +37,6 @@ endif + softmmu_ss.add(when: 'CONFIG_POSIX', if_true: files(tap_posix)) + softmmu_ss.add(when: 'CONFIG_WIN32', if_true: files('tap-win32.c')) + softmmu_ss.add(when: 'CONFIG_VHOST_NET_VDPA', if_true: 
files('vhost-vdpa.c')) ++softmmu_ss.add(when: vmnet, if_true: files('vmnet-macos.c')) + + subdir('can') +diff --git a/net/net.c b/net/net.c +index 76bbb7c31b..e753063a27 100644 +--- a/net/net.c ++++ b/net/net.c +@@ -1001,6 +1001,9 @@ static int (* const net_client_init_fun[NET_CLIENT_DRIVER__MAX])( + #ifdef CONFIG_L2TPV3 + [NET_CLIENT_DRIVER_L2TPV3] = net_init_l2tpv3, + #endif ++#ifdef CONFIG_VMNET ++ [NET_CLIENT_DRIVER_VMNET_MACOS] = net_init_vmnet_macos, ++#endif + }; + + +diff --git a/net/vmnet-macos.c b/net/vmnet-macos.c +new file mode 100644 +index 0000000000..062ba2091e +--- /dev/null ++++ b/net/vmnet-macos.c +@@ -0,0 +1,447 @@ ++/* ++ * vmnet.framework backed netdev for macOS 10.15+ hosts ++ * ++ * Copyright (c) 2021 Phillip Tennen ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. ++ * ++ */ ++#include "qemu/osdep.h" ++#include "qemu/main-loop.h" ++#include "qemu/error-report.h" ++#include "qapi/qapi-types-net.h" ++#include "net/net.h" ++/* macOS vmnet framework header */ ++#include ++ ++typedef struct vmnet_state { ++ NetClientState nc; ++ interface_ref vmnet_iface_ref; ++ /* Switched on after vmnet informs us that the interface has started */ ++ bool link_up; ++ /* ++ * If qemu_send_packet_async returns 0, this is switched off until our ++ * delivery callback is invoked ++ */ ++ bool qemu_ready_to_receive; ++} vmnet_state_t; ++ ++int net_init_vmnet_macos(const Netdev *netdev, const char *name, ++ NetClientState *peer, Error **errp); ++ ++static const char *_vmnet_status_repr(vmnet_return_t status) ++{ ++ switch (status) { ++ case VMNET_SUCCESS: ++ return "success"; ++ case VMNET_FAILURE: ++ return "generic failure"; ++ case VMNET_MEM_FAILURE: ++ return "out of memory"; ++ case VMNET_INVALID_ARGUMENT: ++ return "invalid argument"; ++ case VMNET_SETUP_INCOMPLETE: ++ return "setup is incomplete"; ++ case VMNET_INVALID_ACCESS: ++ return "insufficient permissions"; ++ case VMNET_PACKET_TOO_BIG: ++ return "packet size exceeds MTU"; ++ case VMNET_BUFFER_EXHAUSTED: ++ return "kernel buffers temporarily exhausted"; ++ case VMNET_TOO_MANY_PACKETS: ++ return "number of packets exceeds system limit"; ++ /* This error code was introduced in macOS 11.0 */ ++#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 110000 ++ case VMNET_SHARING_SERVICE_BUSY: ++ return "sharing service busy"; ++#endif ++ default: ++ return "unknown status code"; ++ } ++} ++ ++static operating_modes_t _vmnet_operating_mode_enum_compat( ++ VmnetOperatingMode mode) ++{ ++ switch (mode) { ++ case VMNET_OPERATING_MODE_HOST: ++ return VMNET_HOST_MODE; ++ case VMNET_OPERATING_MODE_SHARED: ++ return VMNET_SHARED_MODE; ++ case VMNET_OPERATING_MODE_BRIDGED: ++ return VMNET_BRIDGED_MODE; ++ default: ++ /* Should never happen as the modes are parsed before we get here */ ++ assert(false); ++ } ++} ++ ++static bool vmnet_can_receive(NetClientState *nc) ++{ ++ vmnet_state_t *s = DO_UPCAST(vmnet_state_t, nc, nc); ++ return s->link_up; ++} ++ ++static ssize_t vmnet_receive_iov(NetClientState *nc, ++ const struct iovec *iovs, ++ int iovcnt) ++{ ++ vmnet_state_t *s = DO_UPCAST(vmnet_state_t, nc, nc); ++ ++ /* Combine the provided iovs into a single vmnet packet */ ++ struct vmpktdesc *packet = g_new0(struct vmpktdesc, 1); ++ packet->vm_pkt_iov = g_new0(struct iovec, iovcnt); ++ memcpy(packet->vm_pkt_iov, iovs, sizeof(struct iovec) * iovcnt); ++ packet->vm_pkt_iovcnt = iovcnt; ++ packet->vm_flags = 0; ++ ++ /* Figure out the packet size by iterating the iov's */ ++ 
for (int i = 0; i < iovcnt; i++) { ++ const struct iovec *iov = iovs + i; ++ packet->vm_pkt_size += iov->iov_len; ++ } ++ ++ /* Finally, write the packet to the vmnet interface */ ++ int packet_count = 1; ++ vmnet_return_t result = vmnet_write(s->vmnet_iface_ref, packet, ++ &packet_count); ++ if (result != VMNET_SUCCESS || packet_count != 1) { ++ error_printf("Failed to send packet to host: %s\n", ++ _vmnet_status_repr(result)); ++ } ++ ssize_t wrote_bytes = packet->vm_pkt_size; ++ g_free(packet->vm_pkt_iov); ++ g_free(packet); ++ return wrote_bytes; ++} ++ ++static void vmnet_send_completed(NetClientState *nc, ssize_t len) ++{ ++ vmnet_state_t *vmnet_client_state = DO_UPCAST(vmnet_state_t, nc, nc); ++ /* Ready to receive more packets! */ ++ vmnet_client_state->qemu_ready_to_receive = true; ++} ++ ++static NetClientInfo net_vmnet_macos_info = { ++ .type = NET_CLIENT_DRIVER_VMNET_MACOS, ++ .size = sizeof(vmnet_state_t), ++ .receive_iov = vmnet_receive_iov, ++ .can_receive = vmnet_can_receive, ++}; ++ ++static bool _validate_ifname_is_valid_bridge_target(const char *ifname) ++{ ++ /* Iterate available bridge interfaces, ensure the provided one is valid */ ++ xpc_object_t bridge_interfaces = vmnet_copy_shared_interface_list(); ++ bool failed_to_match_iface_name = xpc_array_apply( ++ bridge_interfaces, ++ ^bool(size_t index, xpc_object_t _Nonnull value) { ++ if (!strcmp(xpc_string_get_string_ptr(value), ifname)) { ++ /* The interface name is valid! Stop iterating */ ++ return false; ++ } ++ return true; ++ }); ++ ++ if (failed_to_match_iface_name) { ++ error_printf("Invalid bridge interface name provided: %s\n", ifname); ++ error_printf("Valid bridge interfaces:\n"); ++ xpc_array_apply( ++ vmnet_copy_shared_interface_list(), ++ ^bool(size_t index, xpc_object_t _Nonnull value) { ++ error_printf("\t%s\n", xpc_string_get_string_ptr(value)); ++ /* Keep iterating */ ++ return true; ++ }); ++ exit(1); ++ return false; ++ } ++ ++ return true; ++} ++ ++static xpc_object_t _construct_vmnet_interface_description( ++ const NetdevVmnetModeOptions *vmnet_opts) ++{ ++ operating_modes_t mode = _vmnet_operating_mode_enum_compat( ++ vmnet_opts->mode); ++ ++ /* Validate options */ ++ if (mode == VMNET_HOST_MODE || mode == VMNET_SHARED_MODE) { ++ NetdevVmnetModeOptionsHostOrShared mode_opts = vmnet_opts->u.host; ++ /* If one DHCP parameter is configured, all 3 are required */ ++ if (mode_opts.has_dhcp_start_address || ++ mode_opts.has_dhcp_end_address || ++ mode_opts.has_dhcp_subnet_mask) { ++ if (!(mode_opts.has_dhcp_start_address && ++ mode_opts.has_dhcp_end_address && ++ mode_opts.has_dhcp_subnet_mask)) { ++ error_printf("Incomplete DHCP configuration provided\n"); ++ exit(1); ++ } ++ } ++ } else if (mode == VMNET_BRIDGED_MODE) { ++ /* Nothing to validate */ ++ } else { ++ error_printf("Unknown vmnet mode %d\n", mode); ++ exit(1); ++ } ++ ++ xpc_object_t interface_desc = xpc_dictionary_create(NULL, NULL, 0); ++ xpc_dictionary_set_uint64( ++ interface_desc, ++ vmnet_operation_mode_key, ++ mode ++ ); ++ ++ if (mode == VMNET_BRIDGED_MODE) { ++ /* ++ * Configure the provided physical interface to act ++ * as a bridge with QEMU ++ */ ++ NetdevVmnetModeOptionsBridged mode_opts = vmnet_opts->u.bridged; ++ /* Bridge with en0 by default */ ++ const char *physical_ifname = mode_opts.has_ifname ? 
mode_opts.ifname : ++ "en0"; ++ _validate_ifname_is_valid_bridge_target(physical_ifname); ++ xpc_dictionary_set_string(interface_desc, ++ vmnet_shared_interface_name_key, ++ physical_ifname); ++ } else if (mode == VMNET_HOST_MODE || mode == VMNET_SHARED_MODE) { ++ /* Pass the DHCP configuration to vmnet, if the user provided one */ ++ NetdevVmnetModeOptionsHostOrShared mode_opts = vmnet_opts->u.host; ++ if (mode_opts.has_dhcp_start_address) { ++ /* All DHCP arguments are available, as per the checks above */ ++ xpc_dictionary_set_string(interface_desc, ++ vmnet_start_address_key, ++ mode_opts.dhcp_start_address); ++ xpc_dictionary_set_string(interface_desc, ++ vmnet_end_address_key, ++ mode_opts.dhcp_end_address); ++ xpc_dictionary_set_string(interface_desc, ++ vmnet_subnet_mask_key, ++ mode_opts.dhcp_subnet_mask); ++ } ++ } ++ ++ return interface_desc; ++} ++ ++int net_init_vmnet_macos(const Netdev *netdev, const char *name, ++ NetClientState *peer, Error **errp) ++{ ++ assert(netdev->type == NET_CLIENT_DRIVER_VMNET_MACOS); ++ ++ NetdevVmnetModeOptions *vmnet_opts = netdev->u.vmnet_macos.options; ++ xpc_object_t iface_desc = _construct_vmnet_interface_description(vmnet_opts); ++ ++ NetClientState *nc = qemu_new_net_client(&net_vmnet_macos_info, peer, ++ "vmnet", name); ++ vmnet_state_t *vmnet_client_state = DO_UPCAST(vmnet_state_t, nc, nc); ++ ++ dispatch_queue_t vmnet_dispatch_queue = dispatch_queue_create( ++ "org.qemu.vmnet.iface_queue", ++ DISPATCH_QUEUE_SERIAL ++ ); ++ ++ __block vmnet_return_t vmnet_start_status = 0; ++ __block uint64_t vmnet_iface_mtu = 0; ++ __block uint64_t vmnet_max_packet_size = 0; ++ __block const char *vmnet_mac_address = NULL; ++ /* ++ * We can't refer to an array type directly within a block, ++ * so hold a pointer instead. ++ */ ++ uuid_string_t vmnet_iface_uuid = {0}; ++ __block uuid_string_t *vmnet_iface_uuid_ptr = &vmnet_iface_uuid; ++ /* These are only provided in VMNET_HOST_MODE and VMNET_SHARED_MODE */ ++ bool vmnet_provides_dhcp_info = ( ++ vmnet_opts->mode == VMNET_OPERATING_MODE_HOST || ++ vmnet_opts->mode == VMNET_OPERATING_MODE_SHARED); ++ __block const char *vmnet_subnet_mask = NULL; ++ __block const char *vmnet_dhcp_range_start = NULL; ++ __block const char *vmnet_dhcp_range_end = NULL; ++ ++ /* Create the vmnet interface */ ++ dispatch_semaphore_t vmnet_iface_sem = dispatch_semaphore_create(0); ++ interface_ref vmnet_iface_ref = vmnet_start_interface( ++ iface_desc, ++ vmnet_dispatch_queue, ++ ^(vmnet_return_t status, xpc_object_t _Nullable interface_param) { ++ vmnet_start_status = status; ++ if (vmnet_start_status != VMNET_SUCCESS || !interface_param) { ++ /* Early return if the interface couldn't be started */ ++ dispatch_semaphore_signal(vmnet_iface_sem); ++ return; ++ } ++ ++ /* ++ * Read the configuration that vmnet provided us. ++ * The provided dictionary is owned by XPC and may be freed ++ * shortly after this block's execution. ++ * So, copy data buffers now. 
++ */ ++ vmnet_iface_mtu = xpc_dictionary_get_uint64( ++ interface_param, ++ vmnet_mtu_key ++ ); ++ vmnet_max_packet_size = xpc_dictionary_get_uint64( ++ interface_param, ++ vmnet_max_packet_size_key ++ ); ++ vmnet_mac_address = strdup(xpc_dictionary_get_string( ++ interface_param, ++ vmnet_mac_address_key ++ )); ++ ++ const uint8_t *iface_uuid = xpc_dictionary_get_uuid( ++ interface_param, ++ vmnet_interface_id_key ++ ); ++ uuid_unparse_upper(iface_uuid, *vmnet_iface_uuid_ptr); ++ ++ /* If we're in a mode that provides DHCP info, read it out now */ ++ if (vmnet_provides_dhcp_info) { ++ vmnet_dhcp_range_start = strdup(xpc_dictionary_get_string( ++ interface_param, ++ vmnet_start_address_key ++ )); ++ vmnet_dhcp_range_end = strdup(xpc_dictionary_get_string( ++ interface_param, ++ vmnet_end_address_key ++ )); ++ vmnet_subnet_mask = strdup(xpc_dictionary_get_string( ++ interface_param, ++ vmnet_subnet_mask_key ++ )); ++ } ++ dispatch_semaphore_signal(vmnet_iface_sem); ++ }); ++ ++ /* And block until we receive a response from vmnet */ ++ dispatch_semaphore_wait(vmnet_iface_sem, DISPATCH_TIME_FOREVER); ++ ++ /* Did we manage to start the interface? */ ++ if (vmnet_start_status != VMNET_SUCCESS || !vmnet_iface_ref) { ++ error_printf("Failed to start interface: %s\n", ++ _vmnet_status_repr(vmnet_start_status)); ++ if (vmnet_start_status == VMNET_FAILURE) { ++ error_printf("Hint: vmnet requires running with root access\n"); ++ } ++ return -1; ++ } ++ ++ info_report("Started vmnet interface with configuration:"); ++ info_report("MTU: %llu", vmnet_iface_mtu); ++ info_report("Max packet size: %llu", vmnet_max_packet_size); ++ info_report("MAC: %s", vmnet_mac_address); ++ if (vmnet_provides_dhcp_info) { ++ info_report("DHCP IPv4 start: %s", vmnet_dhcp_range_start); ++ info_report("DHCP IPv4 end: %s", vmnet_dhcp_range_end); ++ info_report("IPv4 subnet mask: %s", vmnet_subnet_mask); ++ } ++ info_report("UUID: %s", vmnet_iface_uuid); ++ ++ /* The interface is up! Set a block to run when packets are received */ ++ vmnet_client_state->vmnet_iface_ref = vmnet_iface_ref; ++ vmnet_return_t event_cb_stat = vmnet_interface_set_event_callback( ++ vmnet_iface_ref, ++ VMNET_INTERFACE_PACKETS_AVAILABLE, ++ vmnet_dispatch_queue, ++ ^(interface_event_t event_mask, xpc_object_t _Nonnull event) { ++ if (event_mask != VMNET_INTERFACE_PACKETS_AVAILABLE) { ++ error_printf("Unknown vmnet interface event 0x%08x\n", event_mask); ++ return; ++ } ++ ++ /* If we're unable to handle more packets now, drop this packet */ ++ if (!vmnet_client_state->qemu_ready_to_receive) { ++ return; ++ } ++ ++ /* ++ * TODO(Phillip Tennen ): There may be more than ++ * one packet available. ++ * As an optimization, we could read ++ * vmnet_estimated_packets_available_key packets now. 
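++             *
++             * A rough sketch of that batching (editor's illustration,
++             * untested; it assumes the event dictionary carries a
++             * count under vmnet_estimated_packets_available_key):
++             *
++             *   uint64_t navail = xpc_dictionary_get_uint64(event,
++             *       vmnet_estimated_packets_available_key);
++             *   int pktcnt = MIN(navail, 32);
++             *   struct vmpktdesc *v = g_new0(struct vmpktdesc, pktcnt);
++             *
++             * then give each descriptor one iovec of
++             * vmnet_max_packet_size bytes, call
++             * vmnet_read(vmnet_iface_ref, v, &pktcnt) once, and
++             * deliver the pktcnt packets actually read in turn.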
++ */ ++ char *packet_buf = g_malloc0(vmnet_max_packet_size); ++ struct iovec *iov = g_new0(struct iovec, 1); ++ iov->iov_base = packet_buf; ++ iov->iov_len = vmnet_max_packet_size; ++ ++ int pktcnt = 1; ++ struct vmpktdesc *v = g_new0(struct vmpktdesc, pktcnt); ++ v->vm_pkt_size = vmnet_max_packet_size; ++ v->vm_pkt_iov = iov; ++ v->vm_pkt_iovcnt = 1; ++ v->vm_flags = 0; ++ ++ vmnet_return_t result = vmnet_read(vmnet_iface_ref, v, &pktcnt); ++ if (result != VMNET_SUCCESS) { ++ error_printf("Failed to read packet from host: %s\n", ++ _vmnet_status_repr(result)); ++ } ++ ++ /* Ensure we read exactly one packet */ ++ assert(pktcnt == 1); ++ ++ /* Dispatch this block to a global queue instead of the main queue, ++ * which is only created when the program has a Cocoa event loop. ++ * If QEMU is started with -nographic, no Cocoa event loop will be ++ * created and thus the main queue will be unavailable. ++ */ ++ dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, ++ 0), ++ ^{ ++ qemu_mutex_lock_iothread(); ++ ++ /* ++ * Deliver the packet to the guest ++ * If the delivery succeeded synchronously, this returns the length ++ * of the sent packet. ++ */ ++ if (qemu_send_packet_async(nc, iov->iov_base, ++ v->vm_pkt_size, ++ vmnet_send_completed) == 0) { ++ vmnet_client_state->qemu_ready_to_receive = false; ++ } ++ ++ /* ++ * It's safe to free the packet buffers. ++ * Even if delivery needs to wait, qemu_net_queue_append copies ++ * the packet buffer. ++ */ ++ g_free(v); ++ g_free(iov); ++ g_free(packet_buf); ++ ++ qemu_mutex_unlock_iothread(); ++ }); ++ }); ++ ++ /* Did we manage to set an event callback? */ ++ if (event_cb_stat != VMNET_SUCCESS) { ++ error_printf("Failed to set up a callback to receive packets: %s\n", ++ _vmnet_status_repr(vmnet_start_status)); ++ exit(1); ++ } ++ ++ /* We're now ready to receive packets */ ++ vmnet_client_state->qemu_ready_to_receive = true; ++ vmnet_client_state->link_up = true; ++ ++ /* Include DHCP info if we're in a relevant mode */ ++ if (vmnet_provides_dhcp_info) { ++ snprintf(nc->info_str, sizeof(nc->info_str), ++ "dhcp_start=%s,dhcp_end=%s,mask=%s", ++ vmnet_dhcp_range_start, vmnet_dhcp_range_end, ++ vmnet_subnet_mask); ++ } else { ++ snprintf(nc->info_str, sizeof(nc->info_str), ++ "mac=%s", vmnet_mac_address); ++ } ++ ++ return 0; ++} +diff --git a/pc-bios/README b/pc-bios/README +index d344e3bc1b..db39d757b0 100644 +--- a/pc-bios/README ++++ b/pc-bios/README +@@ -14,7 +14,7 @@ + - SLOF (Slimline Open Firmware) is a free IEEE 1275 Open Firmware + implementation for certain IBM POWER hardware. The sources are at + https://github.com/aik/SLOF, and the image currently in qemu is +- built from git tag qemu-slof-20210217. ++ built from git tag qemu-slof-20210711. + + - VOF (Virtual Open Firmware) is a minimalistic firmware to work with + -machine pseries,x-vof=on. 
When enabled, the firmware acts as a slim shim and +diff --git a/qapi/block-core.json b/qapi/block-core.json +index c7a311798a..675d8265eb 100644 +--- a/qapi/block-core.json ++++ b/qapi/block-core.json +@@ -274,6 +274,9 @@ + # images in the chain)) before reaching one for which the + # range is allocated + # ++# @present: true if this layer provides the data, false if adding a backing ++# layer could impact this region (since 6.1) ++# + # @offset: if present, the image file stores the data for this range + # in raw format at the given (host) offset + # +@@ -284,8 +287,8 @@ + ## + { 'struct': 'MapEntry', + 'data': {'start': 'int', 'length': 'int', 'data': 'bool', +- 'zero': 'bool', 'depth': 'int', '*offset': 'int', +- '*filename': 'str' } } ++ 'zero': 'bool', 'depth': 'int', 'present': 'bool', ++ '*offset': 'int', '*filename': 'str' } } + + ## + # @BlockdevCacheInfo: +diff --git a/qapi/net.json b/qapi/net.json +index 7fab2e7cd8..e3b67f174f 100644 +--- a/qapi/net.json ++++ b/qapi/net.json +@@ -452,6 +452,115 @@ + '*vhostdev': 'str', + '*queues': 'int' } } + ++## ++# @VmnetOperatingMode: ++# ++# The operating modes in which a vmnet netdev can run ++# Only available on macOS ++# ++# @host: the guest may communicate with the host ++# and other guest network interfaces ++# ++# @shared: the guest may reach the Internet through a NAT, ++# and may communicate with the host and other guest ++# network interfaces ++# ++# @bridged: the guest's traffic is bridged with a ++# physical network interface of the host ++# ++# Since: 6.0 ++## ++{ 'enum': 'VmnetOperatingMode', ++ 'data': [ 'host', 'shared', 'bridged' ], ++ 'if': 'defined(CONFIG_VMNET)' } ++ ++## ++# @NetdevVmnetModeOptionsBridged: ++# ++# Options for the vmnet-macos netdev ++# that are only available in 'bridged' mode ++# Only available on macOS ++# ++# @ifname: the physical network interface to bridge with ++# (defaults to en0 if not specified) ++# ++# Since: 6.0 ++## ++{ 'struct': 'NetdevVmnetModeOptionsBridged', ++ 'data': { '*ifname': 'str' }, ++ 'if': 'defined(CONFIG_VMNET)' } ++ ++## ++# @NetdevVmnetModeOptionsHostOrShared: ++# ++# Options for the vmnet-macos netdev ++# that are only available in 'host' or 'shared' mode ++# Only available on macOS ++# ++# @dhcp-start-address: the gateway address to use for the interface. ++# The range to dhcp_end_address is placed in the DHCP pool. ++# (only valid with mode=host|shared) ++# (must be specified with dhcp-end-address and ++# dhcp-subnet-mask) ++# (allocated automatically if unset) ++# ++# @dhcp-end-address: the DHCP IPv4 range end address to use for the interface. ++# (only valid with mode=host|shared) ++# (must be specified with dhcp-start-address and ++# dhcp-subnet-mask) ++# (allocated automatically if unset) ++# ++# @dhcp-subnet-mask: the IPv4 subnet mask (string) to use on the interface. 
++# (only valid with mode=host|shared) ++# (must be specified with dhcp-start-address and ++# dhcp-end-address) ++# (allocated automatically if unset) ++# ++# Since: 6.0 ++## ++{ 'struct': 'NetdevVmnetModeOptionsHostOrShared', ++ 'data': { ++ '*dhcp-start-address': 'str' , ++ '*dhcp-end-address': 'str', ++ '*dhcp-subnet-mask': 'str' }, ++ 'if': 'defined(CONFIG_VMNET)' } ++ ++## ++# @NetdevVmnetModeOptions: ++# ++# Options specific to different operating modes of a vmnet netdev ++# Only available on macOS ++# ++# @mode: the operating mode vmnet should run in ++# ++# Since: 6.0 ++## ++{ 'union': 'NetdevVmnetModeOptions', ++ 'base': { 'mode': 'VmnetOperatingMode' }, ++ 'discriminator': 'mode', ++ 'data': { ++ 'bridged': 'NetdevVmnetModeOptionsBridged', ++ 'host': 'NetdevVmnetModeOptionsHostOrShared', ++ 'shared': 'NetdevVmnetModeOptionsHostOrShared' }, ++ 'if': 'defined(CONFIG_VMNET)' } ++ ++## ++# @NetdevVmnetOptions: ++# ++# vmnet network backend ++# Only available on macOS ++# ++# @options: a structure specifying the mode and mode-specific options ++# (once QAPI supports a union type as a branch to another union type, ++# this structure can be changed to a union, and the contents of ++# NetdevVmnetModeOptions moved here) ++# ++# Since: 6.0 ++## ++{ 'struct': 'NetdevVmnetOptions', ++ 'data': {'options': 'NetdevVmnetModeOptions' }, ++ 'if': 'defined(CONFIG_VMNET)' } ++ + ## + # @NetClientDriver: + # +@@ -460,10 +569,13 @@ + # Since: 2.7 + # + # @vhost-vdpa since 5.1 ++# ++# @vmnet-macos since 6.0 (only available on macOS) + ## + { 'enum': 'NetClientDriver', + 'data': [ 'none', 'nic', 'user', 'tap', 'l2tpv3', 'socket', 'vde', +- 'bridge', 'hubport', 'netmap', 'vhost-user', 'vhost-vdpa' ] } ++ 'bridge', 'hubport', 'netmap', 'vhost-user', 'vhost-vdpa', ++ { 'name': 'vmnet-macos', 'if': 'defined(CONFIG_VMNET)' } ] } + + ## + # @Netdev: +@@ -477,6 +589,8 @@ + # Since: 1.2 + # + # 'l2tpv3' - since 2.1 ++# ++# 'vmnet-macos' since 6.0 (only available on macOS) + ## + { 'union': 'Netdev', + 'base': { 'id': 'str', 'type': 'NetClientDriver' }, +@@ -492,7 +606,9 @@ + 'hubport': 'NetdevHubPortOptions', + 'netmap': 'NetdevNetmapOptions', + 'vhost-user': 'NetdevVhostUserOptions', +- 'vhost-vdpa': 'NetdevVhostVDPAOptions' } } ++ 'vhost-vdpa': 'NetdevVhostVDPAOptions', ++ 'vmnet-macos': { 'type': 'NetdevVmnetOptions', ++ 'if': 'defined(CONFIG_VMNET)' } } } + + ## + # @RxState: +diff --git a/qapi/ui.json b/qapi/ui.json +index fd9677d48e..7d8d3b532e 100644 +--- a/qapi/ui.json ++++ b/qapi/ui.json +@@ -1092,6 +1092,27 @@ + { 'struct' : 'DisplayCurses', + 'data' : { '*charset' : 'str' } } + ++## ++# @DisplayCocoa: ++# ++# Cocoa display options. ++# ++# @full-grab: Capture all key presses, including system combos. This ++# requires accessibility permissions, since it performs ++# a global grab on key events. (default: off) ++# See https://support.apple.com/en-in/guide/mac-help/mh32356/mac ++# ++# @swap-option-command: Swap the Option and Command keys so that their key ++# codes match their position on non-Mac keyboards and ++# you can use Meta/Super and Alt where you expect them. 
++#                       (default: off)
++#
++# Since: 6.1
++##
++{ 'struct'  : 'DisplayCocoa',
++  'data'    : { '*full-grab'           : 'bool',
++                '*swap-option-command' : 'bool' } }
++
+ ##
+ # @DisplayType:
+ #
+@@ -1165,6 +1186,7 @@
+   'discriminator' : 'type',
+   'data'    : {
+       'gtk': { 'type': 'DisplayGTK', 'if': 'defined(CONFIG_GTK)' },
++      'cocoa': { 'type': 'DisplayCocoa', 'if': 'defined(CONFIG_COCOA)' },
+       'curses': { 'type': 'DisplayCurses', 'if': 'defined(CONFIG_CURSES)' },
+       'egl-headless': { 'type': 'DisplayEGLHeadless',
+                         'if': 'defined(CONFIG_OPENGL) && defined(CONFIG_GBM)' }
+diff --git a/qemu-img.c b/qemu-img.c
+index 7c4fc60312..797742a443 100644
+--- a/qemu-img.c
++++ b/qemu-img.c
+@@ -2982,8 +2982,9 @@ static int dump_map_entry(OutputFormat output_format, MapEntry *e,
+         break;
+     case OFORMAT_JSON:
+         printf("{ \"start\": %"PRId64", \"length\": %"PRId64","
+-               " \"depth\": %"PRId64", \"zero\": %s, \"data\": %s",
+-               e->start, e->length, e->depth,
++               " \"depth\": %"PRId64", \"present\": %s, \"zero\": %s,"
++               " \"data\": %s", e->start, e->length, e->depth,
++               e->present ? "true" : "false",
+                e->zero ? "true" : "false",
+                e->data ? "true" : "false");
+         if (e->has_offset) {
+@@ -3049,6 +3050,7 @@ static int get_block_status(BlockDriverState *bs, int64_t offset,
+         .offset = map,
+         .has_offset = has_offset,
+         .depth = depth,
++        .present = !!(ret & BDRV_BLOCK_ALLOCATED),
+         .has_filename = filename,
+         .filename = filename,
+     };
+@@ -3064,6 +3066,7 @@ static inline bool entry_mergeable(const MapEntry *curr, const MapEntry *next)
+     if (curr->zero != next->zero ||
+         curr->data != next->data ||
+         curr->depth != next->depth ||
++        curr->present != next->present ||
+         curr->has_filename != next->has_filename ||
+         curr->has_offset != next->has_offset) {
+         return false;
+diff --git a/qemu-options.hx b/qemu-options.hx
+index 8965dabc83..5ff872b3e8 100644
+--- a/qemu-options.hx
++++ b/qemu-options.hx
+@@ -1796,7 +1796,11 @@ DEF("display", HAS_ARG, QEMU_OPTION_display,
+ #if defined(CONFIG_CURSES)
+     "-display curses[,charset=<encoding>]\n"
+ #endif
+-#if defined(CONFIG_OPENGL)
++#if defined(CONFIG_COCOA)
++    "-display cocoa[,full-grab=on|off]\n"
++    "              [,swap-option-command=on|off]\n"
++#endif
++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL)
+     "-display egl-headless[,rendernode=<file>]\n"
+ #endif
+     "-display none\n"
+@@ -2605,6 +2609,15 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev,
+ #ifdef __linux__
+     "-netdev vhost-vdpa,id=str,vhostdev=/path/to/dev\n"
+     "                configure a vhost-vdpa network,Establish a vhost-vdpa netdev\n"
++#endif
++#ifdef CONFIG_VMNET
++    "-netdev vmnet-macos,id=str,mode=bridged[,ifname=ifname]\n"
++    "                configure a macOS-provided vmnet network in \"physical interface bridge\" mode\n"
++    "                the physical interface to bridge with defaults to en0 if unspecified\n"
++    "-netdev vmnet-macos,id=str,mode=host|shared\n"
++    "                [,dhcp-start-address=addr,dhcp-end-address=addr,dhcp-subnet-mask=mask]\n"
++    "                configure a macOS-provided vmnet network in \"host\" or \"shared\" mode\n"
++    "                the DHCP configuration will be set automatically if unspecified\n"
+ #endif
+     "-netdev hubport,id=str,hubid=n[,netdev=nd]\n"
+     "                configure a hub port on the hub with ID 'n'\n", QEMU_ARCH_ALL)
+diff --git a/scripts/qemu-trace-stap b/scripts/qemu-trace-stap
+index 90527eb974..eb6e951ff2 100755
+--- a/scripts/qemu-trace-stap
++++ b/scripts/qemu-trace-stap
+@@ -55,11 +55,6 @@ def tapset_dir(binary):
+     return os.path.realpath(tapset)
+ 
+ 
+-def tapset_env(tapset_dir):
+-    tenv = copy.copy(os.environ)
+-    tenv["SYSTEMTAP_TAPSET"] = tapset_dir
+-    return tenv
+-
+ def 
cmd_run(args): + prefix = probe_prefix(args.binary) + tapsets = tapset_dir(args.binary) +@@ -81,11 +76,11 @@ def cmd_run(args): + + # We request an 8MB buffer, since the stap default 1MB buffer + # can be easily overflowed by frequently firing QEMU traces +- stapargs = ["stap", "-s", "8"] ++ stapargs = ["stap", "-s", "8", "-I", tapsets ] + if args.pid is not None: + stapargs.extend(["-x", args.pid]) + stapargs.extend(["-e", script]) +- subprocess.call(stapargs, env=tapset_env(tapsets)) ++ subprocess.call(stapargs) + + + def cmd_list(args): +@@ -101,10 +96,9 @@ def cmd_list(args): + + if verbose: + print("Listing probes with name '%s'" % script) +- proc = subprocess.Popen(["stap", "-l", script], ++ proc = subprocess.Popen(["stap", "-I", tapsets, "-l", script], + stdout=subprocess.PIPE, +- universal_newlines=True, +- env=tapset_env(tapsets)) ++ universal_newlines=True) + out, err = proc.communicate() + if proc.returncode != 0: + print("No probes found, are the tapsets installed in %s" % tapset_dir(args.binary)) +diff --git a/target/arm/cpu.c b/target/arm/cpu.c +index 9cddfd6a44..2f8401bfaf 100644 +--- a/target/arm/cpu.c ++++ b/target/arm/cpu.c +@@ -39,6 +39,7 @@ + #include "sysemu/tcg.h" + #include "sysemu/hw_accel.h" + #include "kvm_arm.h" ++#include "hvf_arm.h" + #include "disas/capstone.h" + #include "fpu/softfloat.h" + +@@ -1079,8 +1080,8 @@ static void arm_cpu_initfn(Object *obj) + cpu->psci_version = 1; /* By default assume PSCI v0.1 */ + cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE; + +- if (tcg_enabled()) { +- cpu->psci_version = 2; /* TCG implements PSCI 0.2 */ ++ if (tcg_enabled() || hvf_enabled()) { ++ cpu->psci_version = 2; /* TCG and HVF implement PSCI 0.2 */ + } + } + +@@ -2021,15 +2022,19 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) + #endif /* CONFIG_TCG */ + } + +-#ifdef CONFIG_KVM ++#if defined(CONFIG_KVM) || defined(CONFIG_HVF) + static void arm_host_initfn(Object *obj) + { + ARMCPU *cpu = ARM_CPU(obj); + ++#ifdef CONFIG_KVM + kvm_arm_set_cpu_features_from_host(cpu); + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + aarch64_add_sve_properties(obj); + } ++#else ++ hvf_arm_set_cpu_features_from_host(cpu); ++#endif + arm_cpu_post_init(obj); + } + +@@ -2089,7 +2094,7 @@ static void arm_cpu_register_types(void) + { + type_register_static(&arm_cpu_type_info); + +-#ifdef CONFIG_KVM ++#if defined(CONFIG_KVM) || defined(CONFIG_HVF) + type_register_static(&host_arm_cpu_type_info); + #endif + } +diff --git a/target/arm/cpu.h b/target/arm/cpu.h +index be9a4dceae..131e82be9b 100644 +--- a/target/arm/cpu.h ++++ b/target/arm/cpu.h +@@ -2997,6 +2997,8 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); + #define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX) + #define CPU_RESOLVING_TYPE TYPE_ARM_CPU + ++#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU ++ + #define cpu_signal_handler cpu_arm_signal_handler + #define cpu_list arm_cpu_list + +diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c +new file mode 100644 +index 0000000000..be670af578 +--- /dev/null ++++ b/target/arm/hvf/hvf.c +@@ -0,0 +1,962 @@ ++/* ++ * QEMU Hypervisor.framework support for Apple Silicon ++ ++ * Copyright 2020 Alexander Graf ++ * Copyright 2020 Google LLC ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. 
++ *
++ */
++
++#include "qemu/osdep.h"
++#include "qemu-common.h"
++#include "qemu/error-report.h"
++
++#include "sysemu/runstate.h"
++#include "sysemu/hvf.h"
++#include "sysemu/hvf_int.h"
++#include "sysemu/hw_accel.h"
++#include "hvf_arm.h"
++
++#include <Hypervisor/Hypervisor.h>
++
++#include "exec/address-spaces.h"
++#include "hw/irq.h"
++#include "qemu/main-loop.h"
++#include "sysemu/cpus.h"
++#include "arm-powerctl.h"
++#include "target/arm/cpu.h"
++#include "target/arm/internals.h"
++#include "trace/trace-target_arm_hvf.h"
++
++#define HVF_SYSREG(crn, crm, op0, op1, op2) \
++    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
++#define PL1_WRITE_MASK 0x4
++
++#define SYSREG(op0, op1, crn, crm, op2) \
++    ((op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (crm << 1))
++#define SYSREG_MASK SYSREG(0x3, 0x7, 0xf, 0xf, 0x7)
++#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1)
++#define SYSREG_PMCCNTR_EL0 SYSREG(3, 3, 9, 13, 0)
++
++#define WFX_IS_WFE (1 << 0)
++
++#define TMR_CTL_ENABLE (1 << 0)
++#define TMR_CTL_IMASK (1 << 1)
++#define TMR_CTL_ISTATUS (1 << 2)
++
++static void hvf_wfi(CPUState *cpu);
++
++typedef struct ARMHostCPUFeatures {
++    ARMISARegisters isar;
++    uint64_t features;
++    uint64_t midr;
++    uint32_t reset_sctlr;
++    const char *dtb_compatible;
++} ARMHostCPUFeatures;
++
++static ARMHostCPUFeatures arm_host_cpu_features;
++
++struct hvf_reg_match {
++    int reg;
++    uint64_t offset;
++};
++
++static const struct hvf_reg_match hvf_reg_match[] = {
++    { HV_REG_X0, offsetof(CPUARMState, xregs[0]) },
++    { HV_REG_X1, offsetof(CPUARMState, xregs[1]) },
++    { HV_REG_X2, offsetof(CPUARMState, xregs[2]) },
++    { HV_REG_X3, offsetof(CPUARMState, xregs[3]) },
++    { HV_REG_X4, offsetof(CPUARMState, xregs[4]) },
++    { HV_REG_X5, offsetof(CPUARMState, xregs[5]) },
++    { HV_REG_X6, offsetof(CPUARMState, xregs[6]) },
++    { HV_REG_X7, offsetof(CPUARMState, xregs[7]) },
++    { HV_REG_X8, offsetof(CPUARMState, xregs[8]) },
++    { HV_REG_X9, offsetof(CPUARMState, xregs[9]) },
++    { HV_REG_X10, offsetof(CPUARMState, xregs[10]) },
++    { HV_REG_X11, offsetof(CPUARMState, xregs[11]) },
++    { HV_REG_X12, offsetof(CPUARMState, xregs[12]) },
++    { HV_REG_X13, offsetof(CPUARMState, xregs[13]) },
++    { HV_REG_X14, offsetof(CPUARMState, xregs[14]) },
++    { HV_REG_X15, offsetof(CPUARMState, xregs[15]) },
++    { HV_REG_X16, offsetof(CPUARMState, xregs[16]) },
++    { HV_REG_X17, offsetof(CPUARMState, xregs[17]) },
++    { HV_REG_X18, offsetof(CPUARMState, xregs[18]) },
++    { HV_REG_X19, offsetof(CPUARMState, xregs[19]) },
++    { HV_REG_X20, offsetof(CPUARMState, xregs[20]) },
++    { HV_REG_X21, offsetof(CPUARMState, xregs[21]) },
++    { HV_REG_X22, offsetof(CPUARMState, xregs[22]) },
++    { HV_REG_X23, offsetof(CPUARMState, xregs[23]) },
++    { HV_REG_X24, offsetof(CPUARMState, xregs[24]) },
++    { HV_REG_X25, offsetof(CPUARMState, xregs[25]) },
++    { HV_REG_X26, offsetof(CPUARMState, xregs[26]) },
++    { HV_REG_X27, offsetof(CPUARMState, xregs[27]) },
++    { HV_REG_X28, offsetof(CPUARMState, xregs[28]) },
++    { HV_REG_X29, offsetof(CPUARMState, xregs[29]) },
++    { HV_REG_X30, offsetof(CPUARMState, xregs[30]) },
++    { HV_REG_PC, offsetof(CPUARMState, pc) },
++};
++
++static const struct hvf_reg_match hvf_fpreg_match[] = {
++    { HV_SIMD_FP_REG_Q0, offsetof(CPUARMState, vfp.zregs[0]) },
++    { HV_SIMD_FP_REG_Q1, offsetof(CPUARMState, vfp.zregs[1]) },
++    { HV_SIMD_FP_REG_Q2, offsetof(CPUARMState, vfp.zregs[2]) },
++    { HV_SIMD_FP_REG_Q3, offsetof(CPUARMState, vfp.zregs[3]) },
++    { HV_SIMD_FP_REG_Q4, offsetof(CPUARMState, vfp.zregs[4]) },
++    { HV_SIMD_FP_REG_Q5,
offsetof(CPUARMState, vfp.zregs[5]) }, ++ { HV_SIMD_FP_REG_Q6, offsetof(CPUARMState, vfp.zregs[6]) }, ++ { HV_SIMD_FP_REG_Q7, offsetof(CPUARMState, vfp.zregs[7]) }, ++ { HV_SIMD_FP_REG_Q8, offsetof(CPUARMState, vfp.zregs[8]) }, ++ { HV_SIMD_FP_REG_Q9, offsetof(CPUARMState, vfp.zregs[9]) }, ++ { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) }, ++ { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) }, ++ { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) }, ++ { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) }, ++ { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) }, ++ { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) }, ++ { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) }, ++ { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) }, ++ { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) }, ++ { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) }, ++ { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) }, ++ { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) }, ++ { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) }, ++ { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) }, ++ { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) }, ++ { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) }, ++ { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) }, ++ { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) }, ++ { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) }, ++ { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) }, ++ { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) }, ++ { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) }, ++}; ++ ++struct hvf_sreg_match { ++ int reg; ++ uint32_t key; ++}; ++ ++static const struct hvf_sreg_match hvf_sreg_match[] = { ++ { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR7_EL1, 
HVF_SYSREG(0, 7, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 7) }, ++ ++ { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 4) }, ++ { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 5) }, ++ { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 6) }, ++ { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 7) }, ++ ++#ifdef SYNC_NO_RAW_REGS ++ /* ++ * The registers below are manually synced on init because they are ++ * marked as NO_RAW. We still list them to make number space sync easier. ++ */ ++ { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) }, ++ { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) }, ++ { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) }, ++ { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) }, ++#endif ++ { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 2) }, ++ { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) }, ++ { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) }, ++ { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) }, ++ { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) }, ++#ifdef SYNC_NO_MMFR0 ++ /* We keep the hardware MMFR0 around. 
HW limits are there anyway */ ++ { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) }, ++#endif ++ { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) }, ++ { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) }, ++ ++ { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) }, ++ { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) }, ++ { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) }, ++ { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) }, ++ { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) }, ++ { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) }, ++ ++ { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) }, ++ { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) }, ++ { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) }, ++ { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) }, ++ { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) }, ++ { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) }, ++ { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) }, ++ { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) }, ++ { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) }, ++ { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) }, ++ ++ { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 1, 0) }, ++ { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) }, ++ { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) }, ++ { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) }, ++ { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) }, ++ { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) }, ++ { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) }, ++ { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) }, ++ { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) }, ++ { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) }, ++ { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) }, ++ { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) }, ++ { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) }, ++ { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) }, ++ { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) }, ++ { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) }, ++ { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) }, ++ { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) }, ++ { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) }, ++ { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) }, ++}; ++ ++int hvf_get_registers(CPUState *cpu) ++{ ++ ARMCPU *arm_cpu = ARM_CPU(cpu); ++ CPUARMState *env = &arm_cpu->env; ++ hv_return_t ret; ++ uint64_t val; ++ hv_simd_fp_uchar16_t fpval; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { ++ ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val); ++ *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val; ++ assert_hvf_ok(ret); ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) { ++ ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg, ++ &fpval); ++ memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval)); ++ assert_hvf_ok(ret); ++ } ++ ++ val = 0; ++ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val); ++ assert_hvf_ok(ret); ++ vfp_set_fpcr(env, val); ++ ++ val = 0; ++ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val); ++ assert_hvf_ok(ret); ++ vfp_set_fpsr(env, val); ++ ++ ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val); ++ assert_hvf_ok(ret); ++ pstate_write(env, val); ++ ++ for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) { ++ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val); ++ assert_hvf_ok(ret); ++ ++ arm_cpu->cpreg_values[i] = val; ++ } ++ write_list_to_cpustate(arm_cpu); 
++ ++ return 0; ++} ++ ++int hvf_put_registers(CPUState *cpu) ++{ ++ ARMCPU *arm_cpu = ARM_CPU(cpu); ++ CPUARMState *env = &arm_cpu->env; ++ hv_return_t ret; ++ uint64_t val; ++ hv_simd_fp_uchar16_t fpval; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { ++ val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset); ++ ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val); ++ assert_hvf_ok(ret); ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) { ++ memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval)); ++ ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg, ++ fpval); ++ assert_hvf_ok(ret); ++ } ++ ++ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env)); ++ assert_hvf_ok(ret); ++ ++ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env)); ++ assert_hvf_ok(ret); ++ ++ ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env)); ++ assert_hvf_ok(ret); ++ ++ write_cpustate_to_list(arm_cpu, false); ++ for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) { ++ val = arm_cpu->cpreg_values[i]; ++ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val); ++ assert_hvf_ok(ret); ++ } ++ ++ return 0; ++} ++ ++static void flush_cpu_state(CPUState *cpu) ++{ ++ if (cpu->vcpu_dirty) { ++ hvf_put_registers(cpu); ++ cpu->vcpu_dirty = false; ++ } ++} ++ ++static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val) ++{ ++ hv_return_t r; ++ ++ flush_cpu_state(cpu); ++ ++ if (rt < 31) { ++ r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val); ++ assert_hvf_ok(r); ++ } ++} ++ ++static uint64_t hvf_get_reg(CPUState *cpu, int rt) ++{ ++ uint64_t val = 0; ++ hv_return_t r; ++ ++ flush_cpu_state(cpu); ++ ++ if (rt < 31) { ++ r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val); ++ assert_hvf_ok(r); ++ } ++ ++ return val; ++} ++ ++static void hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ++{ ++ ARMISARegisters host_isar; ++ const struct isar_regs { ++ int reg; ++ uint64_t *val; ++ } regs[] = { ++ { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 }, ++ { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 }, ++ { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 }, ++ { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 }, ++ { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 }, ++ { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 }, ++ { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 }, ++ { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 }, ++ { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 }, ++ }; ++ hv_vcpu_t fd; ++ hv_vcpu_exit_t *exit; ++ int i; ++ ++ ahcf->dtb_compatible = "arm,arm-v8"; ++ ahcf->features = (1ULL << ARM_FEATURE_V8) | ++ (1ULL << ARM_FEATURE_NEON) | ++ (1ULL << ARM_FEATURE_AARCH64) | ++ (1ULL << ARM_FEATURE_PMU) | ++ (1ULL << ARM_FEATURE_GENERIC_TIMER); ++ ++ /* We set up a small vcpu to extract host registers */ ++ ++ assert_hvf_ok(hv_vcpu_create(&fd, &exit, NULL)); ++ for (i = 0; i < ARRAY_SIZE(regs); i++) { ++ assert_hvf_ok(hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val)); ++ } ++ assert_hvf_ok(hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr)); ++ assert_hvf_ok(hv_vcpu_destroy(fd)); ++ ++ ahcf->isar = host_isar; ++ ahcf->reset_sctlr = 0x00c50078; ++ ++ /* Make sure we don't advertise AArch32 support for EL0/EL1 */ ++ g_assert((host_isar.id_aa64pfr0 & 0xff) == 0x11); ++} ++ ++void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu) ++{ ++ if (!arm_host_cpu_features.dtb_compatible) { ++ if (!hvf_enabled()) { ++ 
cpu->host_cpu_probe_failed = true; ++ return; ++ } ++ hvf_arm_get_host_cpu_features(&arm_host_cpu_features); ++ } ++ ++ cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible; ++ cpu->isar = arm_host_cpu_features.isar; ++ cpu->env.features = arm_host_cpu_features.features; ++ cpu->midr = arm_host_cpu_features.midr; ++ cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr; ++} ++ ++void hvf_arch_vcpu_destroy(CPUState *cpu) ++{ ++} ++ ++int hvf_arch_init_vcpu(CPUState *cpu) ++{ ++ ARMCPU *arm_cpu = ARM_CPU(cpu); ++ CPUARMState *env = &arm_cpu->env; ++ uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match); ++ uint64_t pfr; ++ hv_return_t ret; ++ int i; ++ ++ env->aarch64 = 1; ++ asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz)); ++ ++ /* Allocate enough space for our sysreg sync */ ++ arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes, ++ sregs_match_len); ++ arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values, ++ sregs_match_len); ++ arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t, ++ arm_cpu->cpreg_vmstate_indexes, ++ sregs_match_len); ++ arm_cpu->cpreg_vmstate_values = g_renew(uint64_t, ++ arm_cpu->cpreg_vmstate_values, ++ sregs_match_len); ++ ++ memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t)); ++ arm_cpu->cpreg_array_len = sregs_match_len; ++ arm_cpu->cpreg_vmstate_array_len = sregs_match_len; ++ ++ /* Populate cp list for all known sysregs */ ++ for (i = 0; i < sregs_match_len; i++) { ++ const ARMCPRegInfo *ri; ++ ++ arm_cpu->cpreg_indexes[i] = cpreg_to_kvm_id(hvf_sreg_match[i].key); ++ ++ ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key); ++ if (ri) { ++ assert(!(ri->type & ARM_CP_NO_RAW)); ++ } ++ } ++ write_cpustate_to_list(arm_cpu, false); ++ ++ /* Set CP_NO_RAW system registers on init */ ++ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1, ++ arm_cpu->midr); ++ assert_hvf_ok(ret); ++ ++ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1, ++ arm_cpu->mp_affinity); ++ assert_hvf_ok(ret); ++ ++ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr); ++ assert_hvf_ok(ret); ++ pfr |= env->gicv3state ? 
(1 << 24) : 0; ++ ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr); ++ assert_hvf_ok(ret); ++ ++ /* We're limited to underlying hardware caps, override internal versions */ ++ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1, ++ &arm_cpu->isar.id_aa64mmfr0); ++ assert_hvf_ok(ret); ++ ++ return 0; ++} ++ ++void hvf_kick_vcpu_thread(CPUState *cpu) ++{ ++ cpus_kick_thread(cpu); ++ hv_vcpus_exit(&cpu->hvf->fd, 1); ++} ++ ++static void hvf_raise_exception(CPUARMState *env, uint32_t excp, ++ uint32_t syndrome) ++{ ++ unsigned int new_el = 1; ++ unsigned int old_mode = pstate_read(env); ++ unsigned int new_mode = aarch64_pstate_mode(new_el, true); ++ target_ulong addr = env->cp15.vbar_el[new_el]; ++ ++ env->cp15.esr_el[new_el] = syndrome; ++ aarch64_save_sp(env, arm_current_el(env)); ++ env->elr_el[new_el] = env->pc; ++ env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; ++ pstate_write(env, PSTATE_DAIF | new_mode); ++ aarch64_restore_sp(env, new_el); ++ env->pc = addr; ++} ++ ++static int hvf_psci_cpu_off(ARMCPU *arm_cpu) ++{ ++ int32_t ret = 0; ++ ret = arm_set_cpu_off(arm_cpu->mp_affinity); ++ assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS); ++ ++ return 0; ++} ++ ++static int hvf_handle_psci_call(CPUState *cpu) ++{ ++ ARMCPU *arm_cpu = ARM_CPU(cpu); ++ CPUARMState *env = &arm_cpu->env; ++ uint64_t param[4] = { ++ env->xregs[0], ++ env->xregs[1], ++ env->xregs[2], ++ env->xregs[3] ++ }; ++ uint64_t context_id, mpidr; ++ bool target_aarch64 = true; ++ CPUState *target_cpu_state; ++ ARMCPU *target_cpu; ++ target_ulong entry; ++ int target_el = 1; ++ int32_t ret = 0; ++ ++ trace_hvf_psci_call(param[0], param[1], param[2], param[3], ++ arm_cpu->mp_affinity); ++ ++ switch (param[0]) { ++ case QEMU_PSCI_0_2_FN_PSCI_VERSION: ++ ret = QEMU_PSCI_0_2_RET_VERSION_0_2; ++ break; ++ case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: ++ ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */ ++ break; ++ case QEMU_PSCI_0_2_FN_AFFINITY_INFO: ++ case QEMU_PSCI_0_2_FN64_AFFINITY_INFO: ++ mpidr = param[1]; ++ ++ switch (param[2]) { ++ case 0: ++ target_cpu_state = arm_get_cpu_by_id(mpidr); ++ if (!target_cpu_state) { ++ ret = QEMU_PSCI_RET_INVALID_PARAMS; ++ break; ++ } ++ target_cpu = ARM_CPU(target_cpu_state); ++ ++ ret = target_cpu->power_state; ++ break; ++ default: ++ /* Everything above affinity level 0 is always on. */ ++ ret = 0; ++ } ++ break; ++ case QEMU_PSCI_0_2_FN_SYSTEM_RESET: ++ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); ++ /* QEMU reset and shutdown are async requests, but PSCI ++ * mandates that we never return from the reset/shutdown ++ * call, so power the CPU off now so it doesn't execute ++ * anything further. 
++ */ ++ return hvf_psci_cpu_off(arm_cpu); ++ case QEMU_PSCI_0_2_FN_SYSTEM_OFF: ++ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); ++ return hvf_psci_cpu_off(arm_cpu); ++ case QEMU_PSCI_0_1_FN_CPU_ON: ++ case QEMU_PSCI_0_2_FN_CPU_ON: ++ case QEMU_PSCI_0_2_FN64_CPU_ON: ++ mpidr = param[1]; ++ entry = param[2]; ++ context_id = param[3]; ++ ret = arm_set_cpu_on(mpidr, entry, context_id, ++ target_el, target_aarch64); ++ break; ++ case QEMU_PSCI_0_1_FN_CPU_OFF: ++ case QEMU_PSCI_0_2_FN_CPU_OFF: ++ return hvf_psci_cpu_off(arm_cpu); ++ case QEMU_PSCI_0_1_FN_CPU_SUSPEND: ++ case QEMU_PSCI_0_2_FN_CPU_SUSPEND: ++ case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: ++ /* Affinity levels are not supported in QEMU */ ++ if (param[1] & 0xfffe0000) { ++ ret = QEMU_PSCI_RET_INVALID_PARAMS; ++ break; ++ } ++ /* Powerdown is not supported, we always go into WFI */ ++ env->xregs[0] = 0; ++ hvf_wfi(cpu); ++ break; ++ case QEMU_PSCI_0_1_FN_MIGRATE: ++ case QEMU_PSCI_0_2_FN_MIGRATE: ++ ret = QEMU_PSCI_RET_NOT_SUPPORTED; ++ break; ++ default: ++ return 1; ++ } ++ ++ env->xregs[0] = ret; ++ return 0; ++} ++ ++static uint64_t hvf_sysreg_read(CPUState *cpu, uint32_t reg) ++{ ++ ARMCPU *arm_cpu = ARM_CPU(cpu); ++ uint64_t val = 0; ++ ++ switch (reg) { ++ case SYSREG_CNTPCT_EL0: ++ val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / ++ gt_cntfrq_period_ns(arm_cpu); ++ break; ++ case SYSREG_PMCCNTR_EL0: ++ val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); ++ break; ++ default: ++ trace_hvf_unhandled_sysreg_read(reg, ++ (reg >> 20) & 0x3, ++ (reg >> 14) & 0x7, ++ (reg >> 10) & 0xf, ++ (reg >> 1) & 0xf, ++ (reg >> 17) & 0x7); ++ break; ++ } ++ ++ return val; ++} ++ ++static void hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) ++{ ++ switch (reg) { ++ case SYSREG_CNTPCT_EL0: ++ break; ++ default: ++ trace_hvf_unhandled_sysreg_write(reg, ++ (reg >> 20) & 0x3, ++ (reg >> 14) & 0x7, ++ (reg >> 10) & 0xf, ++ (reg >> 1) & 0xf, ++ (reg >> 17) & 0x7); ++ break; ++ } ++} ++ ++static int hvf_inject_interrupts(CPUState *cpu) ++{ ++ if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) { ++ trace_hvf_inject_fiq(); ++ hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ, ++ true); ++ } ++ ++ if (cpu->interrupt_request & CPU_INTERRUPT_HARD) { ++ trace_hvf_inject_irq(); ++ hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ, ++ true); ++ } ++ ++ return 0; ++} ++ ++static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts) ++{ ++ /* ++ * Use pselect to sleep so that other threads can IPI us while we're ++ * sleeping. ++ */ ++ qatomic_mb_set(&cpu->thread_kicked, false); ++ qemu_mutex_unlock_iothread(); ++ pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask); ++ qemu_mutex_lock_iothread(); ++} ++ ++static void hvf_wfi(CPUState *cpu) ++{ ++ ARMCPU *arm_cpu = ARM_CPU(cpu); ++ hv_return_t r; ++ uint64_t ctl; ++ ++ if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) { ++ /* Interrupt pending, no need to wait */ ++ return; ++ } ++ ++ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, ++ &ctl); ++ assert_hvf_ok(r); ++ ++ if (!(ctl & 1) || (ctl & 2)) { ++ /* Timer disabled or masked, just wait for an IPI. 
*/ ++ hvf_wait_for_ipi(cpu, NULL); ++ return; ++ } ++ ++ uint64_t cval; ++ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, ++ &cval); ++ assert_hvf_ok(r); ++ ++ int64_t ticks_to_sleep = cval - mach_absolute_time(); ++ if (ticks_to_sleep < 0) { ++ return; ++ } ++ ++ uint64_t seconds = ticks_to_sleep / arm_cpu->gt_cntfrq_hz; ++ uint64_t nanos = ++ (ticks_to_sleep - arm_cpu->gt_cntfrq_hz * seconds) * ++ 1000000000 / arm_cpu->gt_cntfrq_hz; ++ ++ /* ++ * Don't sleep for less than the time a context switch would take, ++ * so that we can satisfy fast timer requests on the same CPU. ++ * Measurements on M1 show the sweet spot to be ~2ms. ++ */ ++ if (!seconds && nanos < 2000000) { ++ return; ++ } ++ ++ struct timespec ts = { seconds, nanos }; ++ hvf_wait_for_ipi(cpu, &ts); ++} ++ ++static void hvf_sync_vtimer(CPUState *cpu) ++{ ++ ARMCPU *arm_cpu = ARM_CPU(cpu); ++ hv_return_t r; ++ uint64_t ctl; ++ bool irq_state; ++ ++ if (!cpu->hvf->vtimer_masked) { ++ /* We will get notified on vtimer changes by hvf, nothing to do */ ++ return; ++ } ++ ++ r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl); ++ assert_hvf_ok(r); ++ ++ irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) == ++ (TMR_CTL_ENABLE | TMR_CTL_ISTATUS); ++ qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state); ++ ++ if (!irq_state) { ++ /* Timer no longer asserting, we can unmask it */ ++ hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false); ++ cpu->hvf->vtimer_masked = false; ++ } ++} ++ ++int hvf_vcpu_exec(CPUState *cpu) ++{ ++ ARMCPU *arm_cpu = ARM_CPU(cpu); ++ CPUARMState *env = &arm_cpu->env; ++ hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit; ++ hv_return_t r; ++ bool advance_pc = false; ++ ++ flush_cpu_state(cpu); ++ ++ hvf_sync_vtimer(cpu); ++ ++ if (hvf_inject_interrupts(cpu)) { ++ return EXCP_INTERRUPT; ++ } ++ ++ if (cpu->halted) { ++ /* On unhalt, we usually have CPU state changes. Prepare for them. */ ++ cpu_synchronize_state(cpu); ++ return EXCP_HLT; ++ } ++ ++ qemu_mutex_unlock_iothread(); ++ assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd)); ++ ++ /* handle VMEXIT */ ++ uint64_t exit_reason = hvf_exit->reason; ++ uint64_t syndrome = hvf_exit->exception.syndrome; ++ uint32_t ec = syn_get_ec(syndrome); ++ ++ qemu_mutex_lock_iothread(); ++ switch (exit_reason) { ++ case HV_EXIT_REASON_EXCEPTION: ++ /* This is the main one, handle below. 
*/ ++ break; ++ case HV_EXIT_REASON_VTIMER_ACTIVATED: ++ qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1); ++ cpu->hvf->vtimer_masked = true; ++ return 0; ++ case HV_EXIT_REASON_CANCELED: ++ /* we got kicked, no exit to process */ ++ return 0; ++ default: ++ assert(0); ++ } ++ ++ switch (ec) { ++ case EC_DATAABORT: { ++ bool isv = syndrome & ARM_EL_ISV; ++ bool iswrite = (syndrome >> 6) & 1; ++ bool s1ptw = (syndrome >> 7) & 1; ++ uint32_t sas = (syndrome >> 22) & 3; ++ uint32_t len = 1 << sas; ++ uint32_t srt = (syndrome >> 16) & 0x1f; ++ uint64_t val = 0; ++ ++ trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address, ++ hvf_exit->exception.physical_address, isv, ++ iswrite, s1ptw, len, srt); ++ ++ assert(isv); ++ ++ if (iswrite) { ++ val = hvf_get_reg(cpu, srt); ++ address_space_write(&address_space_memory, ++ hvf_exit->exception.physical_address, ++ MEMTXATTRS_UNSPECIFIED, &val, len); ++ } else { ++ address_space_read(&address_space_memory, ++ hvf_exit->exception.physical_address, ++ MEMTXATTRS_UNSPECIFIED, &val, len); ++ hvf_set_reg(cpu, srt, val); ++ } ++ ++ advance_pc = true; ++ break; ++ } ++ case EC_SYSTEMREGISTERTRAP: { ++ bool isread = (syndrome >> 0) & 1; ++ uint32_t rt = (syndrome >> 5) & 0x1f; ++ uint32_t reg = syndrome & SYSREG_MASK; ++ uint64_t val = 0; ++ ++ if (isread) { ++ val = hvf_sysreg_read(cpu, reg); ++ trace_hvf_sysreg_read(reg, ++ (reg >> 20) & 0x3, ++ (reg >> 14) & 0x7, ++ (reg >> 10) & 0xf, ++ (reg >> 1) & 0xf, ++ (reg >> 17) & 0x7, ++ val); ++ hvf_set_reg(cpu, rt, val); ++ } else { ++ val = hvf_get_reg(cpu, rt); ++ trace_hvf_sysreg_write(reg, ++ (reg >> 20) & 0x3, ++ (reg >> 14) & 0x7, ++ (reg >> 10) & 0xf, ++ (reg >> 1) & 0xf, ++ (reg >> 17) & 0x7, ++ val); ++ hvf_sysreg_write(cpu, reg, val); ++ } ++ ++ advance_pc = true; ++ break; ++ } ++ case EC_WFX_TRAP: ++ advance_pc = true; ++ if (!(syndrome & WFX_IS_WFE)) { ++ hvf_wfi(cpu); ++ } ++ break; ++ case EC_AA64_HVC: ++ cpu_synchronize_state(cpu); ++ if (hvf_handle_psci_call(cpu)) { ++ trace_hvf_unknown_hvf(env->xregs[0]); ++ hvf_raise_exception(env, EXCP_UDEF, syn_uncategorized()); ++ } ++ break; ++ case EC_AA64_SMC: ++ cpu_synchronize_state(cpu); ++ if (!hvf_handle_psci_call(cpu)) { ++ advance_pc = true; ++ } else if (env->xregs[0] == QEMU_SMCCC_TC_WINDOWS10_BOOT) { ++ /* This special SMC is called by Windows 10 on boot. 
Return error */ ++ env->xregs[0] = -1; ++ advance_pc = true; ++ } else { ++ trace_hvf_unknown_smc(env->xregs[0]); ++ hvf_raise_exception(env, EXCP_UDEF, syn_uncategorized()); ++ } ++ break; ++ default: ++ cpu_synchronize_state(cpu); ++ trace_hvf_exit(syndrome, ec, env->pc); ++ error_report("0x%llx: unhandled exit 0x%llx", env->pc, exit_reason); ++ } ++ ++ if (advance_pc) { ++ uint64_t pc; ++ ++ flush_cpu_state(cpu); ++ ++ r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc); ++ assert_hvf_ok(r); ++ pc += 4; ++ r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc); ++ assert_hvf_ok(r); ++ } ++ ++ return 0; ++} +diff --git a/target/arm/hvf/meson.build b/target/arm/hvf/meson.build +new file mode 100644 +index 0000000000..855e6cce5a +--- /dev/null ++++ b/target/arm/hvf/meson.build +@@ -0,0 +1,3 @@ ++arm_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files( ++ 'hvf.c', ++)) +diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events +new file mode 100644 +index 0000000000..278b88cc62 +--- /dev/null ++++ b/target/arm/hvf/trace-events +@@ -0,0 +1,11 @@ ++hvf_unhandled_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)" ++hvf_unhandled_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)" ++hvf_inject_fiq(void) "injecting FIQ" ++hvf_inject_irq(void) "injecting IRQ" ++hvf_data_abort(uint64_t pc, uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [pc=0x%"PRIx64" va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]" ++hvf_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64 ++hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d, val=0x%016"PRIx64")" ++hvf_unknown_hvf(uint64_t x0) "unknown HVC! 0x%016"PRIx64 ++hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64 ++hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]" ++hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpu=0x%x" +diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h +new file mode 100644 +index 0000000000..603074a331 +--- /dev/null ++++ b/target/arm/hvf_arm.h +@@ -0,0 +1,19 @@ ++/* ++ * QEMU Hypervisor.framework (HVF) support -- ARM specifics ++ * ++ * Copyright (c) 2021 Alexander Graf ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. 
++ * ++ */ ++ ++#ifndef QEMU_HVF_ARM_H ++#define QEMU_HVF_ARM_H ++ ++#include "qemu/accel.h" ++#include "cpu.h" ++ ++void hvf_arm_set_cpu_features_from_host(struct ARMCPU *cpu); ++ ++#endif +diff --git a/target/arm/kvm-consts.h b/target/arm/kvm-consts.h +index 580f1c1fee..4b64f98117 100644 +--- a/target/arm/kvm-consts.h ++++ b/target/arm/kvm-consts.h +@@ -85,6 +85,8 @@ MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_SUSPEND, PSCI_0_2_FN64_CPU_SUSPEND); + MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_ON, PSCI_0_2_FN64_CPU_ON); + MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_MIGRATE, PSCI_0_2_FN64_MIGRATE); + ++#define QEMU_SMCCC_TC_WINDOWS10_BOOT 0xc3000001 ++ + /* PSCI v0.2 return values used by TCG emulation of PSCI */ + + /* No Trusted OS migration to worry about when offlining CPUs */ +diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h +index 34f8daa377..828dca4a4a 100644 +--- a/target/arm/kvm_arm.h ++++ b/target/arm/kvm_arm.h +@@ -214,8 +214,6 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, + */ + void kvm_arm_destroy_scratch_host_vcpu(int *fdarray); + +-#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU +- + /** + * ARMHostCPUFeatures: information about the host CPU (identified + * by asking the host kernel) +diff --git a/target/arm/meson.build b/target/arm/meson.build +index 25a02bf276..50f152214a 100644 +--- a/target/arm/meson.build ++++ b/target/arm/meson.build +@@ -60,5 +60,7 @@ arm_softmmu_ss.add(files( + 'psci.c', + )) + ++subdir('hvf') ++ + target_arch += {'arm': arm_ss} + target_softmmu_arch += {'arm': arm_softmmu_ss} +diff --git a/target/arm/psci.c b/target/arm/psci.c +index 6709e28013..4d11dd59c4 100644 +--- a/target/arm/psci.c ++++ b/target/arm/psci.c +@@ -69,6 +69,7 @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type) + case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: + case QEMU_PSCI_0_1_FN_MIGRATE: + case QEMU_PSCI_0_2_FN_MIGRATE: ++ case QEMU_SMCCC_TC_WINDOWS10_BOOT: + return true; + default: + return false; +@@ -194,6 +195,7 @@ void arm_handle_psci_call(ARMCPU *cpu) + break; + case QEMU_PSCI_0_1_FN_MIGRATE: + case QEMU_PSCI_0_2_FN_MIGRATE: ++ case QEMU_SMCCC_TC_WINDOWS10_BOOT: + ret = QEMU_PSCI_RET_NOT_SUPPORTED; + break; + default: +diff --git a/target/mips/tcg/sysemu/mips-semi.c b/target/mips/tcg/sysemu/mips-semi.c +index 77108b0b1a..b4a383ae90 100644 +--- a/target/mips/tcg/sysemu/mips-semi.c ++++ b/target/mips/tcg/sysemu/mips-semi.c +@@ -74,25 +74,19 @@ enum UHIOpenFlags { + UHIOpen_EXCL = 0x800 + }; + +-/* Errno values taken from asm-mips/errno.h */ +-static const uint16_t host_to_mips_errno[] = { +- [ENAMETOOLONG] = 78, ++static int errno_mips(int host_errno) ++{ ++ /* Errno values taken from asm-mips/errno.h */ ++ switch (host_errno) { ++ case 0: return 0; ++ case ENAMETOOLONG: return 78; + #ifdef EOVERFLOW +- [EOVERFLOW] = 79, ++ case EOVERFLOW: return 79; + #endif + #ifdef ELOOP +- [ELOOP] = 90, ++ case ELOOP: return 90; + #endif +-}; +- +-static int errno_mips(int err) +-{ +- if (err < 0 || err >= ARRAY_SIZE(host_to_mips_errno)) { +- return EINVAL; +- } else if (host_to_mips_errno[err]) { +- return host_to_mips_errno[err]; +- } else { +- return err; ++ default: return EINVAL; + } + } + +diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c +index 47c967acbf..fd980ea966 100644 +--- a/target/mips/tcg/translate.c ++++ b/target/mips/tcg/translate.c +@@ -1179,7 +1179,6 @@ enum { + + enum { + MMI_OPC_CLASS_MMI = 0x1C << 26, /* Same as OPC_SPECIAL2 */ +- MMI_OPC_LQ = 0x1E << 26, /* Same as OPC_MSA */ + MMI_OPC_SQ = 0x1F << 26, /* Same as OPC_SPECIAL3 */ + }; + +@@ 
-15166,11 +15165,6 @@ static void decode_mmi(CPUMIPSState *env, DisasContext *ctx) + } + } + +-static void gen_mmi_lq(CPUMIPSState *env, DisasContext *ctx) +-{ +- gen_reserved_instruction(ctx); /* TODO: MMI_OPC_LQ */ +-} +- + static void gen_mmi_sq(DisasContext *ctx, int base, int rt, int offset) + { + gen_reserved_instruction(ctx); /* TODO: MMI_OPC_SQ */ +@@ -16069,14 +16063,8 @@ static bool decode_opc_legacy(CPUMIPSState *env, DisasContext *ctx) + gen_compute_branch(ctx, op, 4, rs, rt, offset, 4); + } + break; +- case OPC_MDMX: /* MMI_OPC_LQ */ +- if (ctx->insn_flags & INSN_R5900) { +-#if defined(TARGET_MIPS64) +- gen_mmi_lq(env, ctx); +-#endif +- } else { +- /* MDMX: Not implemented. */ +- } ++ case OPC_MDMX: ++ /* MDMX: Not implemented. */ + break; + case OPC_PCREL: + check_insn(ctx, ISA_MIPS_R6); +diff --git a/target/mips/tcg/tx79.decode b/target/mips/tcg/tx79.decode +index 0f748b53a6..03a25a5096 100644 +--- a/target/mips/tcg/tx79.decode ++++ b/target/mips/tcg/tx79.decode +@@ -13,6 +13,8 @@ + + &rtype rs rt rd sa + ++&itype base rt offset ++ + ########################################################################### + # Named instruction formats. These are generally used to + # reduce the amount of duplication between instruction patterns. +@@ -22,6 +24,8 @@ + @rs ...... rs:5 ..... .......... ...... &rtype rt=0 rd=0 sa=0 + @rd ...... .......... rd:5 ..... ...... &rtype rs=0 rt=0 sa=0 + ++@ldst ...... base:5 rt:5 offset:16 &itype ++ + ########################################################################### + + MFHI1 011100 0000000000 ..... 00000 010000 @rd +@@ -29,11 +33,41 @@ MTHI1 011100 ..... 0000000000 00000 010001 @rs + MFLO1 011100 0000000000 ..... 00000 010010 @rd + MTLO1 011100 ..... 0000000000 00000 010011 @rs + ++# MMI0 ++ ++PSUBW 011100 ..... ..... ..... 00001 001000 @rs_rt_rd ++PCGTW 011100 ..... ..... ..... 00010 001000 @rs_rt_rd ++PSUBH 011100 ..... ..... ..... 00101 001000 @rs_rt_rd ++PCGTH 011100 ..... ..... ..... 00110 001000 @rs_rt_rd ++PSUBB 011100 ..... ..... ..... 01001 001000 @rs_rt_rd ++PCGTB 011100 ..... ..... ..... 01010 001000 @rs_rt_rd ++PEXTLW 011100 ..... ..... ..... 10010 001000 @rs_rt_rd ++PPACW 011100 ..... ..... ..... 10011 001000 @rs_rt_rd ++PEXTLH 011100 ..... ..... ..... 10110 001000 @rs_rt_rd ++PEXTLB 011100 ..... ..... ..... 11010 001000 @rs_rt_rd ++ ++# MMI1 ++ ++PCEQW 011100 ..... ..... ..... 00010 101000 @rs_rt_rd ++PCEQH 011100 ..... ..... ..... 00110 101000 @rs_rt_rd ++PCEQB 011100 ..... ..... ..... 01010 101000 @rs_rt_rd ++PEXTUW 011100 ..... ..... ..... 10010 101000 @rs_rt_rd ++ + # MMI2 + + PCPYLD 011100 ..... ..... ..... 01110 001001 @rs_rt_rd ++PAND 011100 ..... ..... ..... 10010 001001 @rs_rt_rd ++PXOR 011100 ..... ..... ..... 10011 001001 @rs_rt_rd ++PROT3W 011100 00000 ..... ..... 11111 001001 @rt_rd + + # MMI3 + + PCPYUD 011100 ..... ..... ..... 01110 101001 @rs_rt_rd ++POR 011100 ..... ..... ..... 10010 101001 @rs_rt_rd ++PNOR 011100 ..... ..... ..... 10011 101001 @rs_rt_rd + PCPYH 011100 00000 ..... ..... 11011 101001 @rt_rd ++ ++# SPECIAL ++ ++LQ 011110 ..... ..... ................ @ldst ++SQ 011111 ..... ..... ................ 
@ldst +diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c +index ad83774b97..395d6afa1f 100644 +--- a/target/mips/tcg/tx79_translate.c ++++ b/target/mips/tcg/tx79_translate.c +@@ -2,12 +2,14 @@ + * Toshiba TX79-specific instructions translation routines + * + * Copyright (c) 2018 Fredrik Noring ++ * Copyright (c) 2021 Philippe Mathieu-Daudé + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + + #include "qemu/osdep.h" + #include "tcg/tcg-op.h" ++#include "tcg/tcg-op-gvec.h" + #include "exec/helper-gen.h" + #include "translate.h" + +@@ -114,6 +116,53 @@ static bool trans_MTLO1(DisasContext *ctx, arg_rtype *a) + * PSUBUW rd, rs, rt Parallel Subtract with Unsigned saturation Word + */ + ++static bool trans_parallel_arith(DisasContext *ctx, arg_rtype *a, ++ void (*gen_logic_i64)(TCGv_i64, TCGv_i64, TCGv_i64)) ++{ ++ TCGv_i64 ax, bx; ++ ++ if (a->rd == 0) { ++ /* nop */ ++ return true; ++ } ++ ++ ax = tcg_temp_new_i64(); ++ bx = tcg_temp_new_i64(); ++ ++ /* Lower half */ ++ gen_load_gpr(ax, a->rs); ++ gen_load_gpr(bx, a->rt); ++ gen_logic_i64(cpu_gpr[a->rd], ax, bx); ++ ++ /* Upper half */ ++ gen_load_gpr_hi(ax, a->rs); ++ gen_load_gpr_hi(bx, a->rt); ++ gen_logic_i64(cpu_gpr_hi[a->rd], ax, bx); ++ ++ tcg_temp_free(bx); ++ tcg_temp_free(ax); ++ ++ return true; ++} ++ ++/* Parallel Subtract Byte */ ++static bool trans_PSUBB(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_arith(ctx, a, tcg_gen_vec_sub8_i64); ++} ++ ++/* Parallel Subtract Halfword */ ++static bool trans_PSUBH(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_arith(ctx, a, tcg_gen_vec_sub16_i64); ++} ++ ++/* Parallel Subtract Word */ ++static bool trans_PSUBW(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_arith(ctx, a, tcg_gen_vec_sub32_i64); ++} ++ + /* + * Min/Max (4 instructions) + * ------------------------ +@@ -139,6 +188,30 @@ static bool trans_MTLO1(DisasContext *ctx, arg_rtype *a) + * PNOR rd, rs, rt Parallel NOR + */ + ++/* Parallel And */ ++static bool trans_PAND(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_arith(ctx, a, tcg_gen_and_i64); ++} ++ ++/* Parallel Or */ ++static bool trans_POR(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_arith(ctx, a, tcg_gen_or_i64); ++} ++ ++/* Parallel Exclusive Or */ ++static bool trans_PXOR(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_arith(ctx, a, tcg_gen_xor_i64); ++} ++ ++/* Parallel Not Or */ ++static bool trans_PNOR(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_arith(ctx, a, tcg_gen_nor_i64); ++} ++ + /* + * Shift (9 instructions) + * ---------------------- +@@ -164,6 +237,90 @@ static bool trans_MTLO1(DisasContext *ctx, arg_rtype *a) + * PCEQW rd, rs, rt Parallel Compare for Equal Word + */ + ++static bool trans_parallel_compare(DisasContext *ctx, arg_rtype *a, ++ TCGCond cond, unsigned wlen) ++{ ++ TCGv_i64 c0, c1, ax, bx, t0, t1, t2; ++ ++ if (a->rd == 0) { ++ /* nop */ ++ return true; ++ } ++ ++ c0 = tcg_const_tl(0); ++ c1 = tcg_const_tl(0xffffffff); ++ ax = tcg_temp_new_i64(); ++ bx = tcg_temp_new_i64(); ++ t0 = tcg_temp_new_i64(); ++ t1 = tcg_temp_new_i64(); ++ t2 = tcg_temp_new_i64(); ++ ++ /* Lower half */ ++ gen_load_gpr(ax, a->rs); ++ gen_load_gpr(bx, a->rt); ++ for (int i = 0; i < (64 / wlen); i++) { ++ tcg_gen_sextract_i64(t0, ax, wlen * i, wlen); ++ tcg_gen_sextract_i64(t1, bx, wlen * i, wlen); ++ tcg_gen_movcond_i64(cond, t2, t1, t0, c1, c0); ++ tcg_gen_deposit_i64(cpu_gpr[a->rd], cpu_gpr[a->rd], t2, wlen * i, wlen); ++ } ++ /* 
Upper half */ ++ gen_load_gpr_hi(ax, a->rs); ++ gen_load_gpr_hi(bx, a->rt); ++ for (int i = 0; i < (64 / wlen); i++) { ++ tcg_gen_sextract_i64(t0, ax, wlen * i, wlen); ++ tcg_gen_sextract_i64(t1, bx, wlen * i, wlen); ++ tcg_gen_movcond_i64(cond, t2, t1, t0, c1, c0); ++ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], cpu_gpr_hi[a->rd], t2, wlen * i, wlen); ++ } ++ ++ tcg_temp_free(t2); ++ tcg_temp_free(t1); ++ tcg_temp_free(t0); ++ tcg_temp_free(bx); ++ tcg_temp_free(ax); ++ tcg_temp_free(c1); ++ tcg_temp_free(c0); ++ ++ return true; ++} ++ ++/* Parallel Compare for Greater Than Byte */ ++static bool trans_PCGTB(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_compare(ctx, a, TCG_COND_GE, 8); ++} ++ ++/* Parallel Compare for Equal Byte */ ++static bool trans_PCEQB(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_compare(ctx, a, TCG_COND_EQ, 8); ++} ++ ++/* Parallel Compare for Greater Than Halfword */ ++static bool trans_PCGTH(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_compare(ctx, a, TCG_COND_GE, 16); ++} ++ ++/* Parallel Compare for Equal Halfword */ ++static bool trans_PCEQH(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_compare(ctx, a, TCG_COND_EQ, 16); ++} ++ ++/* Parallel Compare for Greater Than Word */ ++static bool trans_PCGTW(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_compare(ctx, a, TCG_COND_GE, 32); ++} ++ ++/* Parallel Compare for Equal Word */ ++static bool trans_PCEQW(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_parallel_compare(ctx, a, TCG_COND_EQ, 32); ++} ++ + /* + * LZC (1 instruction) + * ------------------- +@@ -177,6 +334,68 @@ static bool trans_MTLO1(DisasContext *ctx, arg_rtype *a) + * SQ rt, offset(base) Store Quadword + */ + ++static bool trans_LQ(DisasContext *ctx, arg_itype *a) ++{ ++ TCGv_i64 t0; ++ TCGv addr; ++ ++ if (a->rt == 0) { ++ /* nop */ ++ return true; ++ } ++ ++ t0 = tcg_temp_new_i64(); ++ addr = tcg_temp_new(); ++ ++ gen_base_offset_addr(ctx, addr, a->base, a->offset); ++ /* ++ * Clear least-significant four bits of the effective ++ * address, effectively creating an aligned address. ++ */ ++ tcg_gen_andi_tl(addr, addr, ~0xf); ++ ++ /* Lower half */ ++ tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEQ); ++ gen_store_gpr(t0, a->rt); ++ ++ /* Upper half */ ++ tcg_gen_addi_i64(addr, addr, 8); ++ tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEQ); ++ gen_store_gpr_hi(t0, a->rt); ++ ++ tcg_temp_free(t0); ++ tcg_temp_free(addr); ++ ++ return true; ++} ++ ++static bool trans_SQ(DisasContext *ctx, arg_itype *a) ++{ ++ TCGv_i64 t0 = tcg_temp_new_i64(); ++ TCGv addr = tcg_temp_new(); ++ ++ gen_base_offset_addr(ctx, addr, a->base, a->offset); ++ /* ++ * Clear least-significant four bits of the effective ++ * address, effectively creating an aligned address. 
++ */ ++ tcg_gen_andi_tl(addr, addr, ~0xf); ++ ++ /* Lower half */ ++ gen_load_gpr(t0, a->rt); ++ tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEQ); ++ ++ /* Upper half */ ++ tcg_gen_addi_i64(addr, addr, 8); ++ gen_load_gpr_hi(t0, a->rt); ++ tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEQ); ++ ++ tcg_temp_free(addr); ++ tcg_temp_free(t0); ++ ++ return true; ++} ++ + /* + * Multiply and Divide (19 instructions) + * ------------------------------------- +@@ -217,6 +436,141 @@ static bool trans_MTLO1(DisasContext *ctx, arg_rtype *a) + * PEXTLW rd, rs, rt Parallel Extend Lower from Word + */ + ++/* Parallel Pack to Word */ ++static bool trans_PPACW(DisasContext *ctx, arg_rtype *a) ++{ ++ TCGv_i64 a0, b0, t0; ++ ++ if (a->rd == 0) { ++ /* nop */ ++ return true; ++ } ++ ++ a0 = tcg_temp_new_i64(); ++ b0 = tcg_temp_new_i64(); ++ t0 = tcg_temp_new_i64(); ++ ++ gen_load_gpr(a0, a->rs); ++ gen_load_gpr(b0, a->rt); ++ ++ gen_load_gpr_hi(t0, a->rt); /* b1 */ ++ tcg_gen_deposit_i64(cpu_gpr[a->rd], b0, t0, 32, 32); ++ ++ gen_load_gpr_hi(t0, a->rs); /* a1 */ ++ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], a0, t0, 32, 32); ++ ++ tcg_temp_free(t0); ++ tcg_temp_free(b0); ++ tcg_temp_free(a0); ++ ++ return true; ++} ++ ++static void gen_pextw(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 a, TCGv_i64 b) ++{ ++ tcg_gen_deposit_i64(dl, b, a, 32, 32); ++ tcg_gen_shri_i64(b, b, 32); ++ tcg_gen_deposit_i64(dh, a, b, 0, 32); ++} ++ ++static bool trans_PEXTLx(DisasContext *ctx, arg_rtype *a, unsigned wlen) ++{ ++ TCGv_i64 ax, bx; ++ ++ if (a->rd == 0) { ++ /* nop */ ++ return true; ++ } ++ ++ ax = tcg_temp_new_i64(); ++ bx = tcg_temp_new_i64(); ++ ++ gen_load_gpr(ax, a->rs); ++ gen_load_gpr(bx, a->rt); ++ ++ /* Lower half */ ++ for (int i = 0; i < 64 / (2 * wlen); i++) { ++ tcg_gen_deposit_i64(cpu_gpr[a->rd], ++ cpu_gpr[a->rd], bx, 2 * wlen * i, wlen); ++ tcg_gen_deposit_i64(cpu_gpr[a->rd], ++ cpu_gpr[a->rd], ax, 2 * wlen * i + wlen, wlen); ++ tcg_gen_shri_i64(bx, bx, wlen); ++ tcg_gen_shri_i64(ax, ax, wlen); ++ } ++ /* Upper half */ ++ for (int i = 0; i < 64 / (2 * wlen); i++) { ++ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], ++ cpu_gpr_hi[a->rd], bx, 2 * wlen * i, wlen); ++ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], ++ cpu_gpr_hi[a->rd], ax, 2 * wlen * i + wlen, wlen); ++ tcg_gen_shri_i64(bx, bx, wlen); ++ tcg_gen_shri_i64(ax, ax, wlen); ++ } ++ ++ tcg_temp_free(bx); ++ tcg_temp_free(ax); ++ ++ return true; ++} ++ ++/* Parallel Extend Lower from Byte */ ++static bool trans_PEXTLB(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_PEXTLx(ctx, a, 8); ++} ++ ++/* Parallel Extend Lower from Halfword */ ++static bool trans_PEXTLH(DisasContext *ctx, arg_rtype *a) ++{ ++ return trans_PEXTLx(ctx, a, 16); ++} ++ ++/* Parallel Extend Lower from Word */ ++static bool trans_PEXTLW(DisasContext *ctx, arg_rtype *a) ++{ ++ TCGv_i64 ax, bx; ++ ++ if (a->rd == 0) { ++ /* nop */ ++ return true; ++ } ++ ++ ax = tcg_temp_new_i64(); ++ bx = tcg_temp_new_i64(); ++ ++ gen_load_gpr(ax, a->rs); ++ gen_load_gpr(bx, a->rt); ++ gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx); ++ ++ tcg_temp_free(bx); ++ tcg_temp_free(ax); ++ ++ return true; ++} ++ ++/* Parallel Extend Upper from Word */ ++static bool trans_PEXTUW(DisasContext *ctx, arg_rtype *a) ++{ ++ TCGv_i64 ax, bx; ++ ++ if (a->rd == 0) { ++ /* nop */ ++ return true; ++ } ++ ++ ax = tcg_temp_new_i64(); ++ bx = tcg_temp_new_i64(); ++ ++ gen_load_gpr_hi(ax, a->rs); ++ gen_load_gpr_hi(bx, a->rt); ++ gen_pextw(cpu_gpr[a->rd], cpu_gpr_hi[a->rd], ax, bx); ++ ++ tcg_temp_free(bx); ++ tcg_temp_free(ax); ++ ++ 
return true;
++}
++
+ /*
+ * Others (16 instructions)
+ * ------------------------
+@@ -301,3 +655,31 @@ static bool trans_PCPYUD(DisasContext *s, arg_rtype *a)
+
+ return true;
+ }
++
++/* Parallel Rotate 3 Words Left */
++static bool trans_PROT3W(DisasContext *ctx, arg_rtype *a)
++{
++ TCGv_i64 ax;
++
++ if (a->rd == 0) {
++ /* nop */
++ return true;
++ }
++ if (a->rt == 0) {
++ tcg_gen_movi_i64(cpu_gpr[a->rd], 0);
++ tcg_gen_movi_i64(cpu_gpr_hi[a->rd], 0);
++ return true;
++ }
++
++ ax = tcg_temp_new_i64();
++
++ tcg_gen_mov_i64(ax, cpu_gpr_hi[a->rt]);
++ tcg_gen_deposit_i64(cpu_gpr_hi[a->rd], ax, cpu_gpr[a->rt], 0, 32);
++
++ tcg_gen_deposit_i64(cpu_gpr[a->rd], cpu_gpr[a->rt], ax, 0, 32);
++ tcg_gen_rotri_i64(cpu_gpr[a->rd], cpu_gpr[a->rd], 32);
++
++ tcg_temp_free(ax);
++
++ return true;
++}
+diff --git a/target/s390x/arch_dump.c b/target/s390x/arch_dump.c
+index cc1330876b..08daf93ae1 100644
+--- a/target/s390x/arch_dump.c
++++ b/target/s390x/arch_dump.c
+@@ -13,7 +13,7 @@
+
+ #include "qemu/osdep.h"
+ #include "cpu.h"
+-#include "internal.h"
++#include "s390x-internal.h"
+ #include "elf.h"
+ #include "sysemu/dump.h"
+
+diff --git a/target/s390x/cc_helper.c b/target/s390x/cc_helper.c
+deleted file mode 100644
+index e7a74d66dd..0000000000
+--- a/target/s390x/cc_helper.c
++++ /dev/null
+@@ -1,538 +0,0 @@
+-/*
+- * S/390 condition code helper routines
+- *
+- * Copyright (c) 2009 Ulrich Hecht
+- * Copyright (c) 2009 Alexander Graf
+- *
+- * This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU Lesser General Public
+- * License as published by the Free Software Foundation; either
+- * version 2.1 of the License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * Lesser General Public License for more details.
+- *
+- * You should have received a copy of the GNU Lesser General Public
+- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+- */
+-
+-#include "qemu/osdep.h"
+-#include "cpu.h"
+-#include "internal.h"
+-#include "tcg_s390x.h"
+-#include "exec/exec-all.h"
+-#include "exec/helper-proto.h"
+-#include "qemu/host-utils.h"
+-
+-/* #define DEBUG_HELPER */
+-#ifdef DEBUG_HELPER
+-#define HELPER_LOG(x...) qemu_log(x)
+-#else
+-#define HELPER_LOG(x...)
+-#endif +- +-static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst) +-{ +- if (src == dst) { +- return 0; +- } else if (src < dst) { +- return 1; +- } else { +- return 2; +- } +-} +- +-static uint32_t cc_calc_ltgt0_32(int32_t dst) +-{ +- return cc_calc_ltgt_32(dst, 0); +-} +- +-static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst) +-{ +- if (src == dst) { +- return 0; +- } else if (src < dst) { +- return 1; +- } else { +- return 2; +- } +-} +- +-static uint32_t cc_calc_ltgt0_64(int64_t dst) +-{ +- return cc_calc_ltgt_64(dst, 0); +-} +- +-static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst) +-{ +- if (src == dst) { +- return 0; +- } else if (src < dst) { +- return 1; +- } else { +- return 2; +- } +-} +- +-static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst) +-{ +- if (src == dst) { +- return 0; +- } else if (src < dst) { +- return 1; +- } else { +- return 2; +- } +-} +- +-static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask) +-{ +- uint32_t r = val & mask; +- +- if (r == 0) { +- return 0; +- } else if (r == mask) { +- return 3; +- } else { +- return 1; +- } +-} +- +-static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask) +-{ +- uint64_t r = val & mask; +- +- if (r == 0) { +- return 0; +- } else if (r == mask) { +- return 3; +- } else { +- int top = clz64(mask); +- if ((int64_t)(val << top) < 0) { +- return 2; +- } else { +- return 1; +- } +- } +-} +- +-static uint32_t cc_calc_nz(uint64_t dst) +-{ +- return !!dst; +-} +- +-static uint32_t cc_calc_addu(uint64_t carry_out, uint64_t result) +-{ +- g_assert(carry_out <= 1); +- return (result != 0) + 2 * carry_out; +-} +- +-static uint32_t cc_calc_subu(uint64_t borrow_out, uint64_t result) +-{ +- return cc_calc_addu(borrow_out + 1, result); +-} +- +-static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar) +-{ +- if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { +- return 3; /* overflow */ +- } else { +- if (ar < 0) { +- return 1; +- } else if (ar > 0) { +- return 2; +- } else { +- return 0; +- } +- } +-} +- +-static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar) +-{ +- if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { +- return 3; /* overflow */ +- } else { +- if (ar < 0) { +- return 1; +- } else if (ar > 0) { +- return 2; +- } else { +- return 0; +- } +- } +-} +- +-static uint32_t cc_calc_abs_64(int64_t dst) +-{ +- if ((uint64_t)dst == 0x8000000000000000ULL) { +- return 3; +- } else if (dst) { +- return 2; +- } else { +- return 0; +- } +-} +- +-static uint32_t cc_calc_nabs_64(int64_t dst) +-{ +- return !!dst; +-} +- +-static uint32_t cc_calc_comp_64(int64_t dst) +-{ +- if ((uint64_t)dst == 0x8000000000000000ULL) { +- return 3; +- } else if (dst < 0) { +- return 1; +- } else if (dst > 0) { +- return 2; +- } else { +- return 0; +- } +-} +- +- +-static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar) +-{ +- if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { +- return 3; /* overflow */ +- } else { +- if (ar < 0) { +- return 1; +- } else if (ar > 0) { +- return 2; +- } else { +- return 0; +- } +- } +-} +- +-static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar) +-{ +- if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { +- return 3; /* overflow */ +- } else { +- if (ar < 0) { +- return 1; +- } else if (ar > 0) { +- return 2; +- } else { +- return 0; +- } +- } +-} +- +-static uint32_t cc_calc_abs_32(int32_t dst) +-{ +- if ((uint32_t)dst == 0x80000000UL) { +- return 3; +- } else if (dst) { +- return 2; +- } 
else { +- return 0; +- } +-} +- +-static uint32_t cc_calc_nabs_32(int32_t dst) +-{ +- return !!dst; +-} +- +-static uint32_t cc_calc_comp_32(int32_t dst) +-{ +- if ((uint32_t)dst == 0x80000000UL) { +- return 3; +- } else if (dst < 0) { +- return 1; +- } else if (dst > 0) { +- return 2; +- } else { +- return 0; +- } +-} +- +-/* calculate condition code for insert character under mask insn */ +-static uint32_t cc_calc_icm(uint64_t mask, uint64_t val) +-{ +- if ((val & mask) == 0) { +- return 0; +- } else { +- int top = clz64(mask); +- if ((int64_t)(val << top) < 0) { +- return 1; +- } else { +- return 2; +- } +- } +-} +- +-static uint32_t cc_calc_sla_32(uint32_t src, int shift) +-{ +- uint32_t mask = ((1U << shift) - 1U) << (32 - shift); +- uint32_t sign = 1U << 31; +- uint32_t match; +- int32_t r; +- +- /* Check if the sign bit stays the same. */ +- if (src & sign) { +- match = mask; +- } else { +- match = 0; +- } +- if ((src & mask) != match) { +- /* Overflow. */ +- return 3; +- } +- +- r = ((src << shift) & ~sign) | (src & sign); +- if (r == 0) { +- return 0; +- } else if (r < 0) { +- return 1; +- } +- return 2; +-} +- +-static uint32_t cc_calc_sla_64(uint64_t src, int shift) +-{ +- uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift); +- uint64_t sign = 1ULL << 63; +- uint64_t match; +- int64_t r; +- +- /* Check if the sign bit stays the same. */ +- if (src & sign) { +- match = mask; +- } else { +- match = 0; +- } +- if ((src & mask) != match) { +- /* Overflow. */ +- return 3; +- } +- +- r = ((src << shift) & ~sign) | (src & sign); +- if (r == 0) { +- return 0; +- } else if (r < 0) { +- return 1; +- } +- return 2; +-} +- +-static uint32_t cc_calc_flogr(uint64_t dst) +-{ +- return dst ? 2 : 0; +-} +- +-static uint32_t cc_calc_lcbb(uint64_t dst) +-{ +- return dst == 16 ? 
0 : 3; +-} +- +-static uint32_t cc_calc_vc(uint64_t low, uint64_t high) +-{ +- if (high == -1ull && low == -1ull) { +- /* all elements match */ +- return 0; +- } else if (high == 0 && low == 0) { +- /* no elements match */ +- return 3; +- } else { +- /* some elements but not all match */ +- return 1; +- } +-} +- +-static uint32_t cc_calc_muls_32(int64_t res) +-{ +- const int64_t tmp = res >> 31; +- +- if (!res) { +- return 0; +- } else if (tmp && tmp != -1) { +- return 3; +- } else if (res < 0) { +- return 1; +- } +- return 2; +-} +- +-static uint64_t cc_calc_muls_64(int64_t res_high, uint64_t res_low) +-{ +- if (!res_high && !res_low) { +- return 0; +- } else if (res_high + (res_low >> 63) != 0) { +- return 3; +- } else if (res_high < 0) { +- return 1; +- } +- return 2; +-} +- +-static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op, +- uint64_t src, uint64_t dst, uint64_t vr) +-{ +- uint32_t r = 0; +- +- switch (cc_op) { +- case CC_OP_CONST0: +- case CC_OP_CONST1: +- case CC_OP_CONST2: +- case CC_OP_CONST3: +- /* cc_op value _is_ cc */ +- r = cc_op; +- break; +- case CC_OP_LTGT0_32: +- r = cc_calc_ltgt0_32(dst); +- break; +- case CC_OP_LTGT0_64: +- r = cc_calc_ltgt0_64(dst); +- break; +- case CC_OP_LTGT_32: +- r = cc_calc_ltgt_32(src, dst); +- break; +- case CC_OP_LTGT_64: +- r = cc_calc_ltgt_64(src, dst); +- break; +- case CC_OP_LTUGTU_32: +- r = cc_calc_ltugtu_32(src, dst); +- break; +- case CC_OP_LTUGTU_64: +- r = cc_calc_ltugtu_64(src, dst); +- break; +- case CC_OP_TM_32: +- r = cc_calc_tm_32(src, dst); +- break; +- case CC_OP_TM_64: +- r = cc_calc_tm_64(src, dst); +- break; +- case CC_OP_NZ: +- r = cc_calc_nz(dst); +- break; +- case CC_OP_ADDU: +- r = cc_calc_addu(src, dst); +- break; +- case CC_OP_SUBU: +- r = cc_calc_subu(src, dst); +- break; +- case CC_OP_ADD_64: +- r = cc_calc_add_64(src, dst, vr); +- break; +- case CC_OP_SUB_64: +- r = cc_calc_sub_64(src, dst, vr); +- break; +- case CC_OP_ABS_64: +- r = cc_calc_abs_64(dst); +- break; +- case CC_OP_NABS_64: +- r = cc_calc_nabs_64(dst); +- break; +- case CC_OP_COMP_64: +- r = cc_calc_comp_64(dst); +- break; +- case CC_OP_MULS_64: +- r = cc_calc_muls_64(src, dst); +- break; +- +- case CC_OP_ADD_32: +- r = cc_calc_add_32(src, dst, vr); +- break; +- case CC_OP_SUB_32: +- r = cc_calc_sub_32(src, dst, vr); +- break; +- case CC_OP_ABS_32: +- r = cc_calc_abs_32(dst); +- break; +- case CC_OP_NABS_32: +- r = cc_calc_nabs_32(dst); +- break; +- case CC_OP_COMP_32: +- r = cc_calc_comp_32(dst); +- break; +- case CC_OP_MULS_32: +- r = cc_calc_muls_32(dst); +- break; +- +- case CC_OP_ICM: +- r = cc_calc_icm(src, dst); +- break; +- case CC_OP_SLA_32: +- r = cc_calc_sla_32(src, dst); +- break; +- case CC_OP_SLA_64: +- r = cc_calc_sla_64(src, dst); +- break; +- case CC_OP_FLOGR: +- r = cc_calc_flogr(dst); +- break; +- case CC_OP_LCBB: +- r = cc_calc_lcbb(dst); +- break; +- case CC_OP_VC: +- r = cc_calc_vc(src, dst); +- break; +- +- case CC_OP_NZ_F32: +- r = set_cc_nz_f32(dst); +- break; +- case CC_OP_NZ_F64: +- r = set_cc_nz_f64(dst); +- break; +- case CC_OP_NZ_F128: +- r = set_cc_nz_f128(make_float128(src, dst)); +- break; +- +- default: +- cpu_abort(env_cpu(env), "Unknown CC operation: %s\n", cc_name(cc_op)); +- } +- +- HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__, +- cc_name(cc_op), src, dst, vr, r); +- return r; +-} +- +-uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, +- uint64_t vr) +-{ +- return do_calc_cc(env, cc_op, src, dst, vr); +-} +- +-uint32_t HELPER(calc_cc)(CPUS390XState 
*env, uint32_t cc_op, uint64_t src,
+- uint64_t dst, uint64_t vr)
+-{
+- return do_calc_cc(env, cc_op, src, dst, vr);
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
+-{
+- s390_cpu_set_psw(env, mask, addr);
+- cpu_loop_exit(env_cpu(env));
+-}
+-
+-void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
+-{
+- HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);
+-
+- switch (a1 & 0xf00) {
+- case 0x000:
+- env->psw.mask &= ~PSW_MASK_ASC;
+- env->psw.mask |= PSW_ASC_PRIMARY;
+- break;
+- case 0x100:
+- env->psw.mask &= ~PSW_MASK_ASC;
+- env->psw.mask |= PSW_ASC_SECONDARY;
+- break;
+- case 0x300:
+- env->psw.mask &= ~PSW_MASK_ASC;
+- env->psw.mask |= PSW_ASC_HOME;
+- break;
+- default:
+- HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
+- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+- }
+-}
+-#endif
+diff --git a/target/s390x/cpu-dump.c b/target/s390x/cpu-dump.c
+new file mode 100644
+index 0000000000..0f5c062994
+--- /dev/null
++++ b/target/s390x/cpu-dump.c
+@@ -0,0 +1,134 @@
++/*
++ * S/390 CPU dump to FILE
++ *
++ * Copyright (c) 2009 Ulrich Hecht
++ * Copyright (c) 2011 Alexander Graf
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2.1 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
++ *
++ */
++
++#include "qemu/osdep.h"
++#include "cpu.h"
++#include "s390x-internal.h"
++#include "qemu/qemu-print.h"
++#include "sysemu/tcg.h"
++
++void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags)
++{
++ S390CPU *cpu = S390_CPU(cs);
++ CPUS390XState *env = &cpu->env;
++ int i;
++
++ qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64,
++ s390_cpu_get_psw_mask(env), env->psw.addr);
++ if (!tcg_enabled()) {
++ qemu_fprintf(f, "\n");
++ } else if (env->cc_op > 3) {
++ qemu_fprintf(f, " cc %15s\n", cc_name(env->cc_op));
++ } else {
++ qemu_fprintf(f, " cc %02x\n", env->cc_op);
++ }
++
++ for (i = 0; i < 16; i++) {
++ qemu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
++ if ((i % 4) == 3) {
++ qemu_fprintf(f, "\n");
++ } else {
++ qemu_fprintf(f, " ");
++ }
++ }
++
++ if (flags & CPU_DUMP_FPU) {
++ if (s390_has_feat(S390_FEAT_VECTOR)) {
++ for (i = 0; i < 32; i++) {
++ qemu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64 "%c",
++ i, env->vregs[i][0], env->vregs[i][1],
++ i % 2 ? '\n' : ' ');
++ }
++ } else {
++ for (i = 0; i < 16; i++) {
++ qemu_fprintf(f, "F%02d=%016" PRIx64 "%c",
++ i, *get_freg(env, i),
++ (i % 4) == 3 ? '\n' : ' ');
++ }
++ }
++ }
++
++#ifndef CONFIG_USER_ONLY
++ for (i = 0; i < 16; i++) {
++ qemu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
++ if ((i % 4) == 3) {
++ qemu_fprintf(f, "\n");
++ } else {
++ qemu_fprintf(f, " ");
++ }
++ }
++#endif
++
++#ifdef DEBUG_INLINE_BRANCHES
++ for (i = 0; i < CC_OP_MAX; i++) {
++ qemu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
++ inline_branch_miss[i], inline_branch_hit[i]);
++ }
++#endif
++
++ qemu_fprintf(f, "\n");
++}
++
++const char *cc_name(enum cc_op cc_op)
++{
++ static const char * const cc_names[] = {
++ [CC_OP_CONST0] = "CC_OP_CONST0",
++ [CC_OP_CONST1] = "CC_OP_CONST1",
++ [CC_OP_CONST2] = "CC_OP_CONST2",
++ [CC_OP_CONST3] = "CC_OP_CONST3",
++ [CC_OP_DYNAMIC] = "CC_OP_DYNAMIC",
++ [CC_OP_STATIC] = "CC_OP_STATIC",
++ [CC_OP_NZ] = "CC_OP_NZ",
++ [CC_OP_ADDU] = "CC_OP_ADDU",
++ [CC_OP_SUBU] = "CC_OP_SUBU",
++ [CC_OP_LTGT_32] = "CC_OP_LTGT_32",
++ [CC_OP_LTGT_64] = "CC_OP_LTGT_64",
++ [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
++ [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
++ [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32",
++ [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64",
++ [CC_OP_ADD_64] = "CC_OP_ADD_64",
++ [CC_OP_SUB_64] = "CC_OP_SUB_64",
++ [CC_OP_ABS_64] = "CC_OP_ABS_64",
++ [CC_OP_NABS_64] = "CC_OP_NABS_64",
++ [CC_OP_ADD_32] = "CC_OP_ADD_32",
++ [CC_OP_SUB_32] = "CC_OP_SUB_32",
++ [CC_OP_ABS_32] = "CC_OP_ABS_32",
++ [CC_OP_NABS_32] = "CC_OP_NABS_32",
++ [CC_OP_COMP_32] = "CC_OP_COMP_32",
++ [CC_OP_COMP_64] = "CC_OP_COMP_64",
++ [CC_OP_TM_32] = "CC_OP_TM_32",
++ [CC_OP_TM_64] = "CC_OP_TM_64",
++ [CC_OP_NZ_F32] = "CC_OP_NZ_F32",
++ [CC_OP_NZ_F64] = "CC_OP_NZ_F64",
++ [CC_OP_NZ_F128] = "CC_OP_NZ_F128",
++ [CC_OP_ICM] = "CC_OP_ICM",
++ [CC_OP_SLA_32] = "CC_OP_SLA_32",
++ [CC_OP_SLA_64] = "CC_OP_SLA_64",
++ [CC_OP_FLOGR] = "CC_OP_FLOGR",
++ [CC_OP_LCBB] = "CC_OP_LCBB",
++ [CC_OP_VC] = "CC_OP_VC",
++ [CC_OP_MULS_32] = "CC_OP_MULS_32",
++ [CC_OP_MULS_64] = "CC_OP_MULS_64",
++ };
++
++ return cc_names[cc_op];
++}
+diff --git a/target/s390x/cpu-sysemu.c b/target/s390x/cpu-sysemu.c
+new file mode 100644
+index 0000000000..df2c6bf694
+--- /dev/null
++++ b/target/s390x/cpu-sysemu.c
+@@ -0,0 +1,309 @@
++/*
++ * QEMU S/390 CPU - System Emulation-only code
++ *
++ * Copyright (c) 2009 Ulrich Hecht
++ * Copyright (c) 2011 Alexander Graf
++ * Copyright (c) 2012 SUSE LINUX Products GmbH
++ * Copyright (c) 2012 IBM Corp.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, see <http://www.gnu.org/licenses/>.
++ */ ++ ++#include "qemu/osdep.h" ++#include "qapi/error.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "kvm/kvm_s390x.h" ++#include "sysemu/kvm.h" ++#include "sysemu/reset.h" ++#include "qemu/timer.h" ++#include "trace.h" ++#include "qapi/qapi-visit-run-state.h" ++#include "sysemu/hw_accel.h" ++ ++#include "hw/s390x/pv.h" ++#include "hw/boards.h" ++#include "sysemu/arch_init.h" ++#include "sysemu/sysemu.h" ++#include "sysemu/tcg.h" ++#include "hw/core/sysemu-cpu-ops.h" ++ ++/* S390CPUClass::load_normal() */ ++static void s390_cpu_load_normal(CPUState *s) ++{ ++ S390CPU *cpu = S390_CPU(s); ++ uint64_t spsw; ++ ++ if (!s390_is_pv()) { ++ spsw = ldq_phys(s->as, 0); ++ cpu->env.psw.mask = spsw & PSW_MASK_SHORT_CTRL; ++ /* ++ * Invert short psw indication, so SIE will report a specification ++ * exception if it was not set. ++ */ ++ cpu->env.psw.mask ^= PSW_MASK_SHORTPSW; ++ cpu->env.psw.addr = spsw & PSW_MASK_SHORT_ADDR; ++ } else { ++ /* ++ * Firmware requires us to set the load state before we set ++ * the cpu to operating on protected guests. ++ */ ++ s390_cpu_set_state(S390_CPU_STATE_LOAD, cpu); ++ } ++ s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu); ++} ++ ++void s390_cpu_machine_reset_cb(void *opaque) ++{ ++ S390CPU *cpu = opaque; ++ ++ run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, RUN_ON_CPU_NULL); ++} ++ ++static GuestPanicInformation *s390_cpu_get_crash_info(CPUState *cs) ++{ ++ GuestPanicInformation *panic_info; ++ S390CPU *cpu = S390_CPU(cs); ++ ++ cpu_synchronize_state(cs); ++ panic_info = g_malloc0(sizeof(GuestPanicInformation)); ++ ++ panic_info->type = GUEST_PANIC_INFORMATION_TYPE_S390; ++ panic_info->u.s390.core = cpu->env.core_id; ++ panic_info->u.s390.psw_mask = cpu->env.psw.mask; ++ panic_info->u.s390.psw_addr = cpu->env.psw.addr; ++ panic_info->u.s390.reason = cpu->env.crash_reason; ++ ++ return panic_info; ++} ++ ++static void s390_cpu_get_crash_info_qom(Object *obj, Visitor *v, ++ const char *name, void *opaque, ++ Error **errp) ++{ ++ CPUState *cs = CPU(obj); ++ GuestPanicInformation *panic_info; ++ ++ if (!cs->crash_occurred) { ++ error_setg(errp, "No crash occurred"); ++ return; ++ } ++ ++ panic_info = s390_cpu_get_crash_info(cs); ++ ++ visit_type_GuestPanicInformation(v, "crash-information", &panic_info, ++ errp); ++ qapi_free_GuestPanicInformation(panic_info); ++} ++ ++void s390_cpu_init_sysemu(Object *obj) ++{ ++ CPUState *cs = CPU(obj); ++ S390CPU *cpu = S390_CPU(obj); ++ ++ cs->start_powered_off = true; ++ object_property_add(obj, "crash-information", "GuestPanicInformation", ++ s390_cpu_get_crash_info_qom, NULL, NULL, NULL); ++ cpu->env.tod_timer = ++ timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu); ++ cpu->env.cpu_timer = ++ timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu); ++ s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); ++} ++ ++bool s390_cpu_realize_sysemu(DeviceState *dev, Error **errp) ++{ ++ S390CPU *cpu = S390_CPU(dev); ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ unsigned int max_cpus = ms->smp.max_cpus; ++ ++ if (cpu->env.core_id >= max_cpus) { ++ error_setg(errp, "Unable to add CPU with core-id: %" PRIu32 ++ ", maximum core-id: %d", cpu->env.core_id, ++ max_cpus - 1); ++ return false; ++ } ++ ++ if (cpu_exists(cpu->env.core_id)) { ++ error_setg(errp, "Unable to add CPU with core-id: %" PRIu32 ++ ", it already exists", cpu->env.core_id); ++ return false; ++ } ++ ++ /* sync cs->cpu_index and env->core_id. The latter is needed for TCG. 
*/ ++ CPU(cpu)->cpu_index = cpu->env.core_id; ++ return true; ++} ++ ++void s390_cpu_finalize(Object *obj) ++{ ++ S390CPU *cpu = S390_CPU(obj); ++ ++ timer_free(cpu->env.tod_timer); ++ timer_free(cpu->env.cpu_timer); ++ ++ qemu_unregister_reset(s390_cpu_machine_reset_cb, cpu); ++ g_free(cpu->irqstate); ++} ++ ++static const struct SysemuCPUOps s390_sysemu_ops = { ++ .get_phys_page_debug = s390_cpu_get_phys_page_debug, ++ .get_crash_info = s390_cpu_get_crash_info, ++ .write_elf64_note = s390_cpu_write_elf64_note, ++ .legacy_vmsd = &vmstate_s390_cpu, ++}; ++ ++void s390_cpu_class_init_sysemu(CPUClass *cc) ++{ ++ S390CPUClass *scc = S390_CPU_CLASS(cc); ++ ++ scc->load_normal = s390_cpu_load_normal; ++ cc->sysemu_ops = &s390_sysemu_ops; ++} ++ ++static bool disabled_wait(CPUState *cpu) ++{ ++ return cpu->halted && !(S390_CPU(cpu)->env.psw.mask & ++ (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK)); ++} ++ ++static unsigned s390_count_running_cpus(void) ++{ ++ CPUState *cpu; ++ int nr_running = 0; ++ ++ CPU_FOREACH(cpu) { ++ uint8_t state = S390_CPU(cpu)->env.cpu_state; ++ if (state == S390_CPU_STATE_OPERATING || ++ state == S390_CPU_STATE_LOAD) { ++ if (!disabled_wait(cpu)) { ++ nr_running++; ++ } ++ } ++ } ++ ++ return nr_running; ++} ++ ++unsigned int s390_cpu_halt(S390CPU *cpu) ++{ ++ CPUState *cs = CPU(cpu); ++ trace_cpu_halt(cs->cpu_index); ++ ++ if (!cs->halted) { ++ cs->halted = 1; ++ cs->exception_index = EXCP_HLT; ++ } ++ ++ return s390_count_running_cpus(); ++} ++ ++void s390_cpu_unhalt(S390CPU *cpu) ++{ ++ CPUState *cs = CPU(cpu); ++ trace_cpu_unhalt(cs->cpu_index); ++ ++ if (cs->halted) { ++ cs->halted = 0; ++ cs->exception_index = -1; ++ } ++} ++ ++unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu) ++ { ++ trace_cpu_set_state(CPU(cpu)->cpu_index, cpu_state); ++ ++ switch (cpu_state) { ++ case S390_CPU_STATE_STOPPED: ++ case S390_CPU_STATE_CHECK_STOP: ++ /* halt the cpu for common infrastructure */ ++ s390_cpu_halt(cpu); ++ break; ++ case S390_CPU_STATE_OPERATING: ++ case S390_CPU_STATE_LOAD: ++ /* ++ * Starting a CPU with a PSW WAIT bit set: ++ * KVM: handles this internally and triggers another WAIT exit. ++ * TCG: will actually try to continue to run. Don't unhalt, will ++ * be done when the CPU actually has work (an interrupt). 
++ */ ++ if (!tcg_enabled() || !(cpu->env.psw.mask & PSW_MASK_WAIT)) { ++ s390_cpu_unhalt(cpu); ++ } ++ break; ++ default: ++ error_report("Requested CPU state is not a valid S390 CPU state: %u", ++ cpu_state); ++ exit(1); ++ } ++ if (kvm_enabled() && cpu->env.cpu_state != cpu_state) { ++ kvm_s390_set_cpu_state(cpu, cpu_state); ++ } ++ cpu->env.cpu_state = cpu_state; ++ ++ return s390_count_running_cpus(); ++} ++ ++int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit) ++{ ++ if (kvm_enabled()) { ++ return kvm_s390_set_mem_limit(new_limit, hw_limit); ++ } ++ return 0; ++} ++ ++void s390_set_max_pagesize(uint64_t pagesize, Error **errp) ++{ ++ if (kvm_enabled()) { ++ kvm_s390_set_max_pagesize(pagesize, errp); ++ } ++} ++ ++void s390_cmma_reset(void) ++{ ++ if (kvm_enabled()) { ++ kvm_s390_cmma_reset(); ++ } ++} ++ ++int s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch_id, ++ int vq, bool assign) ++{ ++ if (kvm_enabled()) { ++ return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign); ++ } else { ++ return 0; ++ } ++} ++ ++void s390_crypto_reset(void) ++{ ++ if (kvm_enabled()) { ++ kvm_s390_crypto_reset(); ++ } ++} ++ ++void s390_enable_css_support(S390CPU *cpu) ++{ ++ if (kvm_enabled()) { ++ kvm_s390_enable_css_support(cpu); ++ } ++} ++ ++void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg) ++{ ++ if (kvm_enabled()) { ++ kvm_s390_set_diag318(cs, arg.host_ulong); ++ } ++} +diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c +index 890f382a36..7b7b05f1d3 100644 +--- a/target/s390x/cpu.c ++++ b/target/s390x/cpu.c +@@ -23,31 +23,64 @@ + #include "qemu/osdep.h" + #include "qapi/error.h" + #include "cpu.h" +-#include "internal.h" +-#include "kvm_s390x.h" ++#include "s390x-internal.h" ++#include "kvm/kvm_s390x.h" + #include "sysemu/kvm.h" + #include "sysemu/reset.h" +-#include "qemu/timer.h" +-#include "qemu/error-report.h" + #include "qemu/module.h" + #include "trace.h" +-#include "qapi/visitor.h" + #include "qapi/qapi-types-machine.h" +-#include "qapi/qapi-visit-run-state.h" + #include "sysemu/hw_accel.h" + #include "hw/qdev-properties.h" +-#ifndef CONFIG_USER_ONLY +-#include "hw/s390x/pv.h" +-#include "hw/boards.h" +-#include "sysemu/arch_init.h" +-#include "sysemu/tcg.h" +-#endif + #include "fpu/softfloat-helpers.h" + #include "disas/capstone.h" ++#include "sysemu/tcg.h" + + #define CR0_RESET 0xE0UL + #define CR14_RESET 0xC2000000UL; + ++void s390_cpu_set_psw(CPUS390XState *env, uint64_t mask, uint64_t addr) ++{ ++#ifndef CONFIG_USER_ONLY ++ uint64_t old_mask = env->psw.mask; ++#endif ++ ++ env->psw.addr = addr; ++ env->psw.mask = mask; ++ ++ /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */ ++ if (!tcg_enabled()) { ++ return; ++ } ++ env->cc_op = (mask >> 44) & 3; ++ ++#ifndef CONFIG_USER_ONLY ++ if ((old_mask ^ mask) & PSW_MASK_PER) { ++ s390_cpu_recompute_watchpoints(env_cpu(env)); ++ } ++ ++ if (mask & PSW_MASK_WAIT) { ++ s390_handle_wait(env_archcpu(env)); ++ } ++#endif ++} ++ ++uint64_t s390_cpu_get_psw_mask(CPUS390XState *env) ++{ ++ uint64_t r = env->psw.mask; ++ ++ if (tcg_enabled()) { ++ uint64_t cc = calc_cc(env, env->cc_op, env->cc_src, ++ env->cc_dst, env->cc_vr); ++ ++ assert(cc <= 3); ++ r &= ~PSW_MASK_CC; ++ r |= cc << 44; ++ } ++ ++ return r; ++} ++ + static void s390_cpu_set_pc(CPUState *cs, vaddr value) + { + S390CPU *cpu = S390_CPU(cs); +@@ -72,33 +105,6 @@ static bool s390_cpu_has_work(CPUState *cs) + return s390_cpu_has_int(cpu); + } + +-#if !defined(CONFIG_USER_ONLY) +-/* 
S390CPUClass::load_normal() */ +-static void s390_cpu_load_normal(CPUState *s) +-{ +- S390CPU *cpu = S390_CPU(s); +- uint64_t spsw; +- +- if (!s390_is_pv()) { +- spsw = ldq_phys(s->as, 0); +- cpu->env.psw.mask = spsw & PSW_MASK_SHORT_CTRL; +- /* +- * Invert short psw indication, so SIE will report a specification +- * exception if it was not set. +- */ +- cpu->env.psw.mask ^= PSW_MASK_SHORTPSW; +- cpu->env.psw.addr = spsw & PSW_MASK_SHORT_ADDR; +- } else { +- /* +- * Firmware requires us to set the load state before we set +- * the cpu to operating on protected guests. +- */ +- s390_cpu_set_state(S390_CPU_STATE_LOAD, cpu); +- } +- s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu); +-} +-#endif +- + /* S390CPUClass::reset() */ + static void s390_cpu_reset(CPUState *s, cpu_reset_type type) + { +@@ -169,15 +175,6 @@ static void s390_cpu_reset(CPUState *s, cpu_reset_type type) + } + } + +-#if !defined(CONFIG_USER_ONLY) +-static void s390_cpu_machine_reset_cb(void *opaque) +-{ +- S390CPU *cpu = opaque; +- +- run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, RUN_ON_CPU_NULL); +-} +-#endif +- + static void s390_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) + { + info->mach = bfd_mach_s390_64; +@@ -191,9 +188,6 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp) + { + CPUState *cs = CPU(dev); + S390CPUClass *scc = S390_CPU_GET_CLASS(dev); +-#if !defined(CONFIG_USER_ONLY) +- S390CPU *cpu = S390_CPU(dev); +-#endif + Error *err = NULL; + + /* the model has to be realized before qemu_init_vcpu() due to kvm */ +@@ -203,23 +197,9 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp) + } + + #if !defined(CONFIG_USER_ONLY) +- MachineState *ms = MACHINE(qdev_get_machine()); +- unsigned int max_cpus = ms->smp.max_cpus; +- if (cpu->env.core_id >= max_cpus) { +- error_setg(&err, "Unable to add CPU with core-id: %" PRIu32 +- ", maximum core-id: %d", cpu->env.core_id, +- max_cpus - 1); +- goto out; +- } +- +- if (cpu_exists(cpu->env.core_id)) { +- error_setg(&err, "Unable to add CPU with core-id: %" PRIu32 +- ", it already exists", cpu->env.core_id); ++ if (!s390_cpu_realize_sysemu(dev, &err)) { + goto out; + } +- +- /* sync cs->cpu_index and env->core_id. The latter is needed for TCG. 
*/ +- cs->cpu_index = cpu->env.core_id; + #endif + + cpu_exec_realizefn(cs, &err); +@@ -228,7 +208,7 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp) + } + + #if !defined(CONFIG_USER_ONLY) +- qemu_register_reset(s390_cpu_machine_reset_cb, cpu); ++ qemu_register_reset(s390_cpu_machine_reset_cb, S390_CPU(dev)); + #endif + s390_cpu_gdb_init(cs); + qemu_init_vcpu(cs); +@@ -250,44 +230,6 @@ out: + error_propagate(errp, err); + } + +-#if !defined(CONFIG_USER_ONLY) +-static GuestPanicInformation *s390_cpu_get_crash_info(CPUState *cs) +-{ +- GuestPanicInformation *panic_info; +- S390CPU *cpu = S390_CPU(cs); +- +- cpu_synchronize_state(cs); +- panic_info = g_malloc0(sizeof(GuestPanicInformation)); +- +- panic_info->type = GUEST_PANIC_INFORMATION_TYPE_S390; +- panic_info->u.s390.core = cpu->env.core_id; +- panic_info->u.s390.psw_mask = cpu->env.psw.mask; +- panic_info->u.s390.psw_addr = cpu->env.psw.addr; +- panic_info->u.s390.reason = cpu->env.crash_reason; +- +- return panic_info; +-} +- +-static void s390_cpu_get_crash_info_qom(Object *obj, Visitor *v, +- const char *name, void *opaque, +- Error **errp) +-{ +- CPUState *cs = CPU(obj); +- GuestPanicInformation *panic_info; +- +- if (!cs->crash_occurred) { +- error_setg(errp, "No crash occurred"); +- return; +- } +- +- panic_info = s390_cpu_get_crash_info(cs); +- +- visit_type_GuestPanicInformation(v, "crash-information", &panic_info, +- errp); +- qapi_free_GuestPanicInformation(panic_info); +-} +-#endif +- + static void s390_cpu_initfn(Object *obj) + { + CPUState *cs = CPU(obj); +@@ -295,169 +237,12 @@ static void s390_cpu_initfn(Object *obj) + + cpu_set_cpustate_pointers(cpu); + cs->exception_index = EXCP_HLT; +-#if !defined(CONFIG_USER_ONLY) +- cs->start_powered_off = true; +- object_property_add(obj, "crash-information", "GuestPanicInformation", +- s390_cpu_get_crash_info_qom, NULL, NULL, NULL); +- cpu->env.tod_timer = +- timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu); +- cpu->env.cpu_timer = +- timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu); +- s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); +-#endif +-} + +-static void s390_cpu_finalize(Object *obj) +-{ + #if !defined(CONFIG_USER_ONLY) +- S390CPU *cpu = S390_CPU(obj); +- +- timer_free(cpu->env.tod_timer); +- timer_free(cpu->env.cpu_timer); +- +- qemu_unregister_reset(s390_cpu_machine_reset_cb, cpu); +- g_free(cpu->irqstate); ++ s390_cpu_init_sysemu(obj); + #endif + } + +-#if !defined(CONFIG_USER_ONLY) +-static bool disabled_wait(CPUState *cpu) +-{ +- return cpu->halted && !(S390_CPU(cpu)->env.psw.mask & +- (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK)); +-} +- +-static unsigned s390_count_running_cpus(void) +-{ +- CPUState *cpu; +- int nr_running = 0; +- +- CPU_FOREACH(cpu) { +- uint8_t state = S390_CPU(cpu)->env.cpu_state; +- if (state == S390_CPU_STATE_OPERATING || +- state == S390_CPU_STATE_LOAD) { +- if (!disabled_wait(cpu)) { +- nr_running++; +- } +- } +- } +- +- return nr_running; +-} +- +-unsigned int s390_cpu_halt(S390CPU *cpu) +-{ +- CPUState *cs = CPU(cpu); +- trace_cpu_halt(cs->cpu_index); +- +- if (!cs->halted) { +- cs->halted = 1; +- cs->exception_index = EXCP_HLT; +- } +- +- return s390_count_running_cpus(); +-} +- +-void s390_cpu_unhalt(S390CPU *cpu) +-{ +- CPUState *cs = CPU(cpu); +- trace_cpu_unhalt(cs->cpu_index); +- +- if (cs->halted) { +- cs->halted = 0; +- cs->exception_index = -1; +- } +-} +- +-unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu) +- { +- trace_cpu_set_state(CPU(cpu)->cpu_index, cpu_state); +- +- switch 
(cpu_state) { +- case S390_CPU_STATE_STOPPED: +- case S390_CPU_STATE_CHECK_STOP: +- /* halt the cpu for common infrastructure */ +- s390_cpu_halt(cpu); +- break; +- case S390_CPU_STATE_OPERATING: +- case S390_CPU_STATE_LOAD: +- /* +- * Starting a CPU with a PSW WAIT bit set: +- * KVM: handles this internally and triggers another WAIT exit. +- * TCG: will actually try to continue to run. Don't unhalt, will +- * be done when the CPU actually has work (an interrupt). +- */ +- if (!tcg_enabled() || !(cpu->env.psw.mask & PSW_MASK_WAIT)) { +- s390_cpu_unhalt(cpu); +- } +- break; +- default: +- error_report("Requested CPU state is not a valid S390 CPU state: %u", +- cpu_state); +- exit(1); +- } +- if (kvm_enabled() && cpu->env.cpu_state != cpu_state) { +- kvm_s390_set_cpu_state(cpu, cpu_state); +- } +- cpu->env.cpu_state = cpu_state; +- +- return s390_count_running_cpus(); +-} +- +-int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit) +-{ +- if (kvm_enabled()) { +- return kvm_s390_set_mem_limit(new_limit, hw_limit); +- } +- return 0; +-} +- +-void s390_set_max_pagesize(uint64_t pagesize, Error **errp) +-{ +- if (kvm_enabled()) { +- kvm_s390_set_max_pagesize(pagesize, errp); +- } +-} +- +-void s390_cmma_reset(void) +-{ +- if (kvm_enabled()) { +- kvm_s390_cmma_reset(); +- } +-} +- +-int s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch_id, +- int vq, bool assign) +-{ +- if (kvm_enabled()) { +- return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign); +- } else { +- return 0; +- } +-} +- +-void s390_crypto_reset(void) +-{ +- if (kvm_enabled()) { +- kvm_s390_crypto_reset(); +- } +-} +- +-void s390_enable_css_support(S390CPU *cpu) +-{ +- if (kvm_enabled()) { +- kvm_s390_enable_css_support(cpu); +- } +-} +- +-void s390_do_cpu_set_diag318(CPUState *cs, run_on_cpu_data arg) +-{ +- if (kvm_enabled()) { +- kvm_s390_set_diag318(cs, arg.host_ulong); +- } +-} +-#endif +- + static gchar *s390_gdb_arch_name(CPUState *cs) + { + return g_strdup("s390:64-bit"); +@@ -476,17 +261,6 @@ static void s390_cpu_reset_full(DeviceState *dev) + return s390_cpu_reset(s, S390_CPU_RESET_CLEAR); + } + +-#ifndef CONFIG_USER_ONLY +-#include "hw/core/sysemu-cpu-ops.h" +- +-static const struct SysemuCPUOps s390_sysemu_ops = { +- .get_phys_page_debug = s390_cpu_get_phys_page_debug, +- .get_crash_info = s390_cpu_get_crash_info, +- .write_elf64_note = s390_cpu_write_elf64_note, +- .legacy_vmsd = &vmstate_s390_cpu, +-}; +-#endif +- + #ifdef CONFIG_TCG + #include "hw/core/tcg-cpu-ops.h" + +@@ -515,9 +289,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data) + dc->user_creatable = true; + + device_class_set_parent_reset(dc, s390_cpu_reset_full, &scc->parent_reset); +-#if !defined(CONFIG_USER_ONLY) +- scc->load_normal = s390_cpu_load_normal; +-#endif ++ + scc->reset = s390_cpu_reset; + cc->class_by_name = s390_cpu_class_by_name, + cc->has_work = s390_cpu_has_work; +@@ -526,7 +298,7 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data) + cc->gdb_read_register = s390_cpu_gdb_read_register; + cc->gdb_write_register = s390_cpu_gdb_write_register; + #ifndef CONFIG_USER_ONLY +- cc->sysemu_ops = &s390_sysemu_ops; ++ s390_cpu_class_init_sysemu(cc); + #endif + cc->disas_set_info = s390_cpu_disas_set_info; + cc->gdb_num_core_regs = S390_NUM_CORE_REGS; +@@ -546,7 +318,11 @@ static const TypeInfo s390_cpu_type_info = { + .instance_size = sizeof(S390CPU), + .instance_align = __alignof__(S390CPU), + .instance_init = s390_cpu_initfn, ++ ++#ifndef CONFIG_USER_ONLY + .instance_finalize = 
s390_cpu_finalize, ++#endif /* !CONFIG_USER_ONLY */ ++ + .abstract = true, + .class_size = sizeof(S390CPUClass), + .class_init = s390_cpu_class_init, +diff --git a/target/s390x/cpu_features_def.h.inc b/target/s390x/cpu_features_def.h.inc +index 7db3449e04..e86662bb3b 100644 +--- a/target/s390x/cpu_features_def.h.inc ++++ b/target/s390x/cpu_features_def.h.inc +@@ -109,6 +109,11 @@ DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH, "vxpdeh", STFL, 152, "Vector-Packed-Decimal- + DEF_FEAT(MSA_EXT_9, "msa9-base", STFL, 155, "Message-security-assist-extension-9 facility (excluding subfunctions)") + DEF_FEAT(ETOKEN, "etoken", STFL, 156, "Etoken facility") + DEF_FEAT(UNPACK, "unpack", STFL, 161, "Unpack facility") ++DEF_FEAT(NNPA, "nnpa", STFL, 165, "NNPA facility") ++DEF_FEAT(VECTOR_PACKED_DECIMAL_ENH2, "vxpdeh2", STFL, 192, "Vector-Packed-Decimal-Enhancement facility 2") ++DEF_FEAT(BEAR_ENH, "beareh", STFL, 193, "BEAR-enhancement facility") ++DEF_FEAT(RDP, "rdp", STFL, 194, "Reset-DAT-protection facility") ++DEF_FEAT(PAI, "pai", STFL, 196, "Processor-Activity-Instrumentation facility") + + /* Features exposed via SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */ + DEF_FEAT(SIE_GSLS, "gsls", SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility") +diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c +index 94090a6e22..4e4598cc77 100644 +--- a/target/s390x/cpu_models.c ++++ b/target/s390x/cpu_models.c +@@ -12,24 +12,17 @@ + + #include "qemu/osdep.h" + #include "cpu.h" +-#include "internal.h" +-#include "kvm_s390x.h" ++#include "s390x-internal.h" ++#include "kvm/kvm_s390x.h" + #include "sysemu/kvm.h" + #include "sysemu/tcg.h" + #include "qapi/error.h" + #include "qapi/visitor.h" +-#include "qemu/error-report.h" + #include "qemu/module.h" + #include "qemu/qemu-print.h" +-#include "qapi/qmp/qerror.h" +-#include "qapi/qobject-input-visitor.h" +-#include "qapi/qmp/qdict.h" + #ifndef CONFIG_USER_ONLY +-#include "sysemu/arch_init.h" + #include "sysemu/sysemu.h" +-#include "hw/pci/pci.h" + #endif +-#include "qapi/qapi-commands-machine-target.h" + #include "hw/s390x/pv.h" + + #define CPUDEF_INIT(_type, _gen, _ec_ga, _mha_pow, _hmfai, _name, _desc) \ +@@ -88,6 +81,8 @@ static S390CPUDef s390_cpu_defs[] = { + CPUDEF_INIT(0x3907, 14, 1, 47, 0x08000000U, "z14ZR1", "IBM z14 Model ZR1 GA1"), + CPUDEF_INIT(0x8561, 15, 1, 47, 0x08000000U, "gen15a", "IBM z15 T01 GA1"), + CPUDEF_INIT(0x8562, 15, 1, 47, 0x08000000U, "gen15b", "IBM z15 T02 GA1"), ++ CPUDEF_INIT(0x3931, 16, 1, 47, 0x08000000U, "gen16a", "IBM 3931 GA1"), ++ CPUDEF_INIT(0x3932, 16, 1, 47, 0x08000000U, "gen16b", "IBM 3932 GA1"), + }; + + #define QEMU_MAX_CPU_TYPE 0x3906 +@@ -414,381 +409,6 @@ void s390_cpu_list(void) + } + } + +-static S390CPUModel *get_max_cpu_model(Error **errp); +- +-#ifndef CONFIG_USER_ONLY +-static void list_add_feat(const char *name, void *opaque); +- +-static void check_unavailable_features(const S390CPUModel *max_model, +- const S390CPUModel *model, +- strList **unavailable) +-{ +- S390FeatBitmap missing; +- +- /* check general model compatibility */ +- if (max_model->def->gen < model->def->gen || +- (max_model->def->gen == model->def->gen && +- max_model->def->ec_ga < model->def->ec_ga)) { +- list_add_feat("type", unavailable); +- } +- +- /* detect missing features if any to properly report them */ +- bitmap_andnot(missing, model->features, max_model->features, +- S390_FEAT_MAX); +- if (!bitmap_empty(missing, S390_FEAT_MAX)) { +- s390_feat_bitmap_to_ascii(missing, unavailable, list_add_feat); +- } +-} +- 
+-struct CpuDefinitionInfoListData { +- CpuDefinitionInfoList *list; +- S390CPUModel *model; +-}; +- +-static void create_cpu_model_list(ObjectClass *klass, void *opaque) +-{ +- struct CpuDefinitionInfoListData *cpu_list_data = opaque; +- CpuDefinitionInfoList **cpu_list = &cpu_list_data->list; +- CpuDefinitionInfo *info; +- char *name = g_strdup(object_class_get_name(klass)); +- S390CPUClass *scc = S390_CPU_CLASS(klass); +- +- /* strip off the -s390x-cpu */ +- g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0; +- info = g_new0(CpuDefinitionInfo, 1); +- info->name = name; +- info->has_migration_safe = true; +- info->migration_safe = scc->is_migration_safe; +- info->q_static = scc->is_static; +- info->q_typename = g_strdup(object_class_get_name(klass)); +- /* check for unavailable features */ +- if (cpu_list_data->model) { +- Object *obj; +- S390CPU *sc; +- obj = object_new_with_class(klass); +- sc = S390_CPU(obj); +- if (sc->model) { +- info->has_unavailable_features = true; +- check_unavailable_features(cpu_list_data->model, sc->model, +- &info->unavailable_features); +- } +- object_unref(obj); +- } +- +- QAPI_LIST_PREPEND(*cpu_list, info); +-} +- +-CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) +-{ +- struct CpuDefinitionInfoListData list_data = { +- .list = NULL, +- }; +- +- list_data.model = get_max_cpu_model(NULL); +- +- object_class_foreach(create_cpu_model_list, TYPE_S390_CPU, false, +- &list_data); +- +- return list_data.list; +-} +- +-static void cpu_model_from_info(S390CPUModel *model, const CpuModelInfo *info, +- Error **errp) +-{ +- Error *err = NULL; +- const QDict *qdict = NULL; +- const QDictEntry *e; +- Visitor *visitor; +- ObjectClass *oc; +- S390CPU *cpu; +- Object *obj; +- +- if (info->props) { +- qdict = qobject_to(QDict, info->props); +- if (!qdict) { +- error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict"); +- return; +- } +- } +- +- oc = cpu_class_by_name(TYPE_S390_CPU, info->name); +- if (!oc) { +- error_setg(errp, "The CPU definition \'%s\' is unknown.", info->name); +- return; +- } +- if (S390_CPU_CLASS(oc)->kvm_required && !kvm_enabled()) { +- error_setg(errp, "The CPU definition '%s' requires KVM", info->name); +- return; +- } +- obj = object_new_with_class(oc); +- cpu = S390_CPU(obj); +- +- if (!cpu->model) { +- error_setg(errp, "Details about the host CPU model are not available, " +- "it cannot be used."); +- object_unref(obj); +- return; +- } +- +- if (qdict) { +- visitor = qobject_input_visitor_new(info->props); +- if (!visit_start_struct(visitor, NULL, NULL, 0, errp)) { +- visit_free(visitor); +- object_unref(obj); +- return; +- } +- for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) { +- if (!object_property_set(obj, e->key, visitor, &err)) { +- break; +- } +- } +- if (!err) { +- visit_check_struct(visitor, &err); +- } +- visit_end_struct(visitor, NULL); +- visit_free(visitor); +- if (err) { +- error_propagate(errp, err); +- object_unref(obj); +- return; +- } +- } +- +- /* copy the model and throw the cpu away */ +- memcpy(model, cpu->model, sizeof(*model)); +- object_unref(obj); +-} +- +-static void qdict_add_disabled_feat(const char *name, void *opaque) +-{ +- qdict_put_bool(opaque, name, false); +-} +- +-static void qdict_add_enabled_feat(const char *name, void *opaque) +-{ +- qdict_put_bool(opaque, name, true); +-} +- +-/* convert S390CPUDef into a static CpuModelInfo */ +-static void cpu_info_from_model(CpuModelInfo *info, const S390CPUModel *model, +- bool delta_changes) +-{ +- QDict *qdict = qdict_new(); +- S390FeatBitmap 
bitmap; +- +- /* always fallback to the static base model */ +- info->name = g_strdup_printf("%s-base", model->def->name); +- +- if (delta_changes) { +- /* features deleted from the base feature set */ +- bitmap_andnot(bitmap, model->def->base_feat, model->features, +- S390_FEAT_MAX); +- if (!bitmap_empty(bitmap, S390_FEAT_MAX)) { +- s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat); +- } +- +- /* features added to the base feature set */ +- bitmap_andnot(bitmap, model->features, model->def->base_feat, +- S390_FEAT_MAX); +- if (!bitmap_empty(bitmap, S390_FEAT_MAX)) { +- s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_enabled_feat); +- } +- } else { +- /* expand all features */ +- s390_feat_bitmap_to_ascii(model->features, qdict, +- qdict_add_enabled_feat); +- bitmap_complement(bitmap, model->features, S390_FEAT_MAX); +- s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat); +- } +- +- if (!qdict_size(qdict)) { +- qobject_unref(qdict); +- } else { +- info->props = QOBJECT(qdict); +- info->has_props = true; +- } +-} +- +-CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, +- CpuModelInfo *model, +- Error **errp) +-{ +- Error *err = NULL; +- CpuModelExpansionInfo *expansion_info = NULL; +- S390CPUModel s390_model; +- bool delta_changes = false; +- +- /* convert it to our internal representation */ +- cpu_model_from_info(&s390_model, model, &err); +- if (err) { +- error_propagate(errp, err); +- return NULL; +- } +- +- if (type == CPU_MODEL_EXPANSION_TYPE_STATIC) { +- delta_changes = true; +- } else if (type != CPU_MODEL_EXPANSION_TYPE_FULL) { +- error_setg(errp, "The requested expansion type is not supported."); +- return NULL; +- } +- +- /* convert it back to a static representation */ +- expansion_info = g_new0(CpuModelExpansionInfo, 1); +- expansion_info->model = g_malloc0(sizeof(*expansion_info->model)); +- cpu_info_from_model(expansion_info->model, &s390_model, delta_changes); +- return expansion_info; +-} +- +-static void list_add_feat(const char *name, void *opaque) +-{ +- strList **last = (strList **) opaque; +- +- QAPI_LIST_PREPEND(*last, g_strdup(name)); +-} +- +-CpuModelCompareInfo *qmp_query_cpu_model_comparison(CpuModelInfo *infoa, +- CpuModelInfo *infob, +- Error **errp) +-{ +- Error *err = NULL; +- CpuModelCompareResult feat_result, gen_result; +- CpuModelCompareInfo *compare_info; +- S390FeatBitmap missing, added; +- S390CPUModel modela, modelb; +- +- /* convert both models to our internal representation */ +- cpu_model_from_info(&modela, infoa, &err); +- if (err) { +- error_propagate(errp, err); +- return NULL; +- } +- cpu_model_from_info(&modelb, infob, &err); +- if (err) { +- error_propagate(errp, err); +- return NULL; +- } +- compare_info = g_new0(CpuModelCompareInfo, 1); +- +- /* check the cpu generation and ga level */ +- if (modela.def->gen == modelb.def->gen) { +- if (modela.def->ec_ga == modelb.def->ec_ga) { +- /* ec and corresponding bc are identical */ +- gen_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL; +- } else if (modela.def->ec_ga < modelb.def->ec_ga) { +- gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET; +- } else { +- gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; +- } +- } else if (modela.def->gen < modelb.def->gen) { +- gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET; +- } else { +- gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; +- } +- if (gen_result != CPU_MODEL_COMPARE_RESULT_IDENTICAL) { +- /* both models cannot be made identical */ +- list_add_feat("type", &compare_info->responsible_properties); +- } 
+- +- /* check the feature set */ +- if (bitmap_equal(modela.features, modelb.features, S390_FEAT_MAX)) { +- feat_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL; +- } else { +- bitmap_andnot(missing, modela.features, modelb.features, S390_FEAT_MAX); +- s390_feat_bitmap_to_ascii(missing, +- &compare_info->responsible_properties, +- list_add_feat); +- bitmap_andnot(added, modelb.features, modela.features, S390_FEAT_MAX); +- s390_feat_bitmap_to_ascii(added, &compare_info->responsible_properties, +- list_add_feat); +- if (bitmap_empty(missing, S390_FEAT_MAX)) { +- feat_result = CPU_MODEL_COMPARE_RESULT_SUBSET; +- } else if (bitmap_empty(added, S390_FEAT_MAX)) { +- feat_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; +- } else { +- feat_result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE; +- } +- } +- +- /* combine the results */ +- if (gen_result == feat_result) { +- compare_info->result = gen_result; +- } else if (feat_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) { +- compare_info->result = gen_result; +- } else if (gen_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) { +- compare_info->result = feat_result; +- } else { +- compare_info->result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE; +- } +- return compare_info; +-} +- +-CpuModelBaselineInfo *qmp_query_cpu_model_baseline(CpuModelInfo *infoa, +- CpuModelInfo *infob, +- Error **errp) +-{ +- Error *err = NULL; +- CpuModelBaselineInfo *baseline_info; +- S390CPUModel modela, modelb, model; +- uint16_t cpu_type; +- uint8_t max_gen_ga; +- uint8_t max_gen; +- +- /* convert both models to our internal representation */ +- cpu_model_from_info(&modela, infoa, &err); +- if (err) { +- error_propagate(errp, err); +- return NULL; +- } +- +- cpu_model_from_info(&modelb, infob, &err); +- if (err) { +- error_propagate(errp, err); +- return NULL; +- } +- +- /* features both models support */ +- bitmap_and(model.features, modela.features, modelb.features, S390_FEAT_MAX); +- +- /* detect the maximum model not regarding features */ +- if (modela.def->gen == modelb.def->gen) { +- if (modela.def->type == modelb.def->type) { +- cpu_type = modela.def->type; +- } else { +- cpu_type = 0; +- } +- max_gen = modela.def->gen; +- max_gen_ga = MIN(modela.def->ec_ga, modelb.def->ec_ga); +- } else if (modela.def->gen > modelb.def->gen) { +- cpu_type = modelb.def->type; +- max_gen = modelb.def->gen; +- max_gen_ga = modelb.def->ec_ga; +- } else { +- cpu_type = modela.def->type; +- max_gen = modela.def->gen; +- max_gen_ga = modela.def->ec_ga; +- } +- +- model.def = s390_find_cpu_def(cpu_type, max_gen, max_gen_ga, +- model.features); +- +- /* models without early base features (esan3) are bad */ +- if (!model.def) { +- error_setg(errp, "No compatible CPU model could be created as" +- " important base features are disabled"); +- return NULL; +- } +- +- /* strip off features not part of the max model */ +- bitmap_and(model.features, model.features, model.def->full_feat, +- S390_FEAT_MAX); +- +- baseline_info = g_new0(CpuModelBaselineInfo, 1); +- baseline_info->model = g_malloc0(sizeof(*baseline_info->model)); +- cpu_info_from_model(baseline_info->model, &model, true); +- return baseline_info; +-} +-#endif +- + static void check_consistency(const S390CPUModel *model) + { + static int dep[][2] = { +@@ -812,6 +432,8 @@ static void check_consistency(const S390CPUModel *model) + { S390_FEAT_MSA_EXT_9, S390_FEAT_MSA_EXT_4 }, + { S390_FEAT_MULTIPLE_EPOCH, S390_FEAT_TOD_CLOCK_STEERING }, + { S390_FEAT_VECTOR_PACKED_DECIMAL, S390_FEAT_VECTOR }, ++ { S390_FEAT_VECTOR_PACKED_DECIMAL_ENH, 
S390_FEAT_VECTOR_PACKED_DECIMAL }, ++ { S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2, S390_FEAT_VECTOR_PACKED_DECIMAL_ENH }, + { S390_FEAT_VECTOR_ENH, S390_FEAT_VECTOR }, + { S390_FEAT_INSTRUCTION_EXEC_PROT, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2 }, + { S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, S390_FEAT_ESOP }, +@@ -843,6 +465,8 @@ static void check_consistency(const S390CPUModel *model) + { S390_FEAT_PTFF_STOUE, S390_FEAT_MULTIPLE_EPOCH }, + { S390_FEAT_AP_QUEUE_INTERRUPT_CONTROL, S390_FEAT_AP }, + { S390_FEAT_DIAG_318, S390_FEAT_EXTENDED_LENGTH_SCCB }, ++ { S390_FEAT_NNPA, S390_FEAT_VECTOR }, ++ { S390_FEAT_RDP, S390_FEAT_LOCAL_TLB_CLEARING }, + }; + int i; + +@@ -900,7 +524,7 @@ static void check_compatibility(const S390CPUModel *max_model, + "available in the configuration: "); + } + +-static S390CPUModel *get_max_cpu_model(Error **errp) ++S390CPUModel *get_max_cpu_model(Error **errp) + { + Error *err = NULL; + static S390CPUModel max_model; +@@ -925,39 +549,6 @@ static S390CPUModel *get_max_cpu_model(Error **errp) + return &max_model; + } + +-static inline void apply_cpu_model(const S390CPUModel *model, Error **errp) +-{ +-#ifndef CONFIG_USER_ONLY +- Error *err = NULL; +- static S390CPUModel applied_model; +- static bool applied; +- +- /* +- * We have the same model for all VCPUs. KVM can only be configured before +- * any VCPUs are defined in KVM. +- */ +- if (applied) { +- if (model && memcmp(&applied_model, model, sizeof(S390CPUModel))) { +- error_setg(errp, "Mixed CPU models are not supported on s390x."); +- } +- return; +- } +- +- if (kvm_enabled()) { +- kvm_s390_apply_cpu_model(model, &err); +- if (err) { +- error_propagate(errp, err); +- return; +- } +- } +- +- applied = true; +- if (model) { +- applied_model = *model; +- } +-#endif +-} +- + void s390_realize_cpu_model(CPUState *cs, Error **errp) + { + Error *err = NULL; +diff --git a/target/s390x/cpu_models_sysemu.c b/target/s390x/cpu_models_sysemu.c +new file mode 100644 +index 0000000000..05c3ccaaff +--- /dev/null ++++ b/target/s390x/cpu_models_sysemu.c +@@ -0,0 +1,426 @@ ++/* ++ * CPU models for s390x - System Emulation-only ++ * ++ * Copyright 2016 IBM Corp. ++ * ++ * Author(s): David Hildenbrand ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or (at ++ * your option) any later version. See the COPYING file in the top-level ++ * directory. 
++ */ ++ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "kvm/kvm_s390x.h" ++#include "sysemu/kvm.h" ++#include "sysemu/tcg.h" ++#include "qapi/error.h" ++#include "qapi/visitor.h" ++#include "qapi/qmp/qerror.h" ++#include "qapi/qobject-input-visitor.h" ++#include "qapi/qmp/qdict.h" ++#include "qapi/qapi-commands-machine-target.h" ++ ++static void list_add_feat(const char *name, void *opaque); ++ ++static void check_unavailable_features(const S390CPUModel *max_model, ++ const S390CPUModel *model, ++ strList **unavailable) ++{ ++ S390FeatBitmap missing; ++ ++ /* check general model compatibility */ ++ if (max_model->def->gen < model->def->gen || ++ (max_model->def->gen == model->def->gen && ++ max_model->def->ec_ga < model->def->ec_ga)) { ++ list_add_feat("type", unavailable); ++ } ++ ++ /* detect missing features if any to properly report them */ ++ bitmap_andnot(missing, model->features, max_model->features, ++ S390_FEAT_MAX); ++ if (!bitmap_empty(missing, S390_FEAT_MAX)) { ++ s390_feat_bitmap_to_ascii(missing, unavailable, list_add_feat); ++ } ++} ++ ++struct CpuDefinitionInfoListData { ++ CpuDefinitionInfoList *list; ++ S390CPUModel *model; ++}; ++ ++static void create_cpu_model_list(ObjectClass *klass, void *opaque) ++{ ++ struct CpuDefinitionInfoListData *cpu_list_data = opaque; ++ CpuDefinitionInfoList **cpu_list = &cpu_list_data->list; ++ CpuDefinitionInfo *info; ++ char *name = g_strdup(object_class_get_name(klass)); ++ S390CPUClass *scc = S390_CPU_CLASS(klass); ++ ++ /* strip off the -s390x-cpu */ ++ g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0; ++ info = g_new0(CpuDefinitionInfo, 1); ++ info->name = name; ++ info->has_migration_safe = true; ++ info->migration_safe = scc->is_migration_safe; ++ info->q_static = scc->is_static; ++ info->q_typename = g_strdup(object_class_get_name(klass)); ++ /* check for unavailable features */ ++ if (cpu_list_data->model) { ++ Object *obj; ++ S390CPU *sc; ++ obj = object_new_with_class(klass); ++ sc = S390_CPU(obj); ++ if (sc->model) { ++ info->has_unavailable_features = true; ++ check_unavailable_features(cpu_list_data->model, sc->model, ++ &info->unavailable_features); ++ } ++ object_unref(obj); ++ } ++ ++ QAPI_LIST_PREPEND(*cpu_list, info); ++} ++ ++CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) ++{ ++ struct CpuDefinitionInfoListData list_data = { ++ .list = NULL, ++ }; ++ ++ list_data.model = get_max_cpu_model(NULL); ++ ++ object_class_foreach(create_cpu_model_list, TYPE_S390_CPU, false, ++ &list_data); ++ ++ return list_data.list; ++} ++ ++static void cpu_model_from_info(S390CPUModel *model, const CpuModelInfo *info, ++ Error **errp) ++{ ++ Error *err = NULL; ++ const QDict *qdict = NULL; ++ const QDictEntry *e; ++ Visitor *visitor; ++ ObjectClass *oc; ++ S390CPU *cpu; ++ Object *obj; ++ ++ if (info->props) { ++ qdict = qobject_to(QDict, info->props); ++ if (!qdict) { ++ error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict"); ++ return; ++ } ++ } ++ ++ oc = cpu_class_by_name(TYPE_S390_CPU, info->name); ++ if (!oc) { ++ error_setg(errp, "The CPU definition \'%s\' is unknown.", info->name); ++ return; ++ } ++ if (S390_CPU_CLASS(oc)->kvm_required && !kvm_enabled()) { ++ error_setg(errp, "The CPU definition '%s' requires KVM", info->name); ++ return; ++ } ++ obj = object_new_with_class(oc); ++ cpu = S390_CPU(obj); ++ ++ if (!cpu->model) { ++ error_setg(errp, "Details about the host CPU model are not available, " ++ "it cannot be used."); ++ object_unref(obj); ++ return; ++ } ++ ++ if 
(qdict) { ++ visitor = qobject_input_visitor_new(info->props); ++ if (!visit_start_struct(visitor, NULL, NULL, 0, errp)) { ++ visit_free(visitor); ++ object_unref(obj); ++ return; ++ } ++ for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) { ++ if (!object_property_set(obj, e->key, visitor, &err)) { ++ break; ++ } ++ } ++ if (!err) { ++ visit_check_struct(visitor, &err); ++ } ++ visit_end_struct(visitor, NULL); ++ visit_free(visitor); ++ if (err) { ++ error_propagate(errp, err); ++ object_unref(obj); ++ return; ++ } ++ } ++ ++ /* copy the model and throw the cpu away */ ++ memcpy(model, cpu->model, sizeof(*model)); ++ object_unref(obj); ++} ++ ++static void qdict_add_disabled_feat(const char *name, void *opaque) ++{ ++ qdict_put_bool(opaque, name, false); ++} ++ ++static void qdict_add_enabled_feat(const char *name, void *opaque) ++{ ++ qdict_put_bool(opaque, name, true); ++} ++ ++/* convert S390CPUDef into a static CpuModelInfo */ ++static void cpu_info_from_model(CpuModelInfo *info, const S390CPUModel *model, ++ bool delta_changes) ++{ ++ QDict *qdict = qdict_new(); ++ S390FeatBitmap bitmap; ++ ++ /* always fallback to the static base model */ ++ info->name = g_strdup_printf("%s-base", model->def->name); ++ ++ if (delta_changes) { ++ /* features deleted from the base feature set */ ++ bitmap_andnot(bitmap, model->def->base_feat, model->features, ++ S390_FEAT_MAX); ++ if (!bitmap_empty(bitmap, S390_FEAT_MAX)) { ++ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat); ++ } ++ ++ /* features added to the base feature set */ ++ bitmap_andnot(bitmap, model->features, model->def->base_feat, ++ S390_FEAT_MAX); ++ if (!bitmap_empty(bitmap, S390_FEAT_MAX)) { ++ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_enabled_feat); ++ } ++ } else { ++ /* expand all features */ ++ s390_feat_bitmap_to_ascii(model->features, qdict, ++ qdict_add_enabled_feat); ++ bitmap_complement(bitmap, model->features, S390_FEAT_MAX); ++ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat); ++ } ++ ++ if (!qdict_size(qdict)) { ++ qobject_unref(qdict); ++ } else { ++ info->props = QOBJECT(qdict); ++ info->has_props = true; ++ } ++} ++ ++CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, ++ CpuModelInfo *model, ++ Error **errp) ++{ ++ Error *err = NULL; ++ CpuModelExpansionInfo *expansion_info = NULL; ++ S390CPUModel s390_model; ++ bool delta_changes = false; ++ ++ /* convert it to our internal representation */ ++ cpu_model_from_info(&s390_model, model, &err); ++ if (err) { ++ error_propagate(errp, err); ++ return NULL; ++ } ++ ++ if (type == CPU_MODEL_EXPANSION_TYPE_STATIC) { ++ delta_changes = true; ++ } else if (type != CPU_MODEL_EXPANSION_TYPE_FULL) { ++ error_setg(errp, "The requested expansion type is not supported."); ++ return NULL; ++ } ++ ++ /* convert it back to a static representation */ ++ expansion_info = g_new0(CpuModelExpansionInfo, 1); ++ expansion_info->model = g_malloc0(sizeof(*expansion_info->model)); ++ cpu_info_from_model(expansion_info->model, &s390_model, delta_changes); ++ return expansion_info; ++} ++ ++static void list_add_feat(const char *name, void *opaque) ++{ ++ strList **last = (strList **) opaque; ++ ++ QAPI_LIST_PREPEND(*last, g_strdup(name)); ++} ++ ++CpuModelCompareInfo *qmp_query_cpu_model_comparison(CpuModelInfo *infoa, ++ CpuModelInfo *infob, ++ Error **errp) ++{ ++ Error *err = NULL; ++ CpuModelCompareResult feat_result, gen_result; ++ CpuModelCompareInfo *compare_info; ++ S390FeatBitmap missing, added; ++ S390CPUModel 
modela, modelb; ++ ++ /* convert both models to our internal representation */ ++ cpu_model_from_info(&modela, infoa, &err); ++ if (err) { ++ error_propagate(errp, err); ++ return NULL; ++ } ++ cpu_model_from_info(&modelb, infob, &err); ++ if (err) { ++ error_propagate(errp, err); ++ return NULL; ++ } ++ compare_info = g_new0(CpuModelCompareInfo, 1); ++ ++ /* check the cpu generation and ga level */ ++ if (modela.def->gen == modelb.def->gen) { ++ if (modela.def->ec_ga == modelb.def->ec_ga) { ++ /* ec and corresponding bc are identical */ ++ gen_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL; ++ } else if (modela.def->ec_ga < modelb.def->ec_ga) { ++ gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET; ++ } else { ++ gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; ++ } ++ } else if (modela.def->gen < modelb.def->gen) { ++ gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET; ++ } else { ++ gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; ++ } ++ if (gen_result != CPU_MODEL_COMPARE_RESULT_IDENTICAL) { ++ /* both models cannot be made identical */ ++ list_add_feat("type", &compare_info->responsible_properties); ++ } ++ ++ /* check the feature set */ ++ if (bitmap_equal(modela.features, modelb.features, S390_FEAT_MAX)) { ++ feat_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL; ++ } else { ++ bitmap_andnot(missing, modela.features, modelb.features, S390_FEAT_MAX); ++ s390_feat_bitmap_to_ascii(missing, ++ &compare_info->responsible_properties, ++ list_add_feat); ++ bitmap_andnot(added, modelb.features, modela.features, S390_FEAT_MAX); ++ s390_feat_bitmap_to_ascii(added, &compare_info->responsible_properties, ++ list_add_feat); ++ if (bitmap_empty(missing, S390_FEAT_MAX)) { ++ feat_result = CPU_MODEL_COMPARE_RESULT_SUBSET; ++ } else if (bitmap_empty(added, S390_FEAT_MAX)) { ++ feat_result = CPU_MODEL_COMPARE_RESULT_SUPERSET; ++ } else { ++ feat_result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE; ++ } ++ } ++ ++ /* combine the results */ ++ if (gen_result == feat_result) { ++ compare_info->result = gen_result; ++ } else if (feat_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) { ++ compare_info->result = gen_result; ++ } else if (gen_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) { ++ compare_info->result = feat_result; ++ } else { ++ compare_info->result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE; ++ } ++ return compare_info; ++} ++ ++CpuModelBaselineInfo *qmp_query_cpu_model_baseline(CpuModelInfo *infoa, ++ CpuModelInfo *infob, ++ Error **errp) ++{ ++ Error *err = NULL; ++ CpuModelBaselineInfo *baseline_info; ++ S390CPUModel modela, modelb, model; ++ uint16_t cpu_type; ++ uint8_t max_gen_ga; ++ uint8_t max_gen; ++ ++ /* convert both models to our internal representation */ ++ cpu_model_from_info(&modela, infoa, &err); ++ if (err) { ++ error_propagate(errp, err); ++ return NULL; ++ } ++ ++ cpu_model_from_info(&modelb, infob, &err); ++ if (err) { ++ error_propagate(errp, err); ++ return NULL; ++ } ++ ++ /* features both models support */ ++ bitmap_and(model.features, modela.features, modelb.features, S390_FEAT_MAX); ++ ++ /* detect the maximum model not regarding features */ ++ if (modela.def->gen == modelb.def->gen) { ++ if (modela.def->type == modelb.def->type) { ++ cpu_type = modela.def->type; ++ } else { ++ cpu_type = 0; ++ } ++ max_gen = modela.def->gen; ++ max_gen_ga = MIN(modela.def->ec_ga, modelb.def->ec_ga); ++ } else if (modela.def->gen > modelb.def->gen) { ++ cpu_type = modelb.def->type; ++ max_gen = modelb.def->gen; ++ max_gen_ga = modelb.def->ec_ga; ++ } else { ++ cpu_type = modela.def->type; ++ max_gen = 
modela.def->gen; ++ max_gen_ga = modela.def->ec_ga; ++ } ++ ++ model.def = s390_find_cpu_def(cpu_type, max_gen, max_gen_ga, ++ model.features); ++ ++ /* models without early base features (esan3) are bad */ ++ if (!model.def) { ++ error_setg(errp, "No compatible CPU model could be created as" ++ " important base features are disabled"); ++ return NULL; ++ } ++ ++ /* strip off features not part of the max model */ ++ bitmap_and(model.features, model.features, model.def->full_feat, ++ S390_FEAT_MAX); ++ ++ baseline_info = g_new0(CpuModelBaselineInfo, 1); ++ baseline_info->model = g_malloc0(sizeof(*baseline_info->model)); ++ cpu_info_from_model(baseline_info->model, &model, true); ++ return baseline_info; ++} ++ ++void apply_cpu_model(const S390CPUModel *model, Error **errp) ++{ ++ Error *err = NULL; ++ static S390CPUModel applied_model; ++ static bool applied; ++ ++ /* ++ * We have the same model for all VCPUs. KVM can only be configured before ++ * any VCPUs are defined in KVM. ++ */ ++ if (applied) { ++ if (model && memcmp(&applied_model, model, sizeof(S390CPUModel))) { ++ error_setg(errp, "Mixed CPU models are not supported on s390x."); ++ } ++ return; ++ } ++ ++ if (kvm_enabled()) { ++ kvm_s390_apply_cpu_model(model, &err); ++ if (err) { ++ error_propagate(errp, err); ++ return; ++ } ++ } ++ ++ applied = true; ++ if (model) { ++ applied_model = *model; ++ } ++} +diff --git a/target/s390x/cpu_models_user.c b/target/s390x/cpu_models_user.c +new file mode 100644 +index 0000000000..df24d12d9e +--- /dev/null ++++ b/target/s390x/cpu_models_user.c +@@ -0,0 +1,20 @@ ++/* ++ * CPU models for s390x - User-mode ++ * ++ * Copyright 2016 IBM Corp. ++ * ++ * Author(s): David Hildenbrand ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or (at ++ * your option) any later version. See the COPYING file in the top-level ++ * directory. ++ */ ++ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "qapi/error.h" ++ ++void apply_cpu_model(const S390CPUModel *model, Error **errp) ++{ ++} +diff --git a/target/s390x/crypto_helper.c b/target/s390x/crypto_helper.c +deleted file mode 100644 +index ff3fbc3950..0000000000 +--- a/target/s390x/crypto_helper.c ++++ /dev/null +@@ -1,61 +0,0 @@ +-/* +- * s390x crypto helpers +- * +- * Copyright (c) 2017 Red Hat Inc +- * +- * Authors: +- * David Hildenbrand +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. 
+- */ +- +-#include "qemu/osdep.h" +-#include "qemu/main-loop.h" +-#include "internal.h" +-#include "tcg_s390x.h" +-#include "exec/helper-proto.h" +-#include "exec/exec-all.h" +-#include "exec/cpu_ldst.h" +- +-uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3, +- uint32_t type) +-{ +- const uintptr_t ra = GETPC(); +- const uint8_t mod = env->regs[0] & 0x80ULL; +- const uint8_t fc = env->regs[0] & 0x7fULL; +- uint8_t subfunc[16] = { 0 }; +- uint64_t param_addr; +- int i; +- +- switch (type) { +- case S390_FEAT_TYPE_KMAC: +- case S390_FEAT_TYPE_KIMD: +- case S390_FEAT_TYPE_KLMD: +- case S390_FEAT_TYPE_PCKMO: +- case S390_FEAT_TYPE_PCC: +- if (mod) { +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); +- } +- break; +- } +- +- s390_get_feat_block(type, subfunc); +- if (!test_be_bit(fc, subfunc)) { +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); +- } +- +- switch (fc) { +- case 0: /* query subfunction */ +- for (i = 0; i < 16; i++) { +- param_addr = wrap_address(env, env->regs[1] + i); +- cpu_stb_data_ra(env, param_addr, subfunc[i], ra); +- } +- break; +- default: +- /* we don't implement any other subfunction yet */ +- g_assert_not_reached(); +- } +- +- return 0; +-} +diff --git a/target/s390x/diag.c b/target/s390x/diag.c +index d620cd4bd4..76b01dcd68 100644 +--- a/target/s390x/diag.c ++++ b/target/s390x/diag.c +@@ -14,13 +14,14 @@ + + #include "qemu/osdep.h" + #include "cpu.h" +-#include "internal.h" ++#include "s390x-internal.h" + #include "hw/watchdog/wdt_diag288.h" + #include "sysemu/cpus.h" + #include "hw/s390x/ipl.h" + #include "hw/s390x/s390-virtio-ccw.h" + #include "hw/s390x/pv.h" +-#include "kvm_s390x.h" ++#include "sysemu/kvm.h" ++#include "kvm/kvm_s390x.h" + + int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3) + { +@@ -168,7 +169,7 @@ out: + return; + } + +- if (kvm_s390_get_hpage_1m()) { ++ if (kvm_enabled() && kvm_s390_get_hpage_1m()) { + error_report("Protected VMs can currently not be backed with " + "huge pages"); + env->regs[r1 + 1] = DIAG_308_RC_INVAL_FOR_PV; +diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c +deleted file mode 100644 +index 9c361428c8..0000000000 +--- a/target/s390x/excp_helper.c ++++ /dev/null +@@ -1,641 +0,0 @@ +-/* +- * s390x exception / interrupt helpers +- * +- * Copyright (c) 2009 Ulrich Hecht +- * Copyright (c) 2011 Alexander Graf +- * +- * This library is free software; you can redistribute it and/or +- * modify it under the terms of the GNU Lesser General Public +- * License as published by the Free Software Foundation; either +- * version 2.1 of the License, or (at your option) any later version. +- * +- * This library is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * Lesser General Public License for more details. +- * +- * You should have received a copy of the GNU Lesser General Public +- * License along with this library; if not, see . 
+- */ +- +-#include "qemu/osdep.h" +-#include "cpu.h" +-#include "internal.h" +-#include "exec/helper-proto.h" +-#include "qemu/timer.h" +-#include "exec/exec-all.h" +-#include "exec/cpu_ldst.h" +-#include "hw/s390x/ioinst.h" +-#include "exec/address-spaces.h" +-#include "tcg_s390x.h" +-#ifndef CONFIG_USER_ONLY +-#include "hw/s390x/s390_flic.h" +-#include "hw/boards.h" +-#endif +- +-void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, +- uint32_t code, uintptr_t ra) +-{ +- CPUState *cs = env_cpu(env); +- +- cpu_restore_state(cs, ra, true); +- qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n", +- env->psw.addr); +- trigger_pgm_exception(env, code); +- cpu_loop_exit(cs); +-} +- +-void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, +- uintptr_t ra) +-{ +- g_assert(dxc <= 0xff); +-#if !defined(CONFIG_USER_ONLY) +- /* Store the DXC into the lowcore */ +- stl_phys(env_cpu(env)->as, +- env->psa + offsetof(LowCore, data_exc_code), dxc); +-#endif +- +- /* Store the DXC into the FPC if AFP is enabled */ +- if (env->cregs[0] & CR0_AFP) { +- env->fpc = deposit32(env->fpc, 8, 8, dxc); +- } +- tcg_s390_program_interrupt(env, PGM_DATA, ra); +-} +- +-void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc, +- uintptr_t ra) +-{ +- g_assert(vxc <= 0xff); +-#if !defined(CONFIG_USER_ONLY) +- /* Always store the VXC into the lowcore, without AFP it is undefined */ +- stl_phys(env_cpu(env)->as, +- env->psa + offsetof(LowCore, data_exc_code), vxc); +-#endif +- +- /* Always store the VXC into the FPC, without AFP it is undefined */ +- env->fpc = deposit32(env->fpc, 8, 8, vxc); +- tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra); +-} +- +-void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc) +-{ +- tcg_s390_data_exception(env, dxc, GETPC()); +-} +- +-#if defined(CONFIG_USER_ONLY) +- +-void s390_cpu_do_interrupt(CPUState *cs) +-{ +- cs->exception_index = -1; +-} +- +-bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, +- MMUAccessType access_type, int mmu_idx, +- bool probe, uintptr_t retaddr) +-{ +- S390CPU *cpu = S390_CPU(cs); +- +- trigger_pgm_exception(&cpu->env, PGM_ADDRESSING); +- /* On real machines this value is dropped into LowMem. Since this +- is userland, simply put this someplace that cpu_loop can find it. 
*/ +- cpu->env.__excp_addr = address; +- cpu_loop_exit_restore(cs, retaddr); +-} +- +-#else /* !CONFIG_USER_ONLY */ +- +-static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx) +-{ +- switch (mmu_idx) { +- case MMU_PRIMARY_IDX: +- return PSW_ASC_PRIMARY; +- case MMU_SECONDARY_IDX: +- return PSW_ASC_SECONDARY; +- case MMU_HOME_IDX: +- return PSW_ASC_HOME; +- default: +- abort(); +- } +-} +- +-bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, +- MMUAccessType access_type, int mmu_idx, +- bool probe, uintptr_t retaddr) +-{ +- S390CPU *cpu = S390_CPU(cs); +- CPUS390XState *env = &cpu->env; +- target_ulong vaddr, raddr; +- uint64_t asc, tec; +- int prot, excp; +- +- qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n", +- __func__, address, access_type, mmu_idx); +- +- vaddr = address; +- +- if (mmu_idx < MMU_REAL_IDX) { +- asc = cpu_mmu_idx_to_asc(mmu_idx); +- /* 31-Bit mode */ +- if (!(env->psw.mask & PSW_MASK_64)) { +- vaddr &= 0x7fffffff; +- } +- excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec); +- } else if (mmu_idx == MMU_REAL_IDX) { +- /* 31-Bit mode */ +- if (!(env->psw.mask & PSW_MASK_64)) { +- vaddr &= 0x7fffffff; +- } +- excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec); +- } else { +- g_assert_not_reached(); +- } +- +- /* check out of RAM access */ +- if (!excp && +- !address_space_access_valid(&address_space_memory, raddr, +- TARGET_PAGE_SIZE, access_type, +- MEMTXATTRS_UNSPECIFIED)) { +- MachineState *ms = MACHINE(qdev_get_machine()); +- qemu_log_mask(CPU_LOG_MMU, +- "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", +- __func__, (uint64_t)raddr, (uint64_t)ms->ram_size); +- excp = PGM_ADDRESSING; +- tec = 0; /* unused */ +- } +- +- env->tlb_fill_exc = excp; +- env->tlb_fill_tec = tec; +- +- if (!excp) { +- qemu_log_mask(CPU_LOG_MMU, +- "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", +- __func__, (uint64_t)vaddr, (uint64_t)raddr, prot); +- tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot, +- mmu_idx, TARGET_PAGE_SIZE); +- return true; +- } +- if (probe) { +- return false; +- } +- +- if (excp != PGM_ADDRESSING) { +- stq_phys(env_cpu(env)->as, +- env->psa + offsetof(LowCore, trans_exc_code), tec); +- } +- +- /* +- * For data accesses, ILEN will be filled in from the unwind info, +- * within cpu_loop_exit_restore. For code accesses, retaddr == 0, +- * and so unwinding will not occur. However, ILEN is also undefined +- * for that case -- we choose to set ILEN = 2. 
+- */ +- env->int_pgm_ilen = 2; +- trigger_pgm_exception(env, excp); +- cpu_loop_exit_restore(cs, retaddr); +-} +- +-static void do_program_interrupt(CPUS390XState *env) +-{ +- uint64_t mask, addr; +- LowCore *lowcore; +- int ilen = env->int_pgm_ilen; +- +- assert(ilen == 2 || ilen == 4 || ilen == 6); +- +- switch (env->int_pgm_code) { +- case PGM_PER: +- if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) { +- break; +- } +- /* FALL THROUGH */ +- case PGM_OPERATION: +- case PGM_PRIVILEGED: +- case PGM_EXECUTE: +- case PGM_PROTECTION: +- case PGM_ADDRESSING: +- case PGM_SPECIFICATION: +- case PGM_DATA: +- case PGM_FIXPT_OVERFLOW: +- case PGM_FIXPT_DIVIDE: +- case PGM_DEC_OVERFLOW: +- case PGM_DEC_DIVIDE: +- case PGM_HFP_EXP_OVERFLOW: +- case PGM_HFP_EXP_UNDERFLOW: +- case PGM_HFP_SIGNIFICANCE: +- case PGM_HFP_DIVIDE: +- case PGM_TRANS_SPEC: +- case PGM_SPECIAL_OP: +- case PGM_OPERAND: +- case PGM_HFP_SQRT: +- case PGM_PC_TRANS_SPEC: +- case PGM_ALET_SPEC: +- case PGM_MONITOR: +- /* advance the PSW if our exception is not nullifying */ +- env->psw.addr += ilen; +- break; +- } +- +- qemu_log_mask(CPU_LOG_INT, +- "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n", +- __func__, env->int_pgm_code, ilen, env->psw.mask, +- env->psw.addr); +- +- lowcore = cpu_map_lowcore(env); +- +- /* Signal PER events with the exception. */ +- if (env->per_perc_atmid) { +- env->int_pgm_code |= PGM_PER; +- lowcore->per_address = cpu_to_be64(env->per_address); +- lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid); +- env->per_perc_atmid = 0; +- } +- +- lowcore->pgm_ilen = cpu_to_be16(ilen); +- lowcore->pgm_code = cpu_to_be16(env->int_pgm_code); +- lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); +- lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr); +- mask = be64_to_cpu(lowcore->program_new_psw.mask); +- addr = be64_to_cpu(lowcore->program_new_psw.addr); +- lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea); +- +- cpu_unmap_lowcore(lowcore); +- +- s390_cpu_set_psw(env, mask, addr); +-} +- +-static void do_svc_interrupt(CPUS390XState *env) +-{ +- uint64_t mask, addr; +- LowCore *lowcore; +- +- lowcore = cpu_map_lowcore(env); +- +- lowcore->svc_code = cpu_to_be16(env->int_svc_code); +- lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen); +- lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); +- lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen); +- mask = be64_to_cpu(lowcore->svc_new_psw.mask); +- addr = be64_to_cpu(lowcore->svc_new_psw.addr); +- +- cpu_unmap_lowcore(lowcore); +- +- s390_cpu_set_psw(env, mask, addr); +- +- /* When a PER event is pending, the PER exception has to happen +- immediately after the SERVICE CALL one. 
*/ +- if (env->per_perc_atmid) { +- env->int_pgm_code = PGM_PER; +- env->int_pgm_ilen = env->int_svc_ilen; +- do_program_interrupt(env); +- } +-} +- +-#define VIRTIO_SUBCODE_64 0x0D00 +- +-static void do_ext_interrupt(CPUS390XState *env) +-{ +- QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); +- S390CPU *cpu = env_archcpu(env); +- uint64_t mask, addr; +- uint16_t cpu_addr; +- LowCore *lowcore; +- +- if (!(env->psw.mask & PSW_MASK_EXT)) { +- cpu_abort(CPU(cpu), "Ext int w/o ext mask\n"); +- } +- +- lowcore = cpu_map_lowcore(env); +- +- if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) && +- (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) { +- MachineState *ms = MACHINE(qdev_get_machine()); +- unsigned int max_cpus = ms->smp.max_cpus; +- +- lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY); +- cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS); +- g_assert(cpu_addr < S390_MAX_CPUS); +- lowcore->cpu_addr = cpu_to_be16(cpu_addr); +- clear_bit(cpu_addr, env->emergency_signals); +- if (bitmap_empty(env->emergency_signals, max_cpus)) { +- env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL; +- } +- } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) && +- (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) { +- lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL); +- lowcore->cpu_addr = cpu_to_be16(env->external_call_addr); +- env->pending_int &= ~INTERRUPT_EXTERNAL_CALL; +- } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) && +- (env->cregs[0] & CR0_CKC_SC)) { +- lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP); +- lowcore->cpu_addr = 0; +- env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR; +- } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) && +- (env->cregs[0] & CR0_CPU_TIMER_SC)) { +- lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER); +- lowcore->cpu_addr = 0; +- env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER; +- } else if (qemu_s390_flic_has_service(flic) && +- (env->cregs[0] & CR0_SERVICE_SC)) { +- uint32_t param; +- +- param = qemu_s390_flic_dequeue_service(flic); +- lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE); +- lowcore->ext_params = cpu_to_be32(param); +- lowcore->cpu_addr = 0; +- } else { +- g_assert_not_reached(); +- } +- +- mask = be64_to_cpu(lowcore->external_new_psw.mask); +- addr = be64_to_cpu(lowcore->external_new_psw.addr); +- lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); +- lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr); +- +- cpu_unmap_lowcore(lowcore); +- +- s390_cpu_set_psw(env, mask, addr); +-} +- +-static void do_io_interrupt(CPUS390XState *env) +-{ +- QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); +- uint64_t mask, addr; +- QEMUS390FlicIO *io; +- LowCore *lowcore; +- +- g_assert(env->psw.mask & PSW_MASK_IO); +- io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]); +- g_assert(io); +- +- lowcore = cpu_map_lowcore(env); +- +- lowcore->subchannel_id = cpu_to_be16(io->id); +- lowcore->subchannel_nr = cpu_to_be16(io->nr); +- lowcore->io_int_parm = cpu_to_be32(io->parm); +- lowcore->io_int_word = cpu_to_be32(io->word); +- lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); +- lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr); +- mask = be64_to_cpu(lowcore->io_new_psw.mask); +- addr = be64_to_cpu(lowcore->io_new_psw.addr); +- +- cpu_unmap_lowcore(lowcore); +- g_free(io); +- +- s390_cpu_set_psw(env, mask, addr); +-} +- +-typedef struct MchkExtSaveArea { +- uint64_t vregs[32][2]; /* 0x0000 */ +- uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */ +-} MchkExtSaveArea; 
+-QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024); +- +-static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao) +-{ +- hwaddr len = sizeof(MchkExtSaveArea); +- MchkExtSaveArea *sa; +- int i; +- +- sa = cpu_physical_memory_map(mcesao, &len, true); +- if (!sa) { +- return -EFAULT; +- } +- if (len != sizeof(MchkExtSaveArea)) { +- cpu_physical_memory_unmap(sa, len, 1, 0); +- return -EFAULT; +- } +- +- for (i = 0; i < 32; i++) { +- sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]); +- sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]); +- } +- +- cpu_physical_memory_unmap(sa, len, 1, len); +- return 0; +-} +- +-static void do_mchk_interrupt(CPUS390XState *env) +-{ +- QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); +- uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP; +- uint64_t mask, addr, mcesao = 0; +- LowCore *lowcore; +- int i; +- +- /* for now we only support channel report machine checks (floating) */ +- g_assert(env->psw.mask & PSW_MASK_MCHECK); +- g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC); +- +- qemu_s390_flic_dequeue_crw_mchk(flic); +- +- lowcore = cpu_map_lowcore(env); +- +- /* extended save area */ +- if (mcic & MCIC_VB_VR) { +- /* length and alignment is 1024 bytes */ +- mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull; +- } +- +- /* try to store vector registers */ +- if (!mcesao || mchk_store_vregs(env, mcesao)) { +- mcic &= ~MCIC_VB_VR; +- } +- +- /* we are always in z/Architecture mode */ +- lowcore->ar_access_id = 1; +- +- for (i = 0; i < 16; i++) { +- lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i)); +- lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]); +- lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]); +- lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]); +- } +- lowcore->prefixreg_save_area = cpu_to_be32(env->psa); +- lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc); +- lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr); +- lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm); +- lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8); +- +- lowcore->mcic = cpu_to_be64(mcic); +- lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); +- lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr); +- mask = be64_to_cpu(lowcore->mcck_new_psw.mask); +- addr = be64_to_cpu(lowcore->mcck_new_psw.addr); +- +- cpu_unmap_lowcore(lowcore); +- +- s390_cpu_set_psw(env, mask, addr); +-} +- +-void s390_cpu_do_interrupt(CPUState *cs) +-{ +- QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); +- S390CPU *cpu = S390_CPU(cs); +- CPUS390XState *env = &cpu->env; +- bool stopped = false; +- +- qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n", +- __func__, cs->exception_index, env->psw.mask, env->psw.addr); +- +-try_deliver: +- /* handle machine checks */ +- if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) { +- cs->exception_index = EXCP_MCHK; +- } +- /* handle external interrupts */ +- if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) { +- cs->exception_index = EXCP_EXT; +- } +- /* handle I/O interrupts */ +- if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) { +- cs->exception_index = EXCP_IO; +- } +- /* RESTART interrupt */ +- if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) { +- cs->exception_index = EXCP_RESTART; +- } +- /* STOP interrupt has least priority */ +- if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) { +- cs->exception_index = EXCP_STOP; +- } +- +- switch (cs->exception_index) { +- case 
EXCP_PGM:
+-        do_program_interrupt(env);
+-        break;
+-    case EXCP_SVC:
+-        do_svc_interrupt(env);
+-        break;
+-    case EXCP_EXT:
+-        do_ext_interrupt(env);
+-        break;
+-    case EXCP_IO:
+-        do_io_interrupt(env);
+-        break;
+-    case EXCP_MCHK:
+-        do_mchk_interrupt(env);
+-        break;
+-    case EXCP_RESTART:
+-        do_restart_interrupt(env);
+-        break;
+-    case EXCP_STOP:
+-        do_stop_interrupt(env);
+-        stopped = true;
+-        break;
+-    }
+-
+-    if (cs->exception_index != -1 && !stopped) {
+-        /* check if there are more pending interrupts to deliver */
+-        cs->exception_index = -1;
+-        goto try_deliver;
+-    }
+-    cs->exception_index = -1;
+-
+-    /* we might still have pending interrupts, but not deliverable */
+-    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
+-        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+-    }
+-
+-    /* WAIT PSW during interrupt injection or STOP interrupt */
+-    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
+-        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
+-        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
+-    } else if (cs->halted) {
+-        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
+-        s390_cpu_unhalt(cpu);
+-    }
+-}
+-
+-bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+-{
+-    if (interrupt_request & CPU_INTERRUPT_HARD) {
+-        S390CPU *cpu = S390_CPU(cs);
+-        CPUS390XState *env = &cpu->env;
+-
+-        if (env->ex_value) {
+-            /* Execution of the target insn is indivisible from
+-               the parent EXECUTE insn. */
+-            return false;
+-        }
+-        if (s390_cpu_has_int(cpu)) {
+-            s390_cpu_do_interrupt(cs);
+-            return true;
+-        }
+-        if (env->psw.mask & PSW_MASK_WAIT) {
+-            /* Woken up because of a floating interrupt but it has already
+-             * been delivered. Go back to sleep. */
+-            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
+-        }
+-    }
+-    return false;
+-}
+-
+-void s390x_cpu_debug_excp_handler(CPUState *cs)
+-{
+-    S390CPU *cpu = S390_CPU(cs);
+-    CPUS390XState *env = &cpu->env;
+-    CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+-
+-    if (wp_hit && wp_hit->flags & BP_CPU) {
+-        /* FIXME: When the storage-alteration-space control bit is set,
+-           the exception should only be triggered if the memory access
+-           is done using an address space with the storage-alteration-event
+-           bit set. We have no way to detect that with the current
+-           watchpoint code. */
+-        cs->watchpoint_hit = NULL;
+-
+-        env->per_address = env->psw.addr;
+-        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
+-        /* FIXME: We currently have no way to detect the address space used
+-           to trigger the watchpoint. For now just consider it is the
+-           current default ASC. This turns out to be true except when the
+-           MVCP and MVCS instructions are used. */
+-        env->per_perc_atmid |= env->psw.mask & (PSW_MASK_ASC) >> 46;
+-
+-        /*
+-         * Remove all watchpoints to re-execute the code. A PER exception
+-         * will be triggered, it will call s390_cpu_set_psw which will
+-         * recompute the watchpoints.
+-         */
+-        cpu_watchpoint_remove_all(cs, BP_CPU);
+-        cpu_loop_exit_noexc(cs);
+-    }
+-}
+-
+-/* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
+-   this is only for the atomic operations, for which we want to raise a
+-   specification exception.
*/ +-void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, +- MMUAccessType access_type, +- int mmu_idx, uintptr_t retaddr) +-{ +- S390CPU *cpu = S390_CPU(cs); +- CPUS390XState *env = &cpu->env; +- +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr); +-} +- +-static void QEMU_NORETURN monitor_event(CPUS390XState *env, +- uint64_t monitor_code, +- uint8_t monitor_class, uintptr_t ra) +-{ +- /* Store the Monitor Code and the Monitor Class Number into the lowcore */ +- stq_phys(env_cpu(env)->as, +- env->psa + offsetof(LowCore, monitor_code), monitor_code); +- stw_phys(env_cpu(env)->as, +- env->psa + offsetof(LowCore, mon_class_num), monitor_class); +- +- tcg_s390_program_interrupt(env, PGM_MONITOR, ra); +-} +- +-void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code, +- uint32_t monitor_class) +-{ +- g_assert(monitor_class <= 0xff); +- +- if (env->cregs[8] & (0x8000 >> monitor_class)) { +- monitor_event(env, monitor_code, monitor_class, GETPC()); +- } +-} +- +-#endif /* !CONFIG_USER_ONLY */ +diff --git a/target/s390x/fpu_helper.c b/target/s390x/fpu_helper.c +deleted file mode 100644 +index 13af158748..0000000000 +--- a/target/s390x/fpu_helper.c ++++ /dev/null +@@ -1,921 +0,0 @@ +-/* +- * S/390 FPU helper routines +- * +- * Copyright (c) 2009 Ulrich Hecht +- * Copyright (c) 2009 Alexander Graf +- * +- * This library is free software; you can redistribute it and/or +- * modify it under the terms of the GNU Lesser General Public +- * License as published by the Free Software Foundation; either +- * version 2.1 of the License, or (at your option) any later version. +- * +- * This library is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * Lesser General Public License for more details. +- * +- * You should have received a copy of the GNU Lesser General Public +- * License along with this library; if not, see . +- */ +- +-#include "qemu/osdep.h" +-#include "cpu.h" +-#include "internal.h" +-#include "tcg_s390x.h" +-#include "exec/exec-all.h" +-#include "exec/cpu_ldst.h" +-#include "exec/helper-proto.h" +-#include "fpu/softfloat.h" +- +-/* #define DEBUG_HELPER */ +-#ifdef DEBUG_HELPER +-#define HELPER_LOG(x...) qemu_log(x) +-#else +-#define HELPER_LOG(x...) +-#endif +- +-#define RET128(F) (env->retxl = F.low, F.high) +- +-uint8_t s390_softfloat_exc_to_ieee(unsigned int exc) +-{ +- uint8_t s390_exc = 0; +- +- s390_exc |= (exc & float_flag_invalid) ? S390_IEEE_MASK_INVALID : 0; +- s390_exc |= (exc & float_flag_divbyzero) ? S390_IEEE_MASK_DIVBYZERO : 0; +- s390_exc |= (exc & float_flag_overflow) ? S390_IEEE_MASK_OVERFLOW : 0; +- s390_exc |= (exc & float_flag_underflow) ? S390_IEEE_MASK_UNDERFLOW : 0; +- s390_exc |= (exc & float_flag_inexact) ? S390_IEEE_MASK_INEXACT : 0; +- +- return s390_exc; +-} +- +-/* Should be called after any operation that may raise IEEE exceptions. */ +-static void handle_exceptions(CPUS390XState *env, bool XxC, uintptr_t retaddr) +-{ +- unsigned s390_exc, qemu_exc; +- +- /* Get the exceptions raised by the current operation. Reset the +- fpu_status contents so that the next operation has a clean slate. 
*/
+-    qemu_exc = env->fpu_status.float_exception_flags;
+-    if (qemu_exc == 0) {
+-        return;
+-    }
+-    env->fpu_status.float_exception_flags = 0;
+-    s390_exc = s390_softfloat_exc_to_ieee(qemu_exc);
+-
+-    /*
+-     * IEEE-Underflow exception recognition exists if a tininess condition
+-     * (underflow) exists and
+-     * - The mask bit in the FPC is zero and the result is inexact
+-     * - The mask bit in the FPC is one
+-     * So tininess conditions that are not inexact don't trigger any
+-     * underflow action in case the mask bit is not one.
+-     */
+-    if (!(s390_exc & S390_IEEE_MASK_INEXACT) &&
+-        !((env->fpc >> 24) & S390_IEEE_MASK_UNDERFLOW)) {
+-        s390_exc &= ~S390_IEEE_MASK_UNDERFLOW;
+-    }
+-
+-    /*
+-     * FIXME:
+-     * 1. Right now, all inexact conditions are indicated as
+-     *    "truncated" (0) and never as "incremented" (1) in the DXC.
+-     * 2. Only traps due to invalid/divbyzero are suppressing. Other traps
+-     *    are completing, meaning the target register has to be written!
+-     *    This, however will mean that we have to write the register before
+-     *    triggering the trap - impossible right now.
+-     */
+-
+-    /*
+-     * invalid/divbyzero cannot coexist with other conditions.
+-     * overflow/underflow however can coexist with inexact, we have to
+-     * handle it separately.
+-     */
+-    if (s390_exc & ~S390_IEEE_MASK_INEXACT) {
+-        if (s390_exc & ~S390_IEEE_MASK_INEXACT & env->fpc >> 24) {
+-            /* trap condition - inexact reported along */
+-            tcg_s390_data_exception(env, s390_exc, retaddr);
+-        }
+-        /* nontrap condition - inexact handled differently */
+-        env->fpc |= (s390_exc & ~S390_IEEE_MASK_INEXACT) << 16;
+-    }
+-
+-    /* inexact handling */
+-    if (s390_exc & S390_IEEE_MASK_INEXACT && !XxC) {
+-        /* trap condition - overflow/underflow _not_ reported along */
+-        if (s390_exc & S390_IEEE_MASK_INEXACT & env->fpc >> 24) {
+-            tcg_s390_data_exception(env, s390_exc & S390_IEEE_MASK_INEXACT,
+-                                    retaddr);
+-        }
+-        /* nontrap condition */
+-        env->fpc |= (s390_exc & S390_IEEE_MASK_INEXACT) << 16;
+-    }
+-}
+-
+-int float_comp_to_cc(CPUS390XState *env, FloatRelation float_compare)
+-{
+-    switch (float_compare) {
+-    case float_relation_equal:
+-        return 0;
+-    case float_relation_less:
+-        return 1;
+-    case float_relation_greater:
+-        return 2;
+-    case float_relation_unordered:
+-        return 3;
+-    default:
+-        cpu_abort(env_cpu(env), "unknown return value for float compare\n");
+-    }
+-}
+-
+-/* condition codes for unary FP ops */
+-uint32_t set_cc_nz_f32(float32 v)
+-{
+-    if (float32_is_any_nan(v)) {
+-        return 3;
+-    } else if (float32_is_zero(v)) {
+-        return 0;
+-    } else if (float32_is_neg(v)) {
+-        return 1;
+-    } else {
+-        return 2;
+-    }
+-}
+-
+-uint32_t set_cc_nz_f64(float64 v)
+-{
+-    if (float64_is_any_nan(v)) {
+-        return 3;
+-    } else if (float64_is_zero(v)) {
+-        return 0;
+-    } else if (float64_is_neg(v)) {
+-        return 1;
+-    } else {
+-        return 2;
+-    }
+-}
+-
+-uint32_t set_cc_nz_f128(float128 v)
+-{
+-    if (float128_is_any_nan(v)) {
+-        return 3;
+-    } else if (float128_is_zero(v)) {
+-        return 0;
+-    } else if (float128_is_neg(v)) {
+-        return 1;
+-    } else {
+-        return 2;
+-    }
+-}
+-
+-static inline uint8_t round_from_m34(uint32_t m34)
+-{
+-    return extract32(m34, 0, 4);
+-}
+-
+-static inline bool xxc_from_m34(uint32_t m34)
+-{
+-    /* XxC is bit 1 of m4 */
+-    return extract32(m34, 4 + 3 - 1, 1);
+-}
+-
+-/* 32-bit FP addition */
+-uint64_t HELPER(aeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+-{
+-    float32 ret = float32_add(f1, f2, &env->fpu_status);
+-    handle_exceptions(env, false, GETPC());
+-    return ret;
+-}
+-
+-/* 64-bit FP addition */
+-uint64_t
HELPER(adb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- float64 ret = float64_add(f1, f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* 128-bit FP addition */ +-uint64_t HELPER(axb)(CPUS390XState *env, uint64_t ah, uint64_t al, +- uint64_t bh, uint64_t bl) +-{ +- float128 ret = float128_add(make_float128(ah, al), +- make_float128(bh, bl), +- &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return RET128(ret); +-} +- +-/* 32-bit FP subtraction */ +-uint64_t HELPER(seb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- float32 ret = float32_sub(f1, f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* 64-bit FP subtraction */ +-uint64_t HELPER(sdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- float64 ret = float64_sub(f1, f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* 128-bit FP subtraction */ +-uint64_t HELPER(sxb)(CPUS390XState *env, uint64_t ah, uint64_t al, +- uint64_t bh, uint64_t bl) +-{ +- float128 ret = float128_sub(make_float128(ah, al), +- make_float128(bh, bl), +- &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return RET128(ret); +-} +- +-/* 32-bit FP division */ +-uint64_t HELPER(deb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- float32 ret = float32_div(f1, f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* 64-bit FP division */ +-uint64_t HELPER(ddb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- float64 ret = float64_div(f1, f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* 128-bit FP division */ +-uint64_t HELPER(dxb)(CPUS390XState *env, uint64_t ah, uint64_t al, +- uint64_t bh, uint64_t bl) +-{ +- float128 ret = float128_div(make_float128(ah, al), +- make_float128(bh, bl), +- &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return RET128(ret); +-} +- +-/* 32-bit FP multiplication */ +-uint64_t HELPER(meeb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- float32 ret = float32_mul(f1, f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* 64-bit FP multiplication */ +-uint64_t HELPER(mdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- float64 ret = float64_mul(f1, f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* 64/32-bit FP multiplication */ +-uint64_t HELPER(mdeb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- float64 ret = float32_to_float64(f2, &env->fpu_status); +- ret = float64_mul(f1, ret, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* 128-bit FP multiplication */ +-uint64_t HELPER(mxb)(CPUS390XState *env, uint64_t ah, uint64_t al, +- uint64_t bh, uint64_t bl) +-{ +- float128 ret = float128_mul(make_float128(ah, al), +- make_float128(bh, bl), +- &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return RET128(ret); +-} +- +-/* 128/64-bit FP multiplication */ +-uint64_t HELPER(mxdb)(CPUS390XState *env, uint64_t ah, uint64_t al, +- uint64_t f2) +-{ +- float128 ret = float64_to_float128(f2, &env->fpu_status); +- ret = float128_mul(make_float128(ah, al), ret, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return RET128(ret); +-} +- +-/* convert 32-bit float to 64-bit float */ +-uint64_t HELPER(ldeb)(CPUS390XState *env, uint64_t f2) +-{ +- float64 ret = float32_to_float64(f2, &env->fpu_status); +- 
handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* convert 128-bit float to 64-bit float */ +-uint64_t HELPER(ldxb)(CPUS390XState *env, uint64_t ah, uint64_t al, +- uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float64 ret = float128_to_float64(make_float128(ah, al), &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return ret; +-} +- +-/* convert 64-bit float to 128-bit float */ +-uint64_t HELPER(lxdb)(CPUS390XState *env, uint64_t f2) +-{ +- float128 ret = float64_to_float128(f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return RET128(ret); +-} +- +-/* convert 32-bit float to 128-bit float */ +-uint64_t HELPER(lxeb)(CPUS390XState *env, uint64_t f2) +-{ +- float128 ret = float32_to_float128(f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return RET128(ret); +-} +- +-/* convert 64-bit float to 32-bit float */ +-uint64_t HELPER(ledb)(CPUS390XState *env, uint64_t f2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float32 ret = float64_to_float32(f2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return ret; +-} +- +-/* convert 128-bit float to 32-bit float */ +-uint64_t HELPER(lexb)(CPUS390XState *env, uint64_t ah, uint64_t al, +- uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float32 ret = float128_to_float32(make_float128(ah, al), &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return ret; +-} +- +-/* 32-bit FP compare */ +-uint32_t HELPER(ceb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- FloatRelation cmp = float32_compare_quiet(f1, f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return float_comp_to_cc(env, cmp); +-} +- +-/* 64-bit FP compare */ +-uint32_t HELPER(cdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- FloatRelation cmp = float64_compare_quiet(f1, f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return float_comp_to_cc(env, cmp); +-} +- +-/* 128-bit FP compare */ +-uint32_t HELPER(cxb)(CPUS390XState *env, uint64_t ah, uint64_t al, +- uint64_t bh, uint64_t bl) +-{ +- FloatRelation cmp = float128_compare_quiet(make_float128(ah, al), +- make_float128(bh, bl), +- &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return float_comp_to_cc(env, cmp); +-} +- +-int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3) +-{ +- int ret = env->fpu_status.float_rounding_mode; +- +- switch (m3) { +- case 0: +- /* current mode */ +- break; +- case 1: +- /* round to nearest with ties away from 0 */ +- set_float_rounding_mode(float_round_ties_away, &env->fpu_status); +- break; +- case 3: +- /* round to prepare for shorter precision */ +- set_float_rounding_mode(float_round_to_odd, &env->fpu_status); +- break; +- case 4: +- /* round to nearest with ties to even */ +- set_float_rounding_mode(float_round_nearest_even, &env->fpu_status); +- break; +- case 5: +- /* round to zero */ +- set_float_rounding_mode(float_round_to_zero, &env->fpu_status); +- break; +- case 6: +- /* round to +inf */ +- set_float_rounding_mode(float_round_up, &env->fpu_status); +- break; +- case 7: +- /* round to -inf */ +- set_float_rounding_mode(float_round_down, &env->fpu_status); +- break; +- default: +- 
g_assert_not_reached(); +- } +- return ret; +-} +- +-void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode) +-{ +- set_float_rounding_mode(old_mode, &env->fpu_status); +-} +- +-/* convert 64-bit int to 32-bit float */ +-uint64_t HELPER(cegb)(CPUS390XState *env, int64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float32 ret = int64_to_float32(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return ret; +-} +- +-/* convert 64-bit int to 64-bit float */ +-uint64_t HELPER(cdgb)(CPUS390XState *env, int64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float64 ret = int64_to_float64(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return ret; +-} +- +-/* convert 64-bit int to 128-bit float */ +-uint64_t HELPER(cxgb)(CPUS390XState *env, int64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float128 ret = int64_to_float128(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return RET128(ret); +-} +- +-/* convert 64-bit uint to 32-bit float */ +-uint64_t HELPER(celgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float32 ret = uint64_to_float32(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return ret; +-} +- +-/* convert 64-bit uint to 64-bit float */ +-uint64_t HELPER(cdlgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float64 ret = uint64_to_float64(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return ret; +-} +- +-/* convert 64-bit uint to 128-bit float */ +-uint64_t HELPER(cxlgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float128 ret = uint64_to_float128(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return RET128(ret); +-} +- +-/* convert 32-bit float to 64-bit int */ +-uint64_t HELPER(cgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- int64_t ret = float32_to_int64(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float32_is_any_nan(v2)) { +- return INT64_MIN; +- } +- return ret; +-} +- +-/* convert 64-bit float to 64-bit int */ +-uint64_t HELPER(cgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- int64_t ret = float64_to_int64(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float64_is_any_nan(v2)) { +- return INT64_MIN; +- } +- return ret; +-} +- +-/* convert 128-bit float to 64-bit int */ +-uint64_t HELPER(cgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, 
round_from_m34(m34)); +- float128 v2 = make_float128(h, l); +- int64_t ret = float128_to_int64(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float128_is_any_nan(v2)) { +- return INT64_MIN; +- } +- return ret; +-} +- +-/* convert 32-bit float to 32-bit int */ +-uint64_t HELPER(cfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- int32_t ret = float32_to_int32(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float32_is_any_nan(v2)) { +- return INT32_MIN; +- } +- return ret; +-} +- +-/* convert 64-bit float to 32-bit int */ +-uint64_t HELPER(cfdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- int32_t ret = float64_to_int32(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float64_is_any_nan(v2)) { +- return INT32_MIN; +- } +- return ret; +-} +- +-/* convert 128-bit float to 32-bit int */ +-uint64_t HELPER(cfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float128 v2 = make_float128(h, l); +- int32_t ret = float128_to_int32(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float128_is_any_nan(v2)) { +- return INT32_MIN; +- } +- return ret; +-} +- +-/* convert 32-bit float to 64-bit uint */ +-uint64_t HELPER(clgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- uint64_t ret = float32_to_uint64(v2, &env->fpu_status); +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float32_is_any_nan(v2)) { +- return 0; +- } +- return ret; +-} +- +-/* convert 64-bit float to 64-bit uint */ +-uint64_t HELPER(clgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- uint64_t ret = float64_to_uint64(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float64_is_any_nan(v2)) { +- return 0; +- } +- return ret; +-} +- +-/* convert 128-bit float to 64-bit uint */ +-uint64_t HELPER(clgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- uint64_t ret = float128_to_uint64(make_float128(h, l), &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float128_is_any_nan(make_float128(h, l))) { +- return 0; +- } +- return ret; +-} +- +-/* convert 32-bit float to 32-bit uint */ +-uint64_t HELPER(clfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- uint32_t ret = float32_to_uint32(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float32_is_any_nan(v2)) { +- return 0; +- } +- return ret; +-} +- +-/* convert 64-bit float to 32-bit uint */ +-uint64_t HELPER(clfdb)(CPUS390XState *env, uint64_t v2, 
uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- uint32_t ret = float64_to_uint32(v2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float64_is_any_nan(v2)) { +- return 0; +- } +- return ret; +-} +- +-/* convert 128-bit float to 32-bit uint */ +-uint64_t HELPER(clfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- uint32_t ret = float128_to_uint32(make_float128(h, l), &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- if (float128_is_any_nan(make_float128(h, l))) { +- return 0; +- } +- return ret; +-} +- +-/* round to integer 32-bit */ +-uint64_t HELPER(fieb)(CPUS390XState *env, uint64_t f2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float32 ret = float32_round_to_int(f2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return ret; +-} +- +-/* round to integer 64-bit */ +-uint64_t HELPER(fidb)(CPUS390XState *env, uint64_t f2, uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float64 ret = float64_round_to_int(f2, &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return ret; +-} +- +-/* round to integer 128-bit */ +-uint64_t HELPER(fixb)(CPUS390XState *env, uint64_t ah, uint64_t al, +- uint32_t m34) +-{ +- int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); +- float128 ret = float128_round_to_int(make_float128(ah, al), +- &env->fpu_status); +- +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_exceptions(env, xxc_from_m34(m34), GETPC()); +- return RET128(ret); +-} +- +-/* 32-bit FP compare and signal */ +-uint32_t HELPER(keb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- FloatRelation cmp = float32_compare(f1, f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return float_comp_to_cc(env, cmp); +-} +- +-/* 64-bit FP compare and signal */ +-uint32_t HELPER(kdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) +-{ +- FloatRelation cmp = float64_compare(f1, f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return float_comp_to_cc(env, cmp); +-} +- +-/* 128-bit FP compare and signal */ +-uint32_t HELPER(kxb)(CPUS390XState *env, uint64_t ah, uint64_t al, +- uint64_t bh, uint64_t bl) +-{ +- FloatRelation cmp = float128_compare(make_float128(ah, al), +- make_float128(bh, bl), +- &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return float_comp_to_cc(env, cmp); +-} +- +-/* 32-bit FP multiply and add */ +-uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1, +- uint64_t f2, uint64_t f3) +-{ +- float32 ret = float32_muladd(f2, f3, f1, 0, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* 64-bit FP multiply and add */ +-uint64_t HELPER(madb)(CPUS390XState *env, uint64_t f1, +- uint64_t f2, uint64_t f3) +-{ +- float64 ret = float64_muladd(f2, f3, f1, 0, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* 32-bit FP multiply and subtract */ +-uint64_t HELPER(mseb)(CPUS390XState *env, uint64_t f1, +- uint64_t f2, uint64_t f3) +-{ +- float32 ret = float32_muladd(f2, f3, f1, float_muladd_negate_c, +- 
&env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* 64-bit FP multiply and subtract */ +-uint64_t HELPER(msdb)(CPUS390XState *env, uint64_t f1, +- uint64_t f2, uint64_t f3) +-{ +- float64 ret = float64_muladd(f2, f3, f1, float_muladd_negate_c, +- &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* The rightmost bit has the number 11. */ +-static inline uint16_t dcmask(int bit, bool neg) +-{ +- return 1 << (11 - bit - neg); +-} +- +-#define DEF_FLOAT_DCMASK(_TYPE) \ +-uint16_t _TYPE##_dcmask(CPUS390XState *env, _TYPE f1) \ +-{ \ +- const bool neg = _TYPE##_is_neg(f1); \ +- \ +- /* Sorted by most common cases - only one class is possible */ \ +- if (_TYPE##_is_normal(f1)) { \ +- return dcmask(2, neg); \ +- } else if (_TYPE##_is_zero(f1)) { \ +- return dcmask(0, neg); \ +- } else if (_TYPE##_is_denormal(f1)) { \ +- return dcmask(4, neg); \ +- } else if (_TYPE##_is_infinity(f1)) { \ +- return dcmask(6, neg); \ +- } else if (_TYPE##_is_quiet_nan(f1, &env->fpu_status)) { \ +- return dcmask(8, neg); \ +- } \ +- /* signaling nan, as last remaining case */ \ +- return dcmask(10, neg); \ +-} +-DEF_FLOAT_DCMASK(float32) +-DEF_FLOAT_DCMASK(float64) +-DEF_FLOAT_DCMASK(float128) +- +-/* test data class 32-bit */ +-uint32_t HELPER(tceb)(CPUS390XState *env, uint64_t f1, uint64_t m2) +-{ +- return (m2 & float32_dcmask(env, f1)) != 0; +-} +- +-/* test data class 64-bit */ +-uint32_t HELPER(tcdb)(CPUS390XState *env, uint64_t v1, uint64_t m2) +-{ +- return (m2 & float64_dcmask(env, v1)) != 0; +-} +- +-/* test data class 128-bit */ +-uint32_t HELPER(tcxb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t m2) +-{ +- return (m2 & float128_dcmask(env, make_float128(ah, al))) != 0; +-} +- +-/* square root 32-bit */ +-uint64_t HELPER(sqeb)(CPUS390XState *env, uint64_t f2) +-{ +- float32 ret = float32_sqrt(f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* square root 64-bit */ +-uint64_t HELPER(sqdb)(CPUS390XState *env, uint64_t f2) +-{ +- float64 ret = float64_sqrt(f2, &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return ret; +-} +- +-/* square root 128-bit */ +-uint64_t HELPER(sqxb)(CPUS390XState *env, uint64_t ah, uint64_t al) +-{ +- float128 ret = float128_sqrt(make_float128(ah, al), &env->fpu_status); +- handle_exceptions(env, false, GETPC()); +- return RET128(ret); +-} +- +-static const int fpc_to_rnd[8] = { +- float_round_nearest_even, +- float_round_to_zero, +- float_round_up, +- float_round_down, +- -1, +- -1, +- -1, +- float_round_to_odd, +-}; +- +-/* set fpc */ +-void HELPER(sfpc)(CPUS390XState *env, uint64_t fpc) +-{ +- if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u || +- (!s390_has_feat(S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) { +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); +- } +- +- /* Install everything in the main FPC. */ +- env->fpc = fpc; +- +- /* Install the rounding mode in the shadow fpu_status. */ +- set_float_rounding_mode(fpc_to_rnd[fpc & 0x7], &env->fpu_status); +-} +- +-/* set fpc and signal */ +-void HELPER(sfas)(CPUS390XState *env, uint64_t fpc) +-{ +- uint32_t signalling = env->fpc; +- uint32_t s390_exc; +- +- if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u || +- (!s390_has_feat(S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) { +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); +- } +- +- /* +- * FPC is set to the FPC operand with a bitwise OR of the signalling +- * flags. 
+- */
+-    env->fpc = fpc | (signalling & 0x00ff0000);
+-    set_float_rounding_mode(fpc_to_rnd[fpc & 0x7], &env->fpu_status);
+-
+-    /*
+-     * If any signaling flag is enabled in the new FPC mask, a
+-     * simulated-ieee-exception exception occurs.
+-     */
+-    s390_exc = (signalling >> 16) & (fpc >> 24);
+-    if (s390_exc) {
+-        if (s390_exc & S390_IEEE_MASK_INVALID) {
+-            s390_exc = S390_IEEE_MASK_INVALID;
+-        } else if (s390_exc & S390_IEEE_MASK_DIVBYZERO) {
+-            s390_exc = S390_IEEE_MASK_DIVBYZERO;
+-        } else if (s390_exc & S390_IEEE_MASK_OVERFLOW) {
+-            s390_exc &= (S390_IEEE_MASK_OVERFLOW | S390_IEEE_MASK_INEXACT);
+-        } else if (s390_exc & S390_IEEE_MASK_UNDERFLOW) {
+-            s390_exc &= (S390_IEEE_MASK_UNDERFLOW | S390_IEEE_MASK_INEXACT);
+-        } else if (s390_exc & S390_IEEE_MASK_INEXACT) {
+-            s390_exc = S390_IEEE_MASK_INEXACT;
+-        } else if (s390_exc & S390_IEEE_MASK_QUANTUM) {
+-            s390_exc = S390_IEEE_MASK_QUANTUM;
+-        }
+-        tcg_s390_data_exception(env, s390_exc | 3, GETPC());
+-    }
+-}
+-
+-/* set bfp rounding mode */
+-void HELPER(srnm)(CPUS390XState *env, uint64_t rnd)
+-{
+-    if (rnd > 0x7 || fpc_to_rnd[rnd & 0x7] == -1) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+-    }
+-
+-    env->fpc = deposit32(env->fpc, 0, 3, rnd);
+-    set_float_rounding_mode(fpc_to_rnd[rnd & 0x7], &env->fpu_status);
+-}
+diff --git a/target/s390x/gdbstub.c b/target/s390x/gdbstub.c
+index 5b4e38a13b..a5d69d0e0b 100644
+--- a/target/s390x/gdbstub.c
++++ b/target/s390x/gdbstub.c
+@@ -20,7 +20,7 @@
+
+ #include "qemu/osdep.h"
+ #include "cpu.h"
+-#include "internal.h"
++#include "s390x-internal.h"
+ #include "exec/exec-all.h"
+ #include "exec/gdbstub.h"
+ #include "qemu/bitops.h"
+diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
+index 242c95ede4..7d85322d68 100644
+--- a/target/s390x/gen-features.c
++++ b/target/s390x/gen-features.c
+@@ -424,6 +424,8 @@ static uint16_t base_GEN15_GA1[] = {
+     S390_FEAT_MISC_INSTRUCTION_EXT3,
+ };
+
++#define base_GEN16_GA1 EmptyFeat
++
+ /* Full features (in order of release)
+  * Automatically includes corresponding base features.
+  * Full features are all features this hardware supports even if kvm/QEMU do not
+@@ -567,6 +569,15 @@ static uint16_t full_GEN15_GA1[] = {
+     S390_FEAT_UNPACK,
+ };
+
++static uint16_t full_GEN16_GA1[] = {
++    S390_FEAT_NNPA,
++    S390_FEAT_VECTOR_PACKED_DECIMAL_ENH2,
++    S390_FEAT_BEAR_ENH,
++    S390_FEAT_RDP,
++    S390_FEAT_PAI,
++};
++
++
+ /* Default features (in order of release)
+  * Automatically includes corresponding base features.
+ * Default features are all features this version of QEMU supports for this +@@ -652,6 +663,8 @@ static uint16_t default_GEN15_GA1[] = { + S390_FEAT_ETOKEN, + }; + ++#define default_GEN16_GA1 EmptyFeat ++ + /* QEMU (CPU model) features */ + + static uint16_t qemu_V2_11[] = { +@@ -785,6 +798,7 @@ static CpuFeatDefSpec CpuFeatDef[] = { + CPU_FEAT_INITIALIZER(GEN14_GA1), + CPU_FEAT_INITIALIZER(GEN14_GA2), + CPU_FEAT_INITIALIZER(GEN15_GA1), ++ CPU_FEAT_INITIALIZER(GEN16_GA1), + }; + + #define FEAT_GROUP_INITIALIZER(_name) \ +diff --git a/target/s390x/helper.c b/target/s390x/helper.c +index 1445b74451..6e35473c7f 100644 +--- a/target/s390x/helper.c ++++ b/target/s390x/helper.c +@@ -1,5 +1,5 @@ + /* +- * S/390 helpers ++ * S/390 helpers - sysemu only + * + * Copyright (c) 2009 Ulrich Hecht + * Copyright (c) 2011 Alexander Graf +@@ -20,19 +20,15 @@ + + #include "qemu/osdep.h" + #include "cpu.h" +-#include "internal.h" ++#include "s390x-internal.h" + #include "exec/gdbstub.h" + #include "qemu/timer.h" +-#include "qemu/qemu-print.h" + #include "hw/s390x/ioinst.h" + #include "hw/s390x/pv.h" + #include "sysemu/hw_accel.h" + #include "sysemu/runstate.h" +-#ifndef CONFIG_USER_ONLY + #include "sysemu/tcg.h" +-#endif + +-#ifndef CONFIG_USER_ONLY + void s390x_tod_timer(void *opaque) + { + cpu_inject_clock_comparator((S390CPU *) opaque); +@@ -285,157 +281,3 @@ int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len) + cpu_physical_memory_unmap(sa, len, 1, len); + return 0; + } +-#else +-/* For user-only, tcg is always enabled. */ +-#define tcg_enabled() true +-#endif /* CONFIG_USER_ONLY */ +- +-void s390_cpu_set_psw(CPUS390XState *env, uint64_t mask, uint64_t addr) +-{ +-#ifndef CONFIG_USER_ONLY +- uint64_t old_mask = env->psw.mask; +-#endif +- +- env->psw.addr = addr; +- env->psw.mask = mask; +- +- /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */ +- if (!tcg_enabled()) { +- return; +- } +- env->cc_op = (mask >> 44) & 3; +- +-#ifndef CONFIG_USER_ONLY +- if ((old_mask ^ mask) & PSW_MASK_PER) { +- s390_cpu_recompute_watchpoints(env_cpu(env)); +- } +- +- if (mask & PSW_MASK_WAIT) { +- s390_handle_wait(env_archcpu(env)); +- } +-#endif +-} +- +-uint64_t s390_cpu_get_psw_mask(CPUS390XState *env) +-{ +- uint64_t r = env->psw.mask; +- +- if (tcg_enabled()) { +- uint64_t cc = calc_cc(env, env->cc_op, env->cc_src, +- env->cc_dst, env->cc_vr); +- +- assert(cc <= 3); +- r &= ~PSW_MASK_CC; +- r |= cc << 44; +- } +- +- return r; +-} +- +-void s390_cpu_dump_state(CPUState *cs, FILE *f, int flags) +-{ +- S390CPU *cpu = S390_CPU(cs); +- CPUS390XState *env = &cpu->env; +- int i; +- +- qemu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64, +- s390_cpu_get_psw_mask(env), env->psw.addr); +- if (!tcg_enabled()) { +- qemu_fprintf(f, "\n"); +- } else if (env->cc_op > 3) { +- qemu_fprintf(f, " cc %15s\n", cc_name(env->cc_op)); +- } else { +- qemu_fprintf(f, " cc %02x\n", env->cc_op); +- } +- +- for (i = 0; i < 16; i++) { +- qemu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]); +- if ((i % 4) == 3) { +- qemu_fprintf(f, "\n"); +- } else { +- qemu_fprintf(f, " "); +- } +- } +- +- if (flags & CPU_DUMP_FPU) { +- if (s390_has_feat(S390_FEAT_VECTOR)) { +- for (i = 0; i < 32; i++) { +- qemu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64 "%c", +- i, env->vregs[i][0], env->vregs[i][1], +- i % 2 ? '\n' : ' '); +- } +- } else { +- for (i = 0; i < 16; i++) { +- qemu_fprintf(f, "F%02d=%016" PRIx64 "%c", +- i, *get_freg(env, i), +- (i % 4) == 3 ? 
'\n' : ' '); +- } +- } +- } +- +-#ifndef CONFIG_USER_ONLY +- for (i = 0; i < 16; i++) { +- qemu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]); +- if ((i % 4) == 3) { +- qemu_fprintf(f, "\n"); +- } else { +- qemu_fprintf(f, " "); +- } +- } +-#endif +- +-#ifdef DEBUG_INLINE_BRANCHES +- for (i = 0; i < CC_OP_MAX; i++) { +- qemu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i), +- inline_branch_miss[i], inline_branch_hit[i]); +- } +-#endif +- +- qemu_fprintf(f, "\n"); +-} +- +-const char *cc_name(enum cc_op cc_op) +-{ +- static const char * const cc_names[] = { +- [CC_OP_CONST0] = "CC_OP_CONST0", +- [CC_OP_CONST1] = "CC_OP_CONST1", +- [CC_OP_CONST2] = "CC_OP_CONST2", +- [CC_OP_CONST3] = "CC_OP_CONST3", +- [CC_OP_DYNAMIC] = "CC_OP_DYNAMIC", +- [CC_OP_STATIC] = "CC_OP_STATIC", +- [CC_OP_NZ] = "CC_OP_NZ", +- [CC_OP_ADDU] = "CC_OP_ADDU", +- [CC_OP_SUBU] = "CC_OP_SUBU", +- [CC_OP_LTGT_32] = "CC_OP_LTGT_32", +- [CC_OP_LTGT_64] = "CC_OP_LTGT_64", +- [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32", +- [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64", +- [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32", +- [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64", +- [CC_OP_ADD_64] = "CC_OP_ADD_64", +- [CC_OP_SUB_64] = "CC_OP_SUB_64", +- [CC_OP_ABS_64] = "CC_OP_ABS_64", +- [CC_OP_NABS_64] = "CC_OP_NABS_64", +- [CC_OP_ADD_32] = "CC_OP_ADD_32", +- [CC_OP_SUB_32] = "CC_OP_SUB_32", +- [CC_OP_ABS_32] = "CC_OP_ABS_32", +- [CC_OP_NABS_32] = "CC_OP_NABS_32", +- [CC_OP_COMP_32] = "CC_OP_COMP_32", +- [CC_OP_COMP_64] = "CC_OP_COMP_64", +- [CC_OP_TM_32] = "CC_OP_TM_32", +- [CC_OP_TM_64] = "CC_OP_TM_64", +- [CC_OP_NZ_F32] = "CC_OP_NZ_F32", +- [CC_OP_NZ_F64] = "CC_OP_NZ_F64", +- [CC_OP_NZ_F128] = "CC_OP_NZ_F128", +- [CC_OP_ICM] = "CC_OP_ICM", +- [CC_OP_SLA_32] = "CC_OP_SLA_32", +- [CC_OP_SLA_64] = "CC_OP_SLA_64", +- [CC_OP_FLOGR] = "CC_OP_FLOGR", +- [CC_OP_LCBB] = "CC_OP_LCBB", +- [CC_OP_VC] = "CC_OP_VC", +- [CC_OP_MULS_32] = "CC_OP_MULS_32", +- [CC_OP_MULS_64] = "CC_OP_MULS_64", +- }; +- +- return cc_names[cc_op]; +-} +diff --git a/target/s390x/helper.h b/target/s390x/helper.h +index ba045f559d..6215ca00bc 100644 +--- a/target/s390x/helper.h ++++ b/target/s390x/helper.h +@@ -64,18 +64,18 @@ DEF_HELPER_FLAGS_5(cxb, TCG_CALL_NO_WG_SE, i32, env, i64, i64, i64, i64) + DEF_HELPER_FLAGS_3(keb, TCG_CALL_NO_WG, i32, env, i64, i64) + DEF_HELPER_FLAGS_3(kdb, TCG_CALL_NO_WG, i32, env, i64, i64) + DEF_HELPER_FLAGS_5(kxb, TCG_CALL_NO_WG, i32, env, i64, i64, i64, i64) +-DEF_HELPER_FLAGS_3(cgeb, TCG_CALL_NO_WG, i64, env, i64, i32) +-DEF_HELPER_FLAGS_3(cgdb, TCG_CALL_NO_WG, i64, env, i64, i32) +-DEF_HELPER_FLAGS_4(cgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) +-DEF_HELPER_FLAGS_3(cfeb, TCG_CALL_NO_WG, i64, env, i64, i32) +-DEF_HELPER_FLAGS_3(cfdb, TCG_CALL_NO_WG, i64, env, i64, i32) +-DEF_HELPER_FLAGS_4(cfxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) +-DEF_HELPER_FLAGS_3(clgeb, TCG_CALL_NO_WG, i64, env, i64, i32) +-DEF_HELPER_FLAGS_3(clgdb, TCG_CALL_NO_WG, i64, env, i64, i32) +-DEF_HELPER_FLAGS_4(clgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) +-DEF_HELPER_FLAGS_3(clfeb, TCG_CALL_NO_WG, i64, env, i64, i32) +-DEF_HELPER_FLAGS_3(clfdb, TCG_CALL_NO_WG, i64, env, i64, i32) +-DEF_HELPER_FLAGS_4(clfxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) ++DEF_HELPER_3(cgeb, i64, env, i64, i32) ++DEF_HELPER_3(cgdb, i64, env, i64, i32) ++DEF_HELPER_4(cgxb, i64, env, i64, i64, i32) ++DEF_HELPER_3(cfeb, i64, env, i64, i32) ++DEF_HELPER_3(cfdb, i64, env, i64, i32) ++DEF_HELPER_4(cfxb, i64, env, i64, i64, i32) ++DEF_HELPER_3(clgeb, i64, env, i64, i32) ++DEF_HELPER_3(clgdb, i64, env, i64, i32) 
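/*
 * Context for the hunk above, a hedged reading rather than anything stated
 * in the patch itself: DEF_HELPER_3(cgeb, i64, env, i64, i32) declares
 * roughly
 *
 *     uint64_t helper_cgeb(CPUS390XState *env, uint64_t v2, uint32_t m34);
 *
 * (the argument names are illustrative only). The dropped
 * DEF_HELPER_FLAGS_3(..., TCG_CALL_NO_WG, ...) variant declared the same
 * prototype but additionally promised TCG that the call never writes
 * guest globals; removing the flag presumably lets these float-to-integer
 * conversion helpers update global CPU state (e.g. when raising an IEEE
 * exception) without TCG caching stale values across the call.
 */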
++DEF_HELPER_4(clgxb, i64, env, i64, i64, i32) ++DEF_HELPER_3(clfeb, i64, env, i64, i32) ++DEF_HELPER_3(clfdb, i64, env, i64, i32) ++DEF_HELPER_4(clfxb, i64, env, i64, i64, i32) + DEF_HELPER_FLAGS_3(fieb, TCG_CALL_NO_WG, i64, env, i64, i32) + DEF_HELPER_FLAGS_3(fidb, TCG_CALL_NO_WG, i64, env, i64, i32) + DEF_HELPER_FLAGS_4(fixb, TCG_CALL_NO_WG, i64, env, i64, i64, i32) +diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def +deleted file mode 100644 +index 3e5594210c..0000000000 +--- a/target/s390x/insn-data.def ++++ /dev/null +@@ -1,1398 +0,0 @@ +-/* +- * Arguments to the opcode prototypes +- * +- * C(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC) +- * D(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA) +- * E(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA, FLAGS) +- * F(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, FLAGS) +- * +- * OPC = (op << 8) | op2 where op is the major, op2 the minor opcode +- * NAME = name of the opcode, used internally +- * FMT = format of the opcode (defined in insn-format.def) +- * FAC = facility the opcode is available in (defined in DisasFacility) +- * I1 = func in1_xx fills o->in1 +- * I2 = func in2_xx fills o->in2 +- * P = func prep_xx initializes o->*out* +- * W = func wout_xx writes o->*out* somewhere +- * OP = func op_xx does the bulk of the operation +- * CC = func cout_xx defines how cc should get set +- * DATA = immediate argument to op_xx function +- * FLAGS = categorize the type of instruction (e.g. for advanced checks) +- * +- * The helpers get called in order: I1, I2, P, OP, W, CC +- */ +- +-/* ADD */ +- C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32) +- C(0xb9f8, ARK, RRF_a, DO, r2, r3, new, r1_32, add, adds32) +- C(0x5a00, A, RX_a, Z, r1, m2_32s, new, r1_32, add, adds32) +- C(0xe35a, AY, RXY_a, LD, r1, m2_32s, new, r1_32, add, adds32) +- C(0xb908, AGR, RRE, Z, r1, r2, r1, 0, add, adds64) +- C(0xb918, AGFR, RRE, Z, r1, r2_32s, r1, 0, add, adds64) +- C(0xb9e8, AGRK, RRF_a, DO, r2, r3, r1, 0, add, adds64) +- C(0xe308, AG, RXY_a, Z, r1, m2_64, r1, 0, add, adds64) +- C(0xe318, AGF, RXY_a, Z, r1, m2_32s, r1, 0, add, adds64) +- F(0xb30a, AEBR, RRE, Z, e1, e2, new, e1, aeb, f32, IF_BFP) +- F(0xb31a, ADBR, RRE, Z, f1, f2, new, f1, adb, f64, IF_BFP) +- F(0xb34a, AXBR, RRE, Z, x2h, x2l, x1, x1, axb, f128, IF_BFP) +- F(0xed0a, AEB, RXE, Z, e1, m2_32u, new, e1, aeb, f32, IF_BFP) +- F(0xed1a, ADB, RXE, Z, f1, m2_64, new, f1, adb, f64, IF_BFP) +-/* ADD HIGH */ +- C(0xb9c8, AHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, adds32) +- C(0xb9d8, AHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, adds32) +-/* ADD IMMEDIATE */ +- C(0xc209, AFI, RIL_a, EI, r1, i2, new, r1_32, add, adds32) +- D(0xeb6a, ASI, SIY, GIE, la1, i2, new, 0, asi, adds32, MO_TESL) +- C(0xecd8, AHIK, RIE_d, DO, r3, i2, new, r1_32, add, adds32) +- C(0xc208, AGFI, RIL_a, EI, r1, i2, r1, 0, add, adds64) +- D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEQ) +- C(0xecd9, AGHIK, RIE_d, DO, r3, i2, r1, 0, add, adds64) +-/* ADD IMMEDIATE HIGH */ +- C(0xcc08, AIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, adds32) +-/* ADD HALFWORD */ +- C(0x4a00, AH, RX_a, Z, r1, m2_16s, new, r1_32, add, adds32) +- C(0xe37a, AHY, RXY_a, LD, r1, m2_16s, new, r1_32, add, adds32) +- C(0xe338, AGH, RXY_a, MIE2,r1, m2_16s, r1, 0, add, adds64) +-/* ADD HALFWORD IMMEDIATE */ +- C(0xa70a, AHI, RI_a, Z, r1, i2, new, r1_32, add, adds32) +- C(0xa70b, AGHI, RI_a, Z, r1, i2, r1, 0, add, adds64) +- +-/* ADD LOGICAL */ +- C(0x1e00, ALR, RR_a, Z, r1_32u, r2_32u, new, r1_32, add, addu32) +- C(0xb9fa, ALRK, 
RRF_a, DO, r2_32u, r3_32u, new, r1_32, add, addu32) +- C(0x5e00, AL, RX_a, Z, r1_32u, m2_32u, new, r1_32, add, addu32) +- C(0xe35e, ALY, RXY_a, LD, r1_32u, m2_32u, new, r1_32, add, addu32) +- C(0xb90a, ALGR, RRE, Z, r1, r2, r1, 0, addu64, addu64) +- C(0xb91a, ALGFR, RRE, Z, r1, r2_32u, r1, 0, addu64, addu64) +- C(0xb9ea, ALGRK, RRF_a, DO, r2, r3, r1, 0, addu64, addu64) +- C(0xe30a, ALG, RXY_a, Z, r1, m2_64, r1, 0, addu64, addu64) +- C(0xe31a, ALGF, RXY_a, Z, r1, m2_32u, r1, 0, addu64, addu64) +-/* ADD LOGICAL HIGH */ +- C(0xb9ca, ALHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, addu32) +- C(0xb9da, ALHHLR, RRF_a, HW, r2_sr32, r3_32u, new, r1_32h, add, addu32) +-/* ADD LOGICAL IMMEDIATE */ +- C(0xc20b, ALFI, RIL_a, EI, r1_32u, i2_32u, new, r1_32, add, addu32) +- C(0xc20a, ALGFI, RIL_a, EI, r1, i2_32u, r1, 0, addu64, addu64) +-/* ADD LOGICAL WITH SIGNED IMMEDIATE */ +- D(0xeb6e, ALSI, SIY, GIE, la1, i2_32u, new, 0, asi, addu32, MO_TEUL) +- C(0xecda, ALHSIK, RIE_d, DO, r3_32u, i2_32u, new, r1_32, add, addu32) +- D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asiu64, addu64, MO_TEQ) +- C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, addu64, addu64) +-/* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */ +- C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2_32u, new, r1_32h, add, addu32) +- C(0xcc0b, ALSIHN, RIL_a, HW, r1_sr32, i2_32u, new, r1_32h, add, 0) +-/* ADD LOGICAL WITH CARRY */ +- C(0xb998, ALCR, RRE, Z, r1_32u, r2_32u, new, r1_32, addc32, addu32) +- C(0xb988, ALCGR, RRE, Z, r1, r2, r1, 0, addc64, addu64) +- C(0xe398, ALC, RXY_a, Z, r1_32u, m2_32u, new, r1_32, addc32, addu32) +- C(0xe388, ALCG, RXY_a, Z, r1, m2_64, r1, 0, addc64, addu64) +- +-/* AND */ +- C(0x1400, NR, RR_a, Z, r1, r2, new, r1_32, and, nz32) +- C(0xb9f4, NRK, RRF_a, DO, r2, r3, new, r1_32, and, nz32) +- C(0x5400, N, RX_a, Z, r1, m2_32s, new, r1_32, and, nz32) +- C(0xe354, NY, RXY_a, LD, r1, m2_32s, new, r1_32, and, nz32) +- C(0xb980, NGR, RRE, Z, r1, r2, r1, 0, and, nz64) +- C(0xb9e4, NGRK, RRF_a, DO, r2, r3, r1, 0, and, nz64) +- C(0xe380, NG, RXY_a, Z, r1, m2_64, r1, 0, and, nz64) +- C(0xd400, NC, SS_a, Z, la1, a2, 0, 0, nc, 0) +-/* AND IMMEDIATE */ +- D(0xc00a, NIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2020) +- D(0xc00b, NILF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2000) +- D(0xa504, NIHH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1030) +- D(0xa505, NIHL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1020) +- D(0xa506, NILH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1010) +- D(0xa507, NILL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1000) +- D(0x9400, NI, SI, Z, la1, i2_8u, new, 0, ni, nz64, MO_UB) +- D(0xeb54, NIY, SIY, LD, la1, i2_8u, new, 0, ni, nz64, MO_UB) +- +-/* BRANCH AND LINK */ +- C(0x0500, BALR, RR_a, Z, 0, r2_nz, r1, 0, bal, 0) +- C(0x4500, BAL, RX_a, Z, 0, a2, r1, 0, bal, 0) +-/* BRANCH AND SAVE */ +- C(0x0d00, BASR, RR_a, Z, 0, r2_nz, r1, 0, bas, 0) +- C(0x4d00, BAS, RX_a, Z, 0, a2, r1, 0, bas, 0) +-/* BRANCH RELATIVE AND SAVE */ +- C(0xa705, BRAS, RI_b, Z, 0, 0, r1, 0, basi, 0) +- C(0xc005, BRASL, RIL_b, Z, 0, 0, r1, 0, basi, 0) +-/* BRANCH INDIRECT ON CONDITION */ +- C(0xe347, BIC, RXY_b, MIE2,0, m2_64w, 0, 0, bc, 0) +-/* BRANCH ON CONDITION */ +- C(0x0700, BCR, RR_b, Z, 0, r2_nz, 0, 0, bc, 0) +- C(0x4700, BC, RX_b, Z, 0, a2, 0, 0, bc, 0) +-/* BRANCH RELATIVE ON CONDITION */ +- C(0xa704, BRC, RI_c, Z, 0, 0, 0, 0, bc, 0) +- C(0xc004, BRCL, RIL_c, Z, 0, 0, 0, 0, bc, 0) +-/* BRANCH ON COUNT */ +- C(0x0600, BCTR, RR_a, Z, 0, r2_nz, 0, 0, bct32, 0) +- C(0xb946, BCTGR, RRE, Z, 0, r2_nz, 0, 0, bct64, 0) +- C(0x4600, BCT, 
RX_a, Z, 0, a2, 0, 0, bct32, 0) +- C(0xe346, BCTG, RXY_a, Z, 0, a2, 0, 0, bct64, 0) +-/* BRANCH RELATIVE ON COUNT */ +- C(0xa706, BRCT, RI_b, Z, 0, 0, 0, 0, bct32, 0) +- C(0xa707, BRCTG, RI_b, Z, 0, 0, 0, 0, bct64, 0) +-/* BRANCH RELATIVE ON COUNT HIGH */ +- C(0xcc06, BRCTH, RIL_b, HW, 0, 0, 0, 0, bcth, 0) +-/* BRANCH ON INDEX */ +- D(0x8600, BXH, RS_a, Z, 0, a2, 0, 0, bx32, 0, 0) +- D(0x8700, BXLE, RS_a, Z, 0, a2, 0, 0, bx32, 0, 1) +- D(0xeb44, BXHG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 0) +- D(0xeb45, BXLEG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 1) +-/* BRANCH RELATIVE ON INDEX */ +- D(0x8400, BRXH, RSI, Z, 0, 0, 0, 0, bx32, 0, 0) +- D(0x8500, BRXLE, RSI, Z, 0, 0, 0, 0, bx32, 0, 1) +- D(0xec44, BRXHG, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 0) +- D(0xec45, BRXHLE, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 1) +-/* BRANCH PREDICTION PRELOAD */ +- /* ??? Format is SMI, but implemented as NOP, so we need no fields. */ +- C(0xc700, BPP, E, EH, 0, 0, 0, 0, 0, 0) +-/* BRANCH PREDICTION RELATIVE PRELOAD */ +- /* ??? Format is MII, but implemented as NOP, so we need no fields. */ +- C(0xc500, BPRP, E, EH, 0, 0, 0, 0, 0, 0) +-/* NEXT INSTRUCTION ACCESS INTENT */ +- /* ??? Format is IE, but implemented as NOP, so we need no fields. */ +- C(0xb2fa, NIAI, E, EH, 0, 0, 0, 0, 0, 0) +- +-/* CHECKSUM */ +- C(0xb241, CKSM, RRE, Z, r1_o, ra2, new, r1_32, cksm, 0) +- +-/* COPY SIGN */ +- F(0xb372, CPSDR, RRF_b, FPSSH, f3, f2, new, f1, cps, 0, IF_AFP1 | IF_AFP2 | IF_AFP3) +- +-/* COMPARE */ +- C(0x1900, CR, RR_a, Z, r1_o, r2_o, 0, 0, 0, cmps32) +- C(0x5900, C, RX_a, Z, r1_o, m2_32s, 0, 0, 0, cmps32) +- C(0xe359, CY, RXY_a, LD, r1_o, m2_32s, 0, 0, 0, cmps32) +- C(0xb920, CGR, RRE, Z, r1_o, r2_o, 0, 0, 0, cmps64) +- C(0xb930, CGFR, RRE, Z, r1_o, r2_32s, 0, 0, 0, cmps64) +- C(0xe320, CG, RXY_a, Z, r1_o, m2_64, 0, 0, 0, cmps64) +- C(0xe330, CGF, RXY_a, Z, r1_o, m2_32s, 0, 0, 0, cmps64) +- F(0xb309, CEBR, RRE, Z, e1, e2, 0, 0, ceb, 0, IF_BFP) +- F(0xb319, CDBR, RRE, Z, f1, f2, 0, 0, cdb, 0, IF_BFP) +- F(0xb349, CXBR, RRE, Z, x2h, x2l, x1, 0, cxb, 0, IF_BFP) +- F(0xed09, CEB, RXE, Z, e1, m2_32u, 0, 0, ceb, 0, IF_BFP) +- F(0xed19, CDB, RXE, Z, f1, m2_64, 0, 0, cdb, 0, IF_BFP) +-/* COMPARE AND SIGNAL */ +- F(0xb308, KEBR, RRE, Z, e1, e2, 0, 0, keb, 0, IF_BFP) +- F(0xb318, KDBR, RRE, Z, f1, f2, 0, 0, kdb, 0, IF_BFP) +- F(0xb348, KXBR, RRE, Z, x2h, x2l, x1, 0, kxb, 0, IF_BFP) +- F(0xed08, KEB, RXE, Z, e1, m2_32u, 0, 0, keb, 0, IF_BFP) +- F(0xed18, KDB, RXE, Z, f1, m2_64, 0, 0, kdb, 0, IF_BFP) +-/* COMPARE IMMEDIATE */ +- C(0xc20d, CFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps32) +- C(0xc20c, CGFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps64) +-/* COMPARE RELATIVE LONG */ +- C(0xc60d, CRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps32) +- C(0xc608, CGRL, RIL_b, GIE, r1, mri2_64, 0, 0, 0, cmps64) +- C(0xc60c, CGFRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps64) +-/* COMPARE HALFWORD */ +- C(0x4900, CH, RX_a, Z, r1_o, m2_16s, 0, 0, 0, cmps32) +- C(0xe379, CHY, RXY_a, LD, r1_o, m2_16s, 0, 0, 0, cmps32) +- C(0xe334, CGH, RXY_a, GIE, r1_o, m2_16s, 0, 0, 0, cmps64) +-/* COMPARE HALFWORD IMMEDIATE */ +- C(0xa70e, CHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps32) +- C(0xa70f, CGHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps64) +- C(0xe554, CHHSI, SIL, GIE, m1_16s, i2, 0, 0, 0, cmps64) +- C(0xe55c, CHSI, SIL, GIE, m1_32s, i2, 0, 0, 0, cmps64) +- C(0xe558, CGHSI, SIL, GIE, m1_64, i2, 0, 0, 0, cmps64) +-/* COMPARE HALFWORD RELATIVE LONG */ +- C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_32s, 0, 0, 0, cmps32) +- C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmps64) +-/* COMPARE HIGH */ +- C(0xb9cd, 
CHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmps32) +- C(0xb9dd, CHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmps32) +- C(0xe3cd, CHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmps32) +-/* COMPARE IMMEDIATE HIGH */ +- C(0xcc0d, CIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmps32) +- +-/* COMPARE LOGICAL */ +- C(0x1500, CLR, RR_a, Z, r1, r2, 0, 0, 0, cmpu32) +- C(0x5500, CL, RX_a, Z, r1, m2_32s, 0, 0, 0, cmpu32) +- C(0xe355, CLY, RXY_a, LD, r1, m2_32s, 0, 0, 0, cmpu32) +- C(0xb921, CLGR, RRE, Z, r1, r2, 0, 0, 0, cmpu64) +- C(0xb931, CLGFR, RRE, Z, r1, r2_32u, 0, 0, 0, cmpu64) +- C(0xe321, CLG, RXY_a, Z, r1, m2_64, 0, 0, 0, cmpu64) +- C(0xe331, CLGF, RXY_a, Z, r1, m2_32u, 0, 0, 0, cmpu64) +- C(0xd500, CLC, SS_a, Z, la1, a2, 0, 0, clc, 0) +-/* COMPARE LOGICAL HIGH */ +- C(0xb9cf, CLHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmpu32) +- C(0xb9df, CLHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmpu32) +- C(0xe3cf, CLHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmpu32) +-/* COMPARE LOGICAL IMMEDIATE */ +- C(0xc20f, CLFI, RIL_a, EI, r1, i2, 0, 0, 0, cmpu32) +- C(0xc20e, CLGFI, RIL_a, EI, r1, i2_32u, 0, 0, 0, cmpu64) +- C(0x9500, CLI, SI, Z, m1_8u, i2_8u, 0, 0, 0, cmpu64) +- C(0xeb55, CLIY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, cmpu64) +- C(0xe555, CLHHSI, SIL, GIE, m1_16u, i2_16u, 0, 0, 0, cmpu64) +- C(0xe55d, CLFHSI, SIL, GIE, m1_32u, i2_16u, 0, 0, 0, cmpu64) +- C(0xe559, CLGHSI, SIL, GIE, m1_64, i2_16u, 0, 0, 0, cmpu64) +-/* COMPARE LOGICAL IMMEDIATE HIGH */ +- C(0xcc0f, CLIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmpu32) +-/* COMPARE LOGICAL RELATIVE LONG */ +- C(0xc60f, CLRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu32) +- C(0xc60a, CLGRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmpu64) +- C(0xc60e, CLGFRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu64) +- C(0xc607, CLHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu32) +- C(0xc606, CLGHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu64) +-/* COMPARE LOGICAL LONG */ +- C(0x0f00, CLCL, RR_a, Z, 0, 0, 0, 0, clcl, 0) +-/* COMPARE LOGICAL LONG EXTENDED */ +- C(0xa900, CLCLE, RS_a, Z, 0, a2, 0, 0, clcle, 0) +-/* COMPARE LOGICAL LONG UNICODE */ +- C(0xeb8f, CLCLU, RSY_a, E2, 0, a2, 0, 0, clclu, 0) +-/* COMPARE LOGICAL CHARACTERS UNDER MASK */ +- C(0xbd00, CLM, RS_b, Z, r1_o, a2, 0, 0, clm, 0) +- C(0xeb21, CLMY, RSY_b, LD, r1_o, a2, 0, 0, clm, 0) +- C(0xeb20, CLMH, RSY_b, Z, r1_sr32, a2, 0, 0, clm, 0) +-/* COMPARE LOGICAL STRING */ +- C(0xb25d, CLST, RRE, Z, r1_o, r2_o, 0, 0, clst, 0) +- +-/* COMPARE AND BRANCH */ +- D(0xecf6, CRB, RRS, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0) +- D(0xece4, CGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 0) +- D(0xec76, CRJ, RIE_b, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0) +- D(0xec64, CGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 0) +- D(0xecfe, CIB, RIS, GIE, r1_32s, i2, 0, 0, cj, 0, 0) +- D(0xecfc, CGIB, RIS, GIE, r1_o, i2, 0, 0, cj, 0, 0) +- D(0xec7e, CIJ, RIE_c, GIE, r1_32s, i2, 0, 0, cj, 0, 0) +- D(0xec7c, CGIJ, RIE_c, GIE, r1_o, i2, 0, 0, cj, 0, 0) +-/* COMPARE LOGICAL AND BRANCH */ +- D(0xecf7, CLRB, RRS, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1) +- D(0xece5, CLGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 1) +- D(0xec77, CLRJ, RIE_b, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1) +- D(0xec65, CLGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 1) +- D(0xecff, CLIB, RIS, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1) +- D(0xecfd, CLGIB, RIS, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1) +- D(0xec7f, CLIJ, RIE_c, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1) +- D(0xec7d, CLGIJ, RIE_c, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1) +- +-/* COMPARE AND SWAP */ +- D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) +- D(0xeb14, CSY, RSY_a, LD, 
r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) +- D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEQ) +-/* COMPARE DOUBLE AND SWAP */ +- D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) +- D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) +- C(0xeb3e, CDSG, RSY_a, Z, 0, 0, 0, 0, cdsg, 0) +-/* COMPARE AND SWAP AND STORE */ +- C(0xc802, CSST, SSF, CASS, la1, a2, 0, 0, csst, 0) +- +-/* COMPARE AND TRAP */ +- D(0xb972, CRT, RRF_c, GIE, r1_32s, r2_32s, 0, 0, ct, 0, 0) +- D(0xb960, CGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 0) +- D(0xec72, CIT, RIE_a, GIE, r1_32s, i2, 0, 0, ct, 0, 0) +- D(0xec70, CGIT, RIE_a, GIE, r1_o, i2, 0, 0, ct, 0, 0) +-/* COMPARE LOGICAL AND TRAP */ +- D(0xb973, CLRT, RRF_c, GIE, r1_32u, r2_32u, 0, 0, ct, 0, 1) +- D(0xb961, CLGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 1) +- D(0xeb23, CLT, RSY_b, MIE, r1_32u, m2_32u, 0, 0, ct, 0, 1) +- D(0xeb2b, CLGT, RSY_b, MIE, r1_o, m2_64, 0, 0, ct, 0, 1) +- D(0xec73, CLFIT, RIE_a, GIE, r1_32u, i2_32u, 0, 0, ct, 0, 1) +- D(0xec71, CLGIT, RIE_a, GIE, r1_o, i2_32u, 0, 0, ct, 0, 1) +- +-/* CONVERT TO DECIMAL */ +- C(0x4e00, CVD, RX_a, Z, r1_o, a2, 0, 0, cvd, 0) +- C(0xe326, CVDY, RXY_a, LD, r1_o, a2, 0, 0, cvd, 0) +-/* CONVERT TO FIXED */ +- F(0xb398, CFEBR, RRF_e, Z, 0, e2, new, r1_32, cfeb, 0, IF_BFP) +- F(0xb399, CFDBR, RRF_e, Z, 0, f2, new, r1_32, cfdb, 0, IF_BFP) +- F(0xb39a, CFXBR, RRF_e, Z, x2h, x2l, new, r1_32, cfxb, 0, IF_BFP) +- F(0xb3a8, CGEBR, RRF_e, Z, 0, e2, r1, 0, cgeb, 0, IF_BFP) +- F(0xb3a9, CGDBR, RRF_e, Z, 0, f2, r1, 0, cgdb, 0, IF_BFP) +- F(0xb3aa, CGXBR, RRF_e, Z, x2h, x2l, r1, 0, cgxb, 0, IF_BFP) +-/* CONVERT FROM FIXED */ +- F(0xb394, CEFBR, RRF_e, Z, 0, r2_32s, new, e1, cegb, 0, IF_BFP) +- F(0xb395, CDFBR, RRF_e, Z, 0, r2_32s, new, f1, cdgb, 0, IF_BFP) +- F(0xb396, CXFBR, RRF_e, Z, 0, r2_32s, new_P, x1, cxgb, 0, IF_BFP) +- F(0xb3a4, CEGBR, RRF_e, Z, 0, r2_o, new, e1, cegb, 0, IF_BFP) +- F(0xb3a5, CDGBR, RRF_e, Z, 0, r2_o, new, f1, cdgb, 0, IF_BFP) +- F(0xb3a6, CXGBR, RRF_e, Z, 0, r2_o, new_P, x1, cxgb, 0, IF_BFP) +-/* CONVERT TO LOGICAL */ +- F(0xb39c, CLFEBR, RRF_e, FPE, 0, e2, new, r1_32, clfeb, 0, IF_BFP) +- F(0xb39d, CLFDBR, RRF_e, FPE, 0, f2, new, r1_32, clfdb, 0, IF_BFP) +- F(0xb39e, CLFXBR, RRF_e, FPE, x2h, x2l, new, r1_32, clfxb, 0, IF_BFP) +- F(0xb3ac, CLGEBR, RRF_e, FPE, 0, e2, r1, 0, clgeb, 0, IF_BFP) +- F(0xb3ad, CLGDBR, RRF_e, FPE, 0, f2, r1, 0, clgdb, 0, IF_BFP) +- F(0xb3ae, CLGXBR, RRF_e, FPE, x2h, x2l, r1, 0, clgxb, 0, IF_BFP) +-/* CONVERT FROM LOGICAL */ +- F(0xb390, CELFBR, RRF_e, FPE, 0, r2_32u, new, e1, celgb, 0, IF_BFP) +- F(0xb391, CDLFBR, RRF_e, FPE, 0, r2_32u, new, f1, cdlgb, 0, IF_BFP) +- F(0xb392, CXLFBR, RRF_e, FPE, 0, r2_32u, new_P, x1, cxlgb, 0, IF_BFP) +- F(0xb3a0, CELGBR, RRF_e, FPE, 0, r2_o, new, e1, celgb, 0, IF_BFP) +- F(0xb3a1, CDLGBR, RRF_e, FPE, 0, r2_o, new, f1, cdlgb, 0, IF_BFP) +- F(0xb3a2, CXLGBR, RRF_e, FPE, 0, r2_o, new_P, x1, cxlgb, 0, IF_BFP) +- +-/* CONVERT UTF-8 TO UTF-16 */ +- D(0xb2a7, CU12, RRF_c, Z, 0, 0, 0, 0, cuXX, 0, 12) +-/* CONVERT UTF-8 TO UTF-32 */ +- D(0xb9b0, CU14, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 14) +-/* CONVERT UTF-16 to UTF-8 */ +- D(0xb2a6, CU21, RRF_c, Z, 0, 0, 0, 0, cuXX, 0, 21) +-/* CONVERT UTF-16 to UTF-32 */ +- D(0xb9b1, CU24, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 24) +-/* CONVERT UTF-32 to UTF-8 */ +- D(0xb9b2, CU41, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 41) +-/* CONVERT UTF-32 to UTF-16 */ +- D(0xb9b3, CU42, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 42) +- +-/* DIVIDE */ +- C(0x1d00, DR, RR_a, Z, r1_D32, r2_32s, 
new_P, r1_P32, divs32, 0) +- C(0x5d00, D, RX_a, Z, r1_D32, m2_32s, new_P, r1_P32, divs32, 0) +- F(0xb30d, DEBR, RRE, Z, e1, e2, new, e1, deb, 0, IF_BFP) +- F(0xb31d, DDBR, RRE, Z, f1, f2, new, f1, ddb, 0, IF_BFP) +- F(0xb34d, DXBR, RRE, Z, x2h, x2l, x1, x1, dxb, 0, IF_BFP) +- F(0xed0d, DEB, RXE, Z, e1, m2_32u, new, e1, deb, 0, IF_BFP) +- F(0xed1d, DDB, RXE, Z, f1, m2_64, new, f1, ddb, 0, IF_BFP) +-/* DIVIDE LOGICAL */ +- C(0xb997, DLR, RRE, Z, r1_D32, r2_32u, new_P, r1_P32, divu32, 0) +- C(0xe397, DL, RXY_a, Z, r1_D32, m2_32u, new_P, r1_P32, divu32, 0) +- C(0xb987, DLGR, RRE, Z, 0, r2_o, r1_P, 0, divu64, 0) +- C(0xe387, DLG, RXY_a, Z, 0, m2_64, r1_P, 0, divu64, 0) +-/* DIVIDE SINGLE */ +- C(0xb90d, DSGR, RRE, Z, r1p1, r2, r1_P, 0, divs64, 0) +- C(0xb91d, DSGFR, RRE, Z, r1p1, r2_32s, r1_P, 0, divs64, 0) +- C(0xe30d, DSG, RXY_a, Z, r1p1, m2_64, r1_P, 0, divs64, 0) +- C(0xe31d, DSGF, RXY_a, Z, r1p1, m2_32s, r1_P, 0, divs64, 0) +- +-/* EXCLUSIVE OR */ +- C(0x1700, XR, RR_a, Z, r1, r2, new, r1_32, xor, nz32) +- C(0xb9f7, XRK, RRF_a, DO, r2, r3, new, r1_32, xor, nz32) +- C(0x5700, X, RX_a, Z, r1, m2_32s, new, r1_32, xor, nz32) +- C(0xe357, XY, RXY_a, LD, r1, m2_32s, new, r1_32, xor, nz32) +- C(0xb982, XGR, RRE, Z, r1, r2, r1, 0, xor, nz64) +- C(0xb9e7, XGRK, RRF_a, DO, r2, r3, r1, 0, xor, nz64) +- C(0xe382, XG, RXY_a, Z, r1, m2_64, r1, 0, xor, nz64) +- C(0xd700, XC, SS_a, Z, 0, 0, 0, 0, xc, 0) +-/* EXCLUSIVE OR IMMEDIATE */ +- D(0xc006, XIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2020) +- D(0xc007, XILF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2000) +- D(0x9700, XI, SI, Z, la1, i2_8u, new, 0, xi, nz64, MO_UB) +- D(0xeb57, XIY, SIY, LD, la1, i2_8u, new, 0, xi, nz64, MO_UB) +- +-/* EXECUTE */ +- C(0x4400, EX, RX_a, Z, 0, a2, 0, 0, ex, 0) +-/* EXECUTE RELATIVE LONG */ +- C(0xc600, EXRL, RIL_b, EE, 0, ri2, 0, 0, ex, 0) +- +-/* EXTRACT ACCESS */ +- C(0xb24f, EAR, RRE, Z, 0, 0, new, r1_32, ear, 0) +-/* EXTRACT CPU ATTRIBUTE */ +- C(0xeb4c, ECAG, RSY_a, GIE, 0, a2, r1, 0, ecag, 0) +-/* EXTRACT CPU TIME */ +- F(0xc801, ECTG, SSF, ECT, 0, 0, 0, 0, ectg, 0, IF_IO) +-/* EXTRACT FPC */ +- F(0xb38c, EFPC, RRE, Z, 0, 0, new, r1_32, efpc, 0, IF_BFP) +-/* EXTRACT PSW */ +- C(0xb98d, EPSW, RRE, Z, 0, 0, 0, 0, epsw, 0) +- +-/* FIND LEFTMOST ONE */ +- C(0xb983, FLOGR, RRE, EI, 0, r2_o, r1_P, 0, flogr, 0) +- +-/* INSERT CHARACTER */ +- C(0x4300, IC, RX_a, Z, 0, m2_8u, 0, r1_8, mov2, 0) +- C(0xe373, ICY, RXY_a, LD, 0, m2_8u, 0, r1_8, mov2, 0) +-/* INSERT CHARACTERS UNDER MASK */ +- D(0xbf00, ICM, RS_b, Z, 0, a2, r1, 0, icm, 0, 0) +- D(0xeb81, ICMY, RSY_b, LD, 0, a2, r1, 0, icm, 0, 0) +- D(0xeb80, ICMH, RSY_b, Z, 0, a2, r1, 0, icm, 0, 32) +-/* INSERT IMMEDIATE */ +- D(0xc008, IIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2020) +- D(0xc009, IILF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2000) +- D(0xa500, IIHH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1030) +- D(0xa501, IIHL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1020) +- D(0xa502, IILH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1010) +- D(0xa503, IILL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1000) +-/* INSERT PROGRAM MASK */ +- C(0xb222, IPM, RRE, Z, 0, 0, r1, 0, ipm, 0) +- +-/* LOAD */ +- C(0x1800, LR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, 0) +- C(0x5800, L, RX_a, Z, 0, a2, new, r1_32, ld32s, 0) +- C(0xe358, LY, RXY_a, LD, 0, a2, new, r1_32, ld32s, 0) +- C(0xb904, LGR, RRE, Z, 0, r2_o, 0, r1, mov2, 0) +- C(0xb914, LGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, 0) +- C(0xe304, LG, RXY_a, Z, 0, a2, r1, 0, ld64, 0) +- C(0xe314, LGF, RXY_a, Z, 0, a2, r1, 0, 
ld32s, 0) +- F(0x2800, LDR, RR_a, Z, 0, f2, 0, f1, mov2, 0, IF_AFP1 | IF_AFP2) +- F(0x6800, LD, RX_a, Z, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) +- F(0xed65, LDY, RXY_a, LD, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) +- F(0x3800, LER, RR_a, Z, 0, e2, 0, cond_e1e2, mov2, 0, IF_AFP1 | IF_AFP2) +- F(0x7800, LE, RX_a, Z, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1) +- F(0xed64, LEY, RXY_a, LD, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1) +- F(0xb365, LXR, RRE, Z, x2h, x2l, 0, x1, movx, 0, IF_AFP1) +-/* LOAD IMMEDIATE */ +- C(0xc001, LGFI, RIL_a, EI, 0, i2, 0, r1, mov2, 0) +-/* LOAD RELATIVE LONG */ +- C(0xc40d, LRL, RIL_b, GIE, 0, ri2, new, r1_32, ld32s, 0) +- C(0xc408, LGRL, RIL_b, GIE, 0, ri2, r1, 0, ld64, 0) +- C(0xc40c, LGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32s, 0) +-/* LOAD ADDRESS */ +- C(0x4100, LA, RX_a, Z, 0, a2, 0, r1, mov2, 0) +- C(0xe371, LAY, RXY_a, LD, 0, a2, 0, r1, mov2, 0) +-/* LOAD ADDRESS EXTENDED */ +- C(0x5100, LAE, RX_a, Z, 0, a2, 0, r1, mov2e, 0) +- C(0xe375, LAEY, RXY_a, GIE, 0, a2, 0, r1, mov2e, 0) +-/* LOAD ADDRESS RELATIVE LONG */ +- C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0) +-/* LOAD AND ADD */ +- D(0xebf8, LAA, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, laa, adds32, MO_TESL) +- D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEQ) +-/* LOAD AND ADD LOGICAL */ +- D(0xebfa, LAAL, RSY_a, ILA, r3_32u, a2, new, in2_r1_32, laa, addu32, MO_TEUL) +- D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEQ) +-/* LOAD AND AND */ +- D(0xebf4, LAN, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lan, nz32, MO_TESL) +- D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEQ) +-/* LOAD AND EXCLUSIVE OR */ +- D(0xebf7, LAX, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lax, nz32, MO_TESL) +- D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEQ) +-/* LOAD AND OR */ +- D(0xebf6, LAO, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lao, nz32, MO_TESL) +- D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEQ) +-/* LOAD AND TEST */ +- C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32) +- C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64) +- C(0xb912, LTGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, s64) +- C(0xe312, LT, RXY_a, EI, 0, a2, new, r1_32, ld32s, s64) +- C(0xe302, LTG, RXY_a, EI, 0, a2, r1, 0, ld64, s64) +- C(0xe332, LTGF, RXY_a, GIE, 0, a2, r1, 0, ld32s, s64) +- F(0xb302, LTEBR, RRE, Z, 0, e2, 0, cond_e1e2, mov2, f32, IF_BFP) +- F(0xb312, LTDBR, RRE, Z, 0, f2, 0, f1, mov2, f64, IF_BFP) +- F(0xb342, LTXBR, RRE, Z, x2h, x2l, 0, x1, movx, f128, IF_BFP) +-/* LOAD AND TRAP */ +- C(0xe39f, LAT, RXY_a, LAT, 0, m2_32u, r1, 0, lat, 0) +- C(0xe385, LGAT, RXY_a, LAT, 0, a2, r1, 0, lgat, 0) +-/* LOAD AND ZERO RIGHTMOST BYTE */ +- C(0xe3eb, LZRF, RXY_a, LZRB, 0, m2_32u, new, r1_32, lzrb, 0) +- C(0xe32a, LZRG, RXY_a, LZRB, 0, m2_64, r1, 0, lzrb, 0) +-/* LOAD LOGICAL AND ZERO RIGHTMOST BYTE */ +- C(0xe33a, LLZRGF, RXY_a, LZRB, 0, m2_32u, r1, 0, lzrb, 0) +-/* LOAD BYTE */ +- C(0xb926, LBR, RRE, EI, 0, r2_8s, 0, r1_32, mov2, 0) +- C(0xb906, LGBR, RRE, EI, 0, r2_8s, 0, r1, mov2, 0) +- C(0xe376, LB, RXY_a, LD, 0, a2, new, r1_32, ld8s, 0) +- C(0xe377, LGB, RXY_a, LD, 0, a2, r1, 0, ld8s, 0) +-/* LOAD BYTE HIGH */ +- C(0xe3c0, LBH, RXY_a, HW, 0, a2, new, r1_32h, ld8s, 0) +-/* LOAD COMPLEMENT */ +- C(0x1300, LCR, RR_a, Z, 0, r2, new, r1_32, neg, neg32) +- C(0xb903, LCGR, RRE, Z, 0, r2, r1, 0, neg, neg64) +- C(0xb913, LCGFR, RRE, Z, 0, r2_32s, r1, 0, neg, neg64) +- F(0xb303, LCEBR, RRE, Z, 0, e2, new, e1, negf32, f32, IF_BFP) +- F(0xb313, LCDBR, RRE, Z, 0, f2, new, f1, negf64, f64, 
IF_BFP) +- F(0xb343, LCXBR, RRE, Z, x2h, x2l, new_P, x1, negf128, f128, IF_BFP) +- F(0xb373, LCDFR, RRE, FPSSH, 0, f2, new, f1, negf64, 0, IF_AFP1 | IF_AFP2) +-/* LOAD COUNT TO BLOCK BOUNDARY */ +- C(0xe727, LCBB, RXE, V, la2, 0, r1, 0, lcbb, 0) +-/* LOAD HALFWORD */ +- C(0xb927, LHR, RRE, EI, 0, r2_16s, 0, r1_32, mov2, 0) +- C(0xb907, LGHR, RRE, EI, 0, r2_16s, 0, r1, mov2, 0) +- C(0x4800, LH, RX_a, Z, 0, a2, new, r1_32, ld16s, 0) +- C(0xe378, LHY, RXY_a, LD, 0, a2, new, r1_32, ld16s, 0) +- C(0xe315, LGH, RXY_a, Z, 0, a2, r1, 0, ld16s, 0) +-/* LOAD HALFWORD HIGH */ +- C(0xe3c4, LHH, RXY_a, HW, 0, a2, new, r1_32h, ld16s, 0) +-/* LOAD HALFWORD IMMEDIATE */ +- C(0xa708, LHI, RI_a, Z, 0, i2, 0, r1_32, mov2, 0) +- C(0xa709, LGHI, RI_a, Z, 0, i2, 0, r1, mov2, 0) +-/* LOAD HALFWORD RELATIVE LONG */ +- C(0xc405, LHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16s, 0) +- C(0xc404, LGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16s, 0) +-/* LOAD HIGH */ +- C(0xe3ca, LFH, RXY_a, HW, 0, a2, new, r1_32h, ld32u, 0) +-/* LOAG HIGH AND TRAP */ +- C(0xe3c8, LFHAT, RXY_a, LAT, 0, m2_32u, r1, 0, lfhat, 0) +-/* LOAD LOGICAL */ +- C(0xb916, LLGFR, RRE, Z, 0, r2_32u, 0, r1, mov2, 0) +- C(0xe316, LLGF, RXY_a, Z, 0, a2, r1, 0, ld32u, 0) +-/* LOAD LOGICAL AND TRAP */ +- C(0xe39d, LLGFAT, RXY_a, LAT, 0, a2, r1, 0, llgfat, 0) +-/* LOAD LOGICAL RELATIVE LONG */ +- C(0xc40e, LLGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32u, 0) +-/* LOAD LOGICAL CHARACTER */ +- C(0xb994, LLCR, RRE, EI, 0, r2_8u, 0, r1_32, mov2, 0) +- C(0xb984, LLGCR, RRE, EI, 0, r2_8u, 0, r1, mov2, 0) +- C(0xe394, LLC, RXY_a, EI, 0, a2, new, r1_32, ld8u, 0) +- C(0xe390, LLGC, RXY_a, Z, 0, a2, r1, 0, ld8u, 0) +-/* LOAD LOGICAL CHARACTER HIGH */ +- C(0xe3c2, LLCH, RXY_a, HW, 0, a2, new, r1_32h, ld8u, 0) +-/* LOAD LOGICAL HALFWORD */ +- C(0xb995, LLHR, RRE, EI, 0, r2_16u, 0, r1_32, mov2, 0) +- C(0xb985, LLGHR, RRE, EI, 0, r2_16u, 0, r1, mov2, 0) +- C(0xe395, LLH, RXY_a, EI, 0, a2, new, r1_32, ld16u, 0) +- C(0xe391, LLGH, RXY_a, Z, 0, a2, r1, 0, ld16u, 0) +-/* LOAD LOGICAL HALFWORD HIGH */ +- C(0xe3c6, LLHH, RXY_a, HW, 0, a2, new, r1_32h, ld16u, 0) +-/* LOAD LOGICAL HALFWORD RELATIVE LONG */ +- C(0xc402, LLHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16u, 0) +- C(0xc406, LLGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16u, 0) +-/* LOAD LOGICAL IMMEDATE */ +- D(0xc00e, LLIHF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 32) +- D(0xc00f, LLILF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 0) +- D(0xa50c, LLIHH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 48) +- D(0xa50d, LLIHL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 32) +- D(0xa50e, LLILH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 16) +- D(0xa50f, LLILL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 0) +-/* LOAD LOGICAL THIRTY ONE BITS */ +- C(0xb917, LLGTR, RRE, Z, 0, r2_o, r1, 0, llgt, 0) +- C(0xe317, LLGT, RXY_a, Z, 0, m2_32u, r1, 0, llgt, 0) +-/* LOAD LOGICAL THIRTY ONE BITS AND TRAP */ +- C(0xe39c, LLGTAT, RXY_a, LAT, 0, m2_32u, r1, 0, llgtat, 0) +- +-/* LOAD FPR FROM GR */ +- F(0xb3c1, LDGR, RRE, FPRGR, 0, r2_o, 0, f1, mov2, 0, IF_AFP1) +-/* LOAD GR FROM FPR */ +- F(0xb3cd, LGDR, RRE, FPRGR, 0, f2, 0, r1, mov2, 0, IF_AFP2) +-/* LOAD NEGATIVE */ +- C(0x1100, LNR, RR_a, Z, 0, r2_32s, new, r1_32, nabs, nabs32) +- C(0xb901, LNGR, RRE, Z, 0, r2, r1, 0, nabs, nabs64) +- C(0xb911, LNGFR, RRE, Z, 0, r2_32s, r1, 0, nabs, nabs64) +- F(0xb301, LNEBR, RRE, Z, 0, e2, new, e1, nabsf32, f32, IF_BFP) +- F(0xb311, LNDBR, RRE, Z, 0, f2, new, f1, nabsf64, f64, IF_BFP) +- F(0xb341, LNXBR, RRE, Z, x2h, x2l, new_P, x1, nabsf128, f128, IF_BFP) +- F(0xb371, LNDFR, RRE, FPSSH, 0, f2, 
new, f1, nabsf64, 0, IF_AFP1 | IF_AFP2) +-/* LOAD ON CONDITION */ +- C(0xb9f2, LOCR, RRF_c, LOC, r1, r2, new, r1_32, loc, 0) +- C(0xb9e2, LOCGR, RRF_c, LOC, r1, r2, r1, 0, loc, 0) +- C(0xebf2, LOC, RSY_b, LOC, r1, m2_32u, new, r1_32, loc, 0) +- C(0xebe2, LOCG, RSY_b, LOC, r1, m2_64, r1, 0, loc, 0) +-/* LOAD HALFWORD IMMEDIATE ON CONDITION */ +- C(0xec42, LOCHI, RIE_g, LOC2, r1, i2, new, r1_32, loc, 0) +- C(0xec46, LOCGHI, RIE_g, LOC2, r1, i2, r1, 0, loc, 0) +- C(0xec4e, LOCHHI, RIE_g, LOC2, r1_sr32, i2, new, r1_32h, loc, 0) +-/* LOAD HIGH ON CONDITION */ +- C(0xb9e0, LOCFHR, RRF_c, LOC2, r1_sr32, r2, new, r1_32h, loc, 0) +- C(0xebe0, LOCFH, RSY_b, LOC2, r1_sr32, m2_32u, new, r1_32h, loc, 0) +-/* LOAD PAIR DISJOINT */ +- D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL) +- D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ) +-/* LOAD PAIR FROM QUADWORD */ +- C(0xe38f, LPQ, RXY_a, Z, 0, a2, r1_P, 0, lpq, 0) +-/* LOAD POSITIVE */ +- C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32) +- C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64) +- C(0xb910, LPGFR, RRE, Z, 0, r2_32s, r1, 0, abs, abs64) +- F(0xb300, LPEBR, RRE, Z, 0, e2, new, e1, absf32, f32, IF_BFP) +- F(0xb310, LPDBR, RRE, Z, 0, f2, new, f1, absf64, f64, IF_BFP) +- F(0xb340, LPXBR, RRE, Z, x2h, x2l, new_P, x1, absf128, f128, IF_BFP) +- F(0xb370, LPDFR, RRE, FPSSH, 0, f2, new, f1, absf64, 0, IF_AFP1 | IF_AFP2) +-/* LOAD REVERSED */ +- C(0xb91f, LRVR, RRE, Z, 0, r2_32u, new, r1_32, rev32, 0) +- C(0xb90f, LRVGR, RRE, Z, 0, r2_o, r1, 0, rev64, 0) +- C(0xe31f, LRVH, RXY_a, Z, 0, m2_16u, new, r1_16, rev16, 0) +- C(0xe31e, LRV, RXY_a, Z, 0, m2_32u, new, r1_32, rev32, 0) +- C(0xe30f, LRVG, RXY_a, Z, 0, m2_64, r1, 0, rev64, 0) +-/* LOAD ZERO */ +- F(0xb374, LZER, RRE, Z, 0, 0, 0, e1, zero, 0, IF_AFP1) +- F(0xb375, LZDR, RRE, Z, 0, 0, 0, f1, zero, 0, IF_AFP1) +- F(0xb376, LZXR, RRE, Z, 0, 0, 0, x1, zero2, 0, IF_AFP1) +- +-/* LOAD FPC */ +- F(0xb29d, LFPC, S, Z, 0, m2_32u, 0, 0, sfpc, 0, IF_BFP) +-/* LOAD FPC AND SIGNAL */ +- F(0xb2bd, LFAS, S, IEEEE_SIM, 0, m2_32u, 0, 0, sfas, 0, IF_DFP) +-/* LOAD FP INTEGER */ +- F(0xb357, FIEBR, RRF_e, Z, 0, e2, new, e1, fieb, 0, IF_BFP) +- F(0xb35f, FIDBR, RRF_e, Z, 0, f2, new, f1, fidb, 0, IF_BFP) +- F(0xb347, FIXBR, RRF_e, Z, x2h, x2l, new_P, x1, fixb, 0, IF_BFP) +- +-/* LOAD LENGTHENED */ +- F(0xb304, LDEBR, RRE, Z, 0, e2, new, f1, ldeb, 0, IF_BFP) +- F(0xb305, LXDBR, RRE, Z, 0, f2, new_P, x1, lxdb, 0, IF_BFP) +- F(0xb306, LXEBR, RRE, Z, 0, e2, new_P, x1, lxeb, 0, IF_BFP) +- F(0xed04, LDEB, RXE, Z, 0, m2_32u, new, f1, ldeb, 0, IF_BFP) +- F(0xed05, LXDB, RXE, Z, 0, m2_64, new_P, x1, lxdb, 0, IF_BFP) +- F(0xed06, LXEB, RXE, Z, 0, m2_32u, new_P, x1, lxeb, 0, IF_BFP) +- F(0xb324, LDER, RXE, Z, 0, e2, new, f1, lde, 0, IF_AFP1) +- F(0xed24, LDE, RXE, Z, 0, m2_32u, new, f1, lde, 0, IF_AFP1) +-/* LOAD ROUNDED */ +- F(0xb344, LEDBR, RRF_e, Z, 0, f2, new, e1, ledb, 0, IF_BFP) +- F(0xb345, LDXBR, RRF_e, Z, x2h, x2l, new, f1, ldxb, 0, IF_BFP) +- F(0xb346, LEXBR, RRF_e, Z, x2h, x2l, new, e1, lexb, 0, IF_BFP) +- +-/* LOAD MULTIPLE */ +- C(0x9800, LM, RS_a, Z, 0, a2, 0, 0, lm32, 0) +- C(0xeb98, LMY, RSY_a, LD, 0, a2, 0, 0, lm32, 0) +- C(0xeb04, LMG, RSY_a, Z, 0, a2, 0, 0, lm64, 0) +-/* LOAD MULTIPLE HIGH */ +- C(0xeb96, LMH, RSY_a, Z, 0, a2, 0, 0, lmh, 0) +-/* LOAD ACCESS MULTIPLE */ +- C(0x9a00, LAM, RS_a, Z, 0, a2, 0, 0, lam, 0) +- C(0xeb9a, LAMY, RSY_a, LD, 0, a2, 0, 0, lam, 0) +- +-/* MONITOR CALL */ +- C(0xaf00, MC, SI, Z, la1, 0, 0, 0, mc, 0) +- +-/* MOVE */ +- C(0xd200, MVC, SS_a, 
Z, la1, a2, 0, 0, mvc, 0) +- C(0xe544, MVHHI, SIL, GIE, la1, i2, 0, m1_16, mov2, 0) +- C(0xe54c, MVHI, SIL, GIE, la1, i2, 0, m1_32, mov2, 0) +- C(0xe548, MVGHI, SIL, GIE, la1, i2, 0, m1_64, mov2, 0) +- C(0x9200, MVI, SI, Z, la1, i2, 0, m1_8, mov2, 0) +- C(0xeb52, MVIY, SIY, LD, la1, i2, 0, m1_8, mov2, 0) +-/* MOVE INVERSE */ +- C(0xe800, MVCIN, SS_a, Z, la1, a2, 0, 0, mvcin, 0) +-/* MOVE LONG */ +- C(0x0e00, MVCL, RR_a, Z, 0, 0, 0, 0, mvcl, 0) +-/* MOVE LONG EXTENDED */ +- C(0xa800, MVCLE, RS_a, Z, 0, a2, 0, 0, mvcle, 0) +-/* MOVE LONG UNICODE */ +- C(0xeb8e, MVCLU, RSY_a, E2, 0, a2, 0, 0, mvclu, 0) +-/* MOVE NUMERICS */ +- C(0xd100, MVN, SS_a, Z, la1, a2, 0, 0, mvn, 0) +-/* MOVE PAGE */ +- C(0xb254, MVPG, RRE, Z, 0, 0, 0, 0, mvpg, 0) +-/* MOVE STRING */ +- C(0xb255, MVST, RRE, Z, 0, 0, 0, 0, mvst, 0) +-/* MOVE WITH OPTIONAL SPECIFICATION */ +- C(0xc800, MVCOS, SSF, MVCOS, la1, a2, 0, 0, mvcos, 0) +-/* MOVE WITH OFFSET */ +- /* Really format SS_b, but we pack both lengths into one argument +- for the helper call, so we might as well leave one 8-bit field. */ +- C(0xf100, MVO, SS_a, Z, la1, a2, 0, 0, mvo, 0) +-/* MOVE ZONES */ +- C(0xd300, MVZ, SS_a, Z, la1, a2, 0, 0, mvz, 0) +- +-/* MULTIPLY */ +- C(0x1c00, MR, RR_a, Z, r1p1_32s, r2_32s, new, r1_D32, mul, 0) +- C(0xb9ec, MGRK, RRF_a, MIE2,r3_o, r2_o, r1_P, 0, muls128, 0) +- C(0x5c00, M, RX_a, Z, r1p1_32s, m2_32s, new, r1_D32, mul, 0) +- C(0xe35c, MFY, RXY_a, GIE, r1p1_32s, m2_32s, new, r1_D32, mul, 0) +- C(0xe384, MG, RXY_a, MIE2,r1p1_o, m2_64, r1_P, 0, muls128, 0) +- F(0xb317, MEEBR, RRE, Z, e1, e2, new, e1, meeb, 0, IF_BFP) +- F(0xb31c, MDBR, RRE, Z, f1, f2, new, f1, mdb, 0, IF_BFP) +- F(0xb34c, MXBR, RRE, Z, x2h, x2l, x1, x1, mxb, 0, IF_BFP) +- F(0xb30c, MDEBR, RRE, Z, f1, e2, new, f1, mdeb, 0, IF_BFP) +- F(0xb307, MXDBR, RRE, Z, 0, f2, x1, x1, mxdb, 0, IF_BFP) +- F(0xed17, MEEB, RXE, Z, e1, m2_32u, new, e1, meeb, 0, IF_BFP) +- F(0xed1c, MDB, RXE, Z, f1, m2_64, new, f1, mdb, 0, IF_BFP) +- F(0xed0c, MDEB, RXE, Z, f1, m2_32u, new, f1, mdeb, 0, IF_BFP) +- F(0xed07, MXDB, RXE, Z, 0, m2_64, x1, x1, mxdb, 0, IF_BFP) +-/* MULTIPLY HALFWORD */ +- C(0x4c00, MH, RX_a, Z, r1_o, m2_16s, new, r1_32, mul, 0) +- C(0xe37c, MHY, RXY_a, GIE, r1_o, m2_16s, new, r1_32, mul, 0) +- C(0xe33c, MGH, RXY_a, MIE2,r1_o, m2_16s, r1, 0, mul, 0) +-/* MULTIPLY HALFWORD IMMEDIATE */ +- C(0xa70c, MHI, RI_a, Z, r1_o, i2, new, r1_32, mul, 0) +- C(0xa70d, MGHI, RI_a, Z, r1_o, i2, r1, 0, mul, 0) +-/* MULTIPLY LOGICAL */ +- C(0xb996, MLR, RRE, Z, r1p1_32u, r2_32u, new, r1_D32, mul, 0) +- C(0xe396, ML, RXY_a, Z, r1p1_32u, m2_32u, new, r1_D32, mul, 0) +- C(0xb986, MLGR, RRE, Z, r1p1, r2_o, r1_P, 0, mul128, 0) +- C(0xe386, MLG, RXY_a, Z, r1p1, m2_64, r1_P, 0, mul128, 0) +-/* MULTIPLY SINGLE */ +- C(0xb252, MSR, RRE, Z, r1_o, r2_o, new, r1_32, mul, 0) +- C(0xb9fd, MSRKC, RRF_a, MIE2,r3_32s, r2_32s, new, r1_32, mul, muls32) +- C(0x7100, MS, RX_a, Z, r1_o, m2_32s, new, r1_32, mul, 0) +- C(0xe351, MSY, RXY_a, LD, r1_o, m2_32s, new, r1_32, mul, 0) +- C(0xe353, MSC, RXY_a, MIE2,r1_32s, m2_32s, new, r1_32, mul, muls32) +- C(0xb90c, MSGR, RRE, Z, r1_o, r2_o, r1, 0, mul, 0) +- C(0xb9ed, MSGRKC, RRF_a, MIE2,r3_o, r2_o, new_P, out2_r1, muls128, muls64) +- C(0xb91c, MSGFR, RRE, Z, r1_o, r2_32s, r1, 0, mul, 0) +- C(0xe30c, MSG, RXY_a, Z, r1_o, m2_64, r1, 0, mul, 0) +- C(0xe383, MSGC, RXY_a, MIE2,r1_o, m2_64, new_P, out2_r1, muls128, muls64) +- C(0xe31c, MSGF, RXY_a, Z, r1_o, m2_32s, r1, 0, mul, 0) +-/* MULTIPLY SINGLE IMMEDIATE */ +- C(0xc201, MSFI, RIL_a, GIE, r1_o, i2, new, r1_32, mul, 0) 
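/*
 * A worked example of one table row, following the C() key documented at
 * the top of this file; the callback names assume the usual in1_/in2_/
 * prep_/wout_/op_/cout_ prefixes used by translate.c and are illustrative:
 *
 *   C(0xc201, MSFI, RIL_a, GIE, r1_o, i2, new, r1_32, mul, 0)
 *     0xc201 - (op << 8) | op2, i.e. major opcode 0xc2, minor 0x01
 *     RIL_a  - instruction format, defined in insn-format.def
 *     GIE    - gated on the general-instructions-extension facility
 *     r1_o   - in1_r1_o() fills o->in1 from register r1
 *     i2     - in2_i2() fills o->in2 with the signed immediate
 *     new    - prep_new() allocates a fresh temporary for o->out
 *     r1_32  - wout_r1_32() stores the low 32 bits of o->out back to r1
 *     mul    - op_mul() performs the 64-bit multiply of in1 and in2
 *     0      - no cout_xx hook; the condition code is left untouched
 */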
+- C(0xc200, MSGFI, RIL_a, GIE, r1_o, i2, r1, 0, mul, 0) +- +-/* MULTIPLY AND ADD */ +- F(0xb30e, MAEBR, RRD, Z, e1, e2, new, e1, maeb, 0, IF_BFP) +- F(0xb31e, MADBR, RRD, Z, f1, f2, new, f1, madb, 0, IF_BFP) +- F(0xed0e, MAEB, RXF, Z, e1, m2_32u, new, e1, maeb, 0, IF_BFP) +- F(0xed1e, MADB, RXF, Z, f1, m2_64, new, f1, madb, 0, IF_BFP) +-/* MULTIPLY AND SUBTRACT */ +- F(0xb30f, MSEBR, RRD, Z, e1, e2, new, e1, mseb, 0, IF_BFP) +- F(0xb31f, MSDBR, RRD, Z, f1, f2, new, f1, msdb, 0, IF_BFP) +- F(0xed0f, MSEB, RXF, Z, e1, m2_32u, new, e1, mseb, 0, IF_BFP) +- F(0xed1f, MSDB, RXF, Z, f1, m2_64, new, f1, msdb, 0, IF_BFP) +- +-/* OR */ +- C(0x1600, OR, RR_a, Z, r1, r2, new, r1_32, or, nz32) +- C(0xb9f6, ORK, RRF_a, DO, r2, r3, new, r1_32, or, nz32) +- C(0x5600, O, RX_a, Z, r1, m2_32s, new, r1_32, or, nz32) +- C(0xe356, OY, RXY_a, LD, r1, m2_32s, new, r1_32, or, nz32) +- C(0xb981, OGR, RRE, Z, r1, r2, r1, 0, or, nz64) +- C(0xb9e6, OGRK, RRF_a, DO, r2, r3, r1, 0, or, nz64) +- C(0xe381, OG, RXY_a, Z, r1, m2_64, r1, 0, or, nz64) +- C(0xd600, OC, SS_a, Z, la1, a2, 0, 0, oc, 0) +-/* OR IMMEDIATE */ +- D(0xc00c, OIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2020) +- D(0xc00d, OILF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2000) +- D(0xa508, OIHH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1030) +- D(0xa509, OIHL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1020) +- D(0xa50a, OILH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1010) +- D(0xa50b, OILL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1000) +- D(0x9600, OI, SI, Z, la1, i2_8u, new, 0, oi, nz64, MO_UB) +- D(0xeb56, OIY, SIY, LD, la1, i2_8u, new, 0, oi, nz64, MO_UB) +- +-/* PACK */ +- /* Really format SS_b, but we pack both lengths into one argument +- for the helper call, so we might as well leave one 8-bit field. */ +- C(0xf200, PACK, SS_a, Z, la1, a2, 0, 0, pack, 0) +-/* PACK ASCII */ +- C(0xe900, PKA, SS_f, E2, la1, a2, 0, 0, pka, 0) +-/* PACK UNICODE */ +- C(0xe100, PKU, SS_f, E2, la1, a2, 0, 0, pku, 0) +- +-/* PREFETCH */ +- /* Implemented as nops of course. */ +- C(0xe336, PFD, RXY_b, GIE, 0, 0, 0, 0, 0, 0) +- C(0xc602, PFDRL, RIL_c, GIE, 0, 0, 0, 0, 0, 0) +-/* PERFORM PROCESSOR ASSIST */ +- /* Implemented as nop of course. 
*/ +- C(0xb2e8, PPA, RRF_c, PPA, 0, 0, 0, 0, 0, 0) +- +-/* POPULATION COUNT */ +- C(0xb9e1, POPCNT, RRE, PC, 0, r2_o, r1, 0, popcnt, nz64) +- +-/* ROTATE LEFT SINGLE LOGICAL */ +- C(0xeb1d, RLL, RSY_a, Z, r3_o, sh32, new, r1_32, rll32, 0) +- C(0xeb1c, RLLG, RSY_a, Z, r3_o, sh64, r1, 0, rll64, 0) +- +-/* ROTATE THEN INSERT SELECTED BITS */ +- C(0xec55, RISBG, RIE_f, GIE, 0, r2, r1, 0, risbg, s64) +- C(0xec59, RISBGN, RIE_f, MIE, 0, r2, r1, 0, risbg, 0) +- C(0xec5d, RISBHG, RIE_f, HW, 0, r2, r1, 0, risbg, 0) +- C(0xec51, RISBLG, RIE_f, HW, 0, r2, r1, 0, risbg, 0) +-/* ROTATE_THEN SELECTED BITS */ +- C(0xec54, RNSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) +- C(0xec56, ROSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) +- C(0xec57, RXSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) +- +-/* SEARCH STRING */ +- C(0xb25e, SRST, RRE, Z, 0, 0, 0, 0, srst, 0) +-/* SEARCH STRING UNICODE */ +- C(0xb9be, SRSTU, RRE, ETF3, 0, 0, 0, 0, srstu, 0) +- +-/* SET ACCESS */ +- C(0xb24e, SAR, RRE, Z, 0, r2_o, 0, 0, sar, 0) +-/* SET ADDRESSING MODE */ +- D(0x010c, SAM24, E, Z, 0, 0, 0, 0, sam, 0, 0) +- D(0x010d, SAM31, E, Z, 0, 0, 0, 0, sam, 0, 1) +- D(0x010e, SAM64, E, Z, 0, 0, 0, 0, sam, 0, 3) +-/* SET FPC */ +- F(0xb384, SFPC, RRE, Z, 0, r1_o, 0, 0, sfpc, 0, IF_BFP) +-/* SET FPC AND SIGNAL */ +- F(0xb385, SFASR, RRE, IEEEE_SIM, 0, r1_o, 0, 0, sfas, 0, IF_DFP) +-/* SET BFP ROUNDING MODE */ +- F(0xb299, SRNM, S, Z, la2, 0, 0, 0, srnm, 0, IF_BFP) +- F(0xb2b8, SRNMB, S, FPE, la2, 0, 0, 0, srnmb, 0, IF_BFP) +-/* SET DFP ROUNDING MODE */ +- F(0xb2b9, SRNMT, S, DFPR, la2, 0, 0, 0, srnmt, 0, IF_DFP) +-/* SET PROGRAM MASK */ +- C(0x0400, SPM, RR_a, Z, r1, 0, 0, 0, spm, 0) +- +-/* SHIFT LEFT SINGLE */ +- D(0x8b00, SLA, RS_a, Z, r1, sh32, new, r1_32, sla, 0, 31) +- D(0xebdd, SLAK, RSY_a, DO, r3, sh32, new, r1_32, sla, 0, 31) +- D(0xeb0b, SLAG, RSY_a, Z, r3, sh64, r1, 0, sla, 0, 63) +-/* SHIFT LEFT SINGLE LOGICAL */ +- C(0x8900, SLL, RS_a, Z, r1_o, sh32, new, r1_32, sll, 0) +- C(0xebdf, SLLK, RSY_a, DO, r3_o, sh32, new, r1_32, sll, 0) +- C(0xeb0d, SLLG, RSY_a, Z, r3_o, sh64, r1, 0, sll, 0) +-/* SHIFT RIGHT SINGLE */ +- C(0x8a00, SRA, RS_a, Z, r1_32s, sh32, new, r1_32, sra, s32) +- C(0xebdc, SRAK, RSY_a, DO, r3_32s, sh32, new, r1_32, sra, s32) +- C(0xeb0a, SRAG, RSY_a, Z, r3_o, sh64, r1, 0, sra, s64) +-/* SHIFT RIGHT SINGLE LOGICAL */ +- C(0x8800, SRL, RS_a, Z, r1_32u, sh32, new, r1_32, srl, 0) +- C(0xebde, SRLK, RSY_a, DO, r3_32u, sh32, new, r1_32, srl, 0) +- C(0xeb0c, SRLG, RSY_a, Z, r3_o, sh64, r1, 0, srl, 0) +-/* SHIFT LEFT DOUBLE */ +- D(0x8f00, SLDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sla, 0, 31) +-/* SHIFT LEFT DOUBLE LOGICAL */ +- C(0x8d00, SLDL, RS_a, Z, r1_D32, sh64, new, r1_D32, sll, 0) +-/* SHIFT RIGHT DOUBLE */ +- C(0x8e00, SRDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sra, s64) +-/* SHIFT RIGHT DOUBLE LOGICAL */ +- C(0x8c00, SRDL, RS_a, Z, r1_D32, sh64, new, r1_D32, srl, 0) +- +-/* SQUARE ROOT */ +- F(0xb314, SQEBR, RRE, Z, 0, e2, new, e1, sqeb, 0, IF_BFP) +- F(0xb315, SQDBR, RRE, Z, 0, f2, new, f1, sqdb, 0, IF_BFP) +- F(0xb316, SQXBR, RRE, Z, x2h, x2l, new_P, x1, sqxb, 0, IF_BFP) +- F(0xed14, SQEB, RXE, Z, 0, m2_32u, new, e1, sqeb, 0, IF_BFP) +- F(0xed15, SQDB, RXE, Z, 0, m2_64, new, f1, sqdb, 0, IF_BFP) +- +-/* STORE */ +- C(0x5000, ST, RX_a, Z, r1_o, a2, 0, 0, st32, 0) +- C(0xe350, STY, RXY_a, LD, r1_o, a2, 0, 0, st32, 0) +- C(0xe324, STG, RXY_a, Z, r1_o, a2, 0, 0, st64, 0) +- F(0x6000, STD, RX_a, Z, f1, a2, 0, 0, st64, 0, IF_AFP1) +- F(0xed67, STDY, RXY_a, LD, f1, a2, 0, 0, st64, 0, IF_AFP1) +- F(0x7000, STE, RX_a, Z, e1, a2, 
0, 0, st32, 0, IF_AFP1) +- F(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0, IF_AFP1) +-/* STORE RELATIVE LONG */ +- C(0xc40f, STRL, RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0) +- C(0xc40b, STGRL, RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0) +-/* STORE CHARACTER */ +- C(0x4200, STC, RX_a, Z, r1_o, a2, 0, 0, st8, 0) +- C(0xe372, STCY, RXY_a, LD, r1_o, a2, 0, 0, st8, 0) +-/* STORE CHARACTER HIGH */ +- C(0xe3c3, STCH, RXY_a, HW, r1_sr32, a2, 0, 0, st8, 0) +-/* STORE CHARACTERS UNDER MASK */ +- D(0xbe00, STCM, RS_b, Z, r1_o, a2, 0, 0, stcm, 0, 0) +- D(0xeb2d, STCMY, RSY_b, LD, r1_o, a2, 0, 0, stcm, 0, 0) +- D(0xeb2c, STCMH, RSY_b, Z, r1_o, a2, 0, 0, stcm, 0, 32) +-/* STORE HALFWORD */ +- C(0x4000, STH, RX_a, Z, r1_o, a2, 0, 0, st16, 0) +- C(0xe370, STHY, RXY_a, LD, r1_o, a2, 0, 0, st16, 0) +-/* STORE HALFWORD HIGH */ +- C(0xe3c7, STHH, RXY_a, HW, r1_sr32, a2, 0, 0, st16, 0) +-/* STORE HALFWORD RELATIVE LONG */ +- C(0xc407, STHRL, RIL_b, GIE, r1_o, ri2, 0, 0, st16, 0) +-/* STORE HIGH */ +- C(0xe3cb, STFH, RXY_a, HW, r1_sr32, a2, 0, 0, st32, 0) +-/* STORE ON CONDITION */ +- D(0xebf3, STOC, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 0) +- D(0xebe3, STOCG, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 1) +-/* STORE HIGH ON CONDITION */ +- D(0xebe1, STOCFH, RSY_b, LOC2, 0, 0, 0, 0, soc, 0, 2) +-/* STORE REVERSED */ +- C(0xe33f, STRVH, RXY_a, Z, la2, r1_16u, new, m1_16, rev16, 0) +- C(0xe33e, STRV, RXY_a, Z, la2, r1_32u, new, m1_32, rev32, 0) +- C(0xe32f, STRVG, RXY_a, Z, la2, r1_o, new, m1_64, rev64, 0) +- +-/* STORE CLOCK */ +- F(0xb205, STCK, S, Z, la2, 0, new, m1_64, stck, 0, IF_IO) +- F(0xb27c, STCKF, S, SCF, la2, 0, new, m1_64, stck, 0, IF_IO) +-/* STORE CLOCK EXTENDED */ +- F(0xb278, STCKE, S, Z, 0, a2, 0, 0, stcke, 0, IF_IO) +- +-/* STORE FACILITY LIST EXTENDED */ +- C(0xb2b0, STFLE, S, SFLE, 0, a2, 0, 0, stfle, 0) +-/* STORE FPC */ +- F(0xb29c, STFPC, S, Z, 0, a2, new, m2_32, efpc, 0, IF_BFP) +- +-/* STORE MULTIPLE */ +- D(0x9000, STM, RS_a, Z, 0, a2, 0, 0, stm, 0, 4) +- D(0xeb90, STMY, RSY_a, LD, 0, a2, 0, 0, stm, 0, 4) +- D(0xeb24, STMG, RSY_a, Z, 0, a2, 0, 0, stm, 0, 8) +-/* STORE MULTIPLE HIGH */ +- C(0xeb26, STMH, RSY_a, Z, 0, a2, 0, 0, stmh, 0) +-/* STORE ACCESS MULTIPLE */ +- C(0x9b00, STAM, RS_a, Z, 0, a2, 0, 0, stam, 0) +- C(0xeb9b, STAMY, RSY_a, LD, 0, a2, 0, 0, stam, 0) +-/* STORE PAIR TO QUADWORD */ +- C(0xe38e, STPQ, RXY_a, Z, 0, a2, r1_P, 0, stpq, 0) +- +-/* SUBTRACT */ +- C(0x1b00, SR, RR_a, Z, r1, r2, new, r1_32, sub, subs32) +- C(0xb9f9, SRK, RRF_a, DO, r2, r3, new, r1_32, sub, subs32) +- C(0x5b00, S, RX_a, Z, r1, m2_32s, new, r1_32, sub, subs32) +- C(0xe35b, SY, RXY_a, LD, r1, m2_32s, new, r1_32, sub, subs32) +- C(0xb909, SGR, RRE, Z, r1, r2, r1, 0, sub, subs64) +- C(0xb919, SGFR, RRE, Z, r1, r2_32s, r1, 0, sub, subs64) +- C(0xb9e9, SGRK, RRF_a, DO, r2, r3, r1, 0, sub, subs64) +- C(0xe309, SG, RXY_a, Z, r1, m2_64, r1, 0, sub, subs64) +- C(0xe319, SGF, RXY_a, Z, r1, m2_32s, r1, 0, sub, subs64) +- F(0xb30b, SEBR, RRE, Z, e1, e2, new, e1, seb, f32, IF_BFP) +- F(0xb31b, SDBR, RRE, Z, f1, f2, new, f1, sdb, f64, IF_BFP) +- F(0xb34b, SXBR, RRE, Z, x2h, x2l, x1, x1, sxb, f128, IF_BFP) +- F(0xed0b, SEB, RXE, Z, e1, m2_32u, new, e1, seb, f32, IF_BFP) +- F(0xed1b, SDB, RXE, Z, f1, m2_64, new, f1, sdb, f64, IF_BFP) +-/* SUBTRACT HALFWORD */ +- C(0x4b00, SH, RX_a, Z, r1, m2_16s, new, r1_32, sub, subs32) +- C(0xe37b, SHY, RXY_a, LD, r1, m2_16s, new, r1_32, sub, subs32) +- C(0xe339, SGH, RXY_a, MIE2,r1, m2_16s, r1, 0, sub, subs64) +-/* SUBTRACT HIGH */ +- C(0xb9c9, SHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, 
subs32) +- C(0xb9d9, SHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, sub, subs32) +-/* SUBTRACT LOGICAL */ +- C(0x1f00, SLR, RR_a, Z, r1_32u, r2_32u, new, r1_32, sub, subu32) +- C(0xb9fb, SLRK, RRF_a, DO, r2_32u, r3_32u, new, r1_32, sub, subu32) +- C(0x5f00, SL, RX_a, Z, r1_32u, m2_32u, new, r1_32, sub, subu32) +- C(0xe35f, SLY, RXY_a, LD, r1_32u, m2_32u, new, r1_32, sub, subu32) +- C(0xb90b, SLGR, RRE, Z, r1, r2, r1, 0, subu64, subu64) +- C(0xb91b, SLGFR, RRE, Z, r1, r2_32u, r1, 0, subu64, subu64) +- C(0xb9eb, SLGRK, RRF_a, DO, r2, r3, r1, 0, subu64, subu64) +- C(0xe30b, SLG, RXY_a, Z, r1, m2_64, r1, 0, subu64, subu64) +- C(0xe31b, SLGF, RXY_a, Z, r1, m2_32u, r1, 0, subu64, subu64) +-/* SUBTRACT LOCICAL HIGH */ +- C(0xb9cb, SLHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subu32) +- C(0xb9db, SLHHLR, RRF_a, HW, r2_sr32, r3_32u, new, r1_32h, sub, subu32) +-/* SUBTRACT LOGICAL IMMEDIATE */ +- C(0xc205, SLFI, RIL_a, EI, r1_32u, i2_32u, new, r1_32, sub, subu32) +- C(0xc204, SLGFI, RIL_a, EI, r1, i2_32u, r1, 0, subu64, subu64) +-/* SUBTRACT LOGICAL WITH BORROW */ +- C(0xb999, SLBR, RRE, Z, r1_32u, r2_32u, new, r1_32, subb32, subu32) +- C(0xb989, SLBGR, RRE, Z, r1, r2, r1, 0, subb64, subu64) +- C(0xe399, SLB, RXY_a, Z, r1_32u, m2_32u, new, r1_32, subb32, subu32) +- C(0xe389, SLBG, RXY_a, Z, r1, m2_64, r1, 0, subb64, subu64) +- +-/* SUPERVISOR CALL */ +- C(0x0a00, SVC, I, Z, 0, 0, 0, 0, svc, 0) +- +-/* TEST ADDRESSING MODE */ +- C(0x010b, TAM, E, Z, 0, 0, 0, 0, tam, 0) +- +-/* TEST AND SET */ +- C(0x9300, TS, S, Z, 0, a2, 0, 0, ts, 0) +- +-/* TEST DATA CLASS */ +- F(0xed10, TCEB, RXE, Z, e1, a2, 0, 0, tceb, 0, IF_BFP) +- F(0xed11, TCDB, RXE, Z, f1, a2, 0, 0, tcdb, 0, IF_BFP) +- F(0xed12, TCXB, RXE, Z, 0, a2, x1, 0, tcxb, 0, IF_BFP) +- +-/* TEST DECIMAL */ +- C(0xebc0, TP, RSL, E2, la1, 0, 0, 0, tp, 0) +- +-/* TEST UNDER MASK */ +- C(0x9100, TM, SI, Z, m1_8u, i2_8u, 0, 0, 0, tm32) +- C(0xeb51, TMY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, tm32) +- D(0xa702, TMHH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 48) +- D(0xa703, TMHL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 32) +- D(0xa700, TMLH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 16) +- D(0xa701, TMLL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 0) +- +-/* TRANSLATE */ +- C(0xdc00, TR, SS_a, Z, la1, a2, 0, 0, tr, 0) +-/* TRANSLATE AND TEST */ +- C(0xdd00, TRT, SS_a, Z, la1, a2, 0, 0, trt, 0) +-/* TRANSLATE AND TEST REVERSE */ +- C(0xd000, TRTR, SS_a, ETF3, la1, a2, 0, 0, trtr, 0) +-/* TRANSLATE EXTENDED */ +- C(0xb2a5, TRE, RRE, Z, 0, r2, r1_P, 0, tre, 0) +- +-/* TRANSLATE ONE TO ONE */ +- C(0xb993, TROO, RRF_c, E2, 0, 0, 0, 0, trXX, 0) +-/* TRANSLATE ONE TO TWO */ +- C(0xb992, TROT, RRF_c, E2, 0, 0, 0, 0, trXX, 0) +-/* TRANSLATE TWO TO ONE */ +- C(0xb991, TRTO, RRF_c, E2, 0, 0, 0, 0, trXX, 0) +-/* TRANSLATE TWO TO TWO */ +- C(0xb990, TRTT, RRF_c, E2, 0, 0, 0, 0, trXX, 0) +- +-/* UNPACK */ +- /* Really format SS_b, but we pack both lengths into one argument +- for the helper call, so we might as well leave one 8-bit field. 
*/ +- C(0xf300, UNPK, SS_a, Z, la1, a2, 0, 0, unpk, 0) +-/* UNPACK ASCII */ +- C(0xea00, UNPKA, SS_a, E2, la1, a2, 0, 0, unpka, 0) +-/* UNPACK UNICODE */ +- C(0xe200, UNPKU, SS_a, E2, la1, a2, 0, 0, unpku, 0) +- +-/* MSA Instructions */ +- D(0xb91e, KMAC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMAC) +- D(0xb928, PCKMO, RRE, MSA3, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCKMO) +- D(0xb92a, KMF, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMF) +- D(0xb92b, KMO, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMO) +- D(0xb92c, PCC, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCC) +- D(0xb92d, KMCTR, RRF_b, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMCTR) +- D(0xb92e, KM, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KM) +- D(0xb92f, KMC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMC) +- D(0xb929, KMA, RRF_b, MSA8, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMA) +- D(0xb93c, PPNO, RRE, MSA5, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PPNO) +- D(0xb93e, KIMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KIMD) +- D(0xb93f, KLMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KLMD) +- +-/* === Vector Support Instructions === */ +- +-/* VECTOR BIT PERMUTE */ +- E(0xe785, VBPERM, VRR_c, VE, 0, 0, 0, 0, vbperm, 0, 0, IF_VEC) +-/* VECTOR GATHER ELEMENT */ +- E(0xe713, VGEF, VRV, V, la2, 0, 0, 0, vge, 0, ES_32, IF_VEC) +- E(0xe712, VGEG, VRV, V, la2, 0, 0, 0, vge, 0, ES_64, IF_VEC) +-/* VECTOR GENERATE BYTE MASK */ +- F(0xe744, VGBM, VRI_a, V, 0, 0, 0, 0, vgbm, 0, IF_VEC) +-/* VECTOR GENERATE MASK */ +- F(0xe746, VGM, VRI_b, V, 0, 0, 0, 0, vgm, 0, IF_VEC) +-/* VECTOR LOAD */ +- F(0xe706, VL, VRX, V, la2, 0, 0, 0, vl, 0, IF_VEC) +- F(0xe756, VLR, VRR_a, V, 0, 0, 0, 0, vlr, 0, IF_VEC) +-/* VECTOR LOAD AND REPLICATE */ +- F(0xe705, VLREP, VRX, V, la2, 0, 0, 0, vlrep, 0, IF_VEC) +-/* VECTOR LOAD ELEMENT */ +- E(0xe700, VLEB, VRX, V, la2, 0, 0, 0, vle, 0, ES_8, IF_VEC) +- E(0xe701, VLEH, VRX, V, la2, 0, 0, 0, vle, 0, ES_16, IF_VEC) +- E(0xe703, VLEF, VRX, V, la2, 0, 0, 0, vle, 0, ES_32, IF_VEC) +- E(0xe702, VLEG, VRX, V, la2, 0, 0, 0, vle, 0, ES_64, IF_VEC) +-/* VECTOR LOAD ELEMENT IMMEDIATE */ +- E(0xe740, VLEIB, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_8, IF_VEC) +- E(0xe741, VLEIH, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_16, IF_VEC) +- E(0xe743, VLEIF, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_32, IF_VEC) +- E(0xe742, VLEIG, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_64, IF_VEC) +-/* VECTOR LOAD GR FROM VR ELEMENT */ +- F(0xe721, VLGV, VRS_c, V, la2, 0, r1, 0, vlgv, 0, IF_VEC) +-/* VECTOR LOAD LOGICAL ELEMENT AND ZERO */ +- F(0xe704, VLLEZ, VRX, V, la2, 0, 0, 0, vllez, 0, IF_VEC) +-/* VECTOR LOAD MULTIPLE */ +- F(0xe736, VLM, VRS_a, V, la2, 0, 0, 0, vlm, 0, IF_VEC) +-/* VECTOR LOAD TO BLOCK BOUNDARY */ +- F(0xe707, VLBB, VRX, V, la2, 0, 0, 0, vlbb, 0, IF_VEC) +-/* VECTOR LOAD VR ELEMENT FROM GR */ +- F(0xe722, VLVG, VRS_b, V, la2, r3, 0, 0, vlvg, 0, IF_VEC) +-/* VECTOR LOAD VR FROM GRS DISJOINT */ +- F(0xe762, VLVGP, VRR_f, V, r2, r3, 0, 0, vlvgp, 0, IF_VEC) +-/* VECTOR LOAD WITH LENGTH */ +- F(0xe737, VLL, VRS_b, V, la2, r3_32u, 0, 0, vll, 0, IF_VEC) +-/* VECTOR MERGE HIGH */ +- F(0xe761, VMRH, VRR_c, V, 0, 0, 0, 0, vmr, 0, IF_VEC) +-/* VECTOR MERGE LOW */ +- F(0xe760, VMRL, VRR_c, V, 0, 0, 0, 0, vmr, 0, IF_VEC) +-/* VECTOR PACK */ +- F(0xe794, VPK, VRR_c, V, 0, 0, 0, 0, vpk, 0, IF_VEC) +-/* VECTOR PACK SATURATE */ +- F(0xe797, VPKS, VRR_b, V, 0, 0, 0, 0, vpk, 0, IF_VEC) +-/* VECTOR PACK LOGICAL SATURATE */ +- F(0xe795, VPKLS, VRR_b, V, 0, 0, 0, 0, vpk, 0, IF_VEC) +- F(0xe78c, VPERM, VRR_e, V, 0, 0, 0, 0, vperm, 0, IF_VEC) +-/* VECTOR PERMUTE 
DOUBLEWORD IMMEDIATE */ +- F(0xe784, VPDI, VRR_c, V, 0, 0, 0, 0, vpdi, 0, IF_VEC) +-/* VECTOR REPLICATE */ +- F(0xe74d, VREP, VRI_c, V, 0, 0, 0, 0, vrep, 0, IF_VEC) +-/* VECTOR REPLICATE IMMEDIATE */ +- F(0xe745, VREPI, VRI_a, V, 0, 0, 0, 0, vrepi, 0, IF_VEC) +-/* VECTOR SCATTER ELEMENT */ +- E(0xe71b, VSCEF, VRV, V, la2, 0, 0, 0, vsce, 0, ES_32, IF_VEC) +- E(0xe71a, VSCEG, VRV, V, la2, 0, 0, 0, vsce, 0, ES_64, IF_VEC) +-/* VECTOR SELECT */ +- F(0xe78d, VSEL, VRR_e, V, 0, 0, 0, 0, vsel, 0, IF_VEC) +-/* VECTOR SIGN EXTEND TO DOUBLEWORD */ +- F(0xe75f, VSEG, VRR_a, V, 0, 0, 0, 0, vseg, 0, IF_VEC) +-/* VECTOR STORE */ +- F(0xe70e, VST, VRX, V, la2, 0, 0, 0, vst, 0, IF_VEC) +-/* VECTOR STORE ELEMENT */ +- E(0xe708, VSTEB, VRX, V, la2, 0, 0, 0, vste, 0, ES_8, IF_VEC) +- E(0xe709, VSTEH, VRX, V, la2, 0, 0, 0, vste, 0, ES_16, IF_VEC) +- E(0xe70b, VSTEF, VRX, V, la2, 0, 0, 0, vste, 0, ES_32, IF_VEC) +- E(0xe70a, VSTEG, VRX, V, la2, 0, 0, 0, vste, 0, ES_64, IF_VEC) +-/* VECTOR STORE MULTIPLE */ +- F(0xe73e, VSTM, VRS_a, V, la2, 0, 0, 0, vstm, 0, IF_VEC) +-/* VECTOR STORE WITH LENGTH */ +- F(0xe73f, VSTL, VRS_b, V, la2, r3_32u, 0, 0, vstl, 0, IF_VEC) +-/* VECTOR UNPACK HIGH */ +- F(0xe7d7, VUPH, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) +-/* VECTOR UNPACK LOGICAL HIGH */ +- F(0xe7d5, VUPLH, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) +-/* VECTOR UNPACK LOW */ +- F(0xe7d6, VUPL, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) +-/* VECTOR UNPACK LOGICAL LOW */ +- F(0xe7d4, VUPLL, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) +- +-/* === Vector Integer Instructions === */ +- +-/* VECTOR ADD */ +- F(0xe7f3, VA, VRR_c, V, 0, 0, 0, 0, va, 0, IF_VEC) +-/* VECTOR ADD COMPUTE CARRY */ +- F(0xe7f1, VACC, VRR_c, V, 0, 0, 0, 0, vacc, 0, IF_VEC) +-/* VECTOR ADD WITH CARRY */ +- F(0xe7bb, VAC, VRR_d, V, 0, 0, 0, 0, vac, 0, IF_VEC) +-/* VECTOR ADD WITH CARRY COMPUTE CARRY */ +- F(0xe7b9, VACCC, VRR_d, V, 0, 0, 0, 0, vaccc, 0, IF_VEC) +-/* VECTOR AND */ +- F(0xe768, VN, VRR_c, V, 0, 0, 0, 0, vn, 0, IF_VEC) +-/* VECTOR AND WITH COMPLEMENT */ +- F(0xe769, VNC, VRR_c, V, 0, 0, 0, 0, vnc, 0, IF_VEC) +-/* VECTOR AVERAGE */ +- F(0xe7f2, VAVG, VRR_c, V, 0, 0, 0, 0, vavg, 0, IF_VEC) +-/* VECTOR AVERAGE LOGICAL */ +- F(0xe7f0, VAVGL, VRR_c, V, 0, 0, 0, 0, vavgl, 0, IF_VEC) +-/* VECTOR CHECKSUM */ +- F(0xe766, VCKSM, VRR_c, V, 0, 0, 0, 0, vcksm, 0, IF_VEC) +-/* VECTOR ELEMENT COMPARE */ +- F(0xe7db, VEC, VRR_a, V, 0, 0, 0, 0, vec, cmps64, IF_VEC) +-/* VECTOR ELEMENT COMPARE LOGICAL */ +- F(0xe7d9, VECL, VRR_a, V, 0, 0, 0, 0, vec, cmpu64, IF_VEC) +-/* VECTOR COMPARE EQUAL */ +- E(0xe7f8, VCEQ, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_EQ, IF_VEC) +-/* VECTOR COMPARE HIGH */ +- E(0xe7fb, VCH, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GT, IF_VEC) +-/* VECTOR COMPARE HIGH LOGICAL */ +- E(0xe7f9, VCHL, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GTU, IF_VEC) +-/* VECTOR COUNT LEADING ZEROS */ +- F(0xe753, VCLZ, VRR_a, V, 0, 0, 0, 0, vclz, 0, IF_VEC) +-/* VECTOR COUNT TRAILING ZEROS */ +- F(0xe752, VCTZ, VRR_a, V, 0, 0, 0, 0, vctz, 0, IF_VEC) +-/* VECTOR EXCLUSIVE OR */ +- F(0xe76d, VX, VRR_c, V, 0, 0, 0, 0, vx, 0, IF_VEC) +-/* VECTOR GALOIS FIELD MULTIPLY SUM */ +- F(0xe7b4, VGFM, VRR_c, V, 0, 0, 0, 0, vgfm, 0, IF_VEC) +-/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */ +- F(0xe7bc, VGFMA, VRR_d, V, 0, 0, 0, 0, vgfma, 0, IF_VEC) +-/* VECTOR LOAD COMPLEMENT */ +- F(0xe7de, VLC, VRR_a, V, 0, 0, 0, 0, vlc, 0, IF_VEC) +-/* VECTOR LOAD POSITIVE */ +- F(0xe7df, VLP, VRR_a, V, 0, 0, 0, 0, vlp, 0, IF_VEC) +-/* VECTOR MAXIMUM */ +- F(0xe7ff, VMX, VRR_c, V, 0, 0, 0, 0, vmx, 0, 
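/*
 * Sketch (function name hypothetical): the ES_8..ES_64 data arguments
 * on the element insns above are log2 of the element size, so element
 * enr of a 16-byte vector register starts at byte offset enr << es.
 */
static int vec_elem_offset(int enr, int es)
{
    return enr << es;   /* element 3 of halfwords (es = 1) -> byte 6 */
}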
IF_VEC) +-/* VECTOR MAXIMUM LOGICAL */ +- F(0xe7fd, VMXL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) +-/* VECTOR MINIMUM */ +- F(0xe7fe, VMN, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) +-/* VECTOR MINIMUM LOGICAL */ +- F(0xe7fc, VMNL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) +-/* VECTOR MULTIPLY AND ADD LOW */ +- F(0xe7aa, VMAL, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +-/* VECTOR MULTIPLY AND ADD HIGH */ +- F(0xe7ab, VMAH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +-/* VECTOR MULTIPLY AND ADD LOGICAL HIGH */ +- F(0xe7a9, VMALH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +-/* VECTOR MULTIPLY AND ADD EVEN */ +- F(0xe7ae, VMAE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +-/* VECTOR MULTIPLY AND ADD LOGICAL EVEN */ +- F(0xe7ac, VMALE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +-/* VECTOR MULTIPLY AND ADD ODD */ +- F(0xe7af, VMAO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +-/* VECTOR MULTIPLY AND ADD LOGICAL ODD */ +- F(0xe7ad, VMALO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) +-/* VECTOR MULTIPLY HIGH */ +- F(0xe7a3, VMH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +-/* VECTOR MULTIPLY LOGICAL HIGH */ +- F(0xe7a1, VMLH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +-/* VECTOR MULTIPLY LOW */ +- F(0xe7a2, VML, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +-/* VECTOR MULTIPLY EVEN */ +- F(0xe7a6, VME, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +-/* VECTOR MULTIPLY LOGICAL EVEN */ +- F(0xe7a4, VMLE, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +-/* VECTOR MULTIPLY ODD */ +- F(0xe7a7, VMO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +-/* VECTOR MULTIPLY LOGICAL ODD */ +- F(0xe7a5, VMLO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) +-/* VECTOR MULTIPLY SUM LOGICAL */ +- F(0xe7b8, VMSL, VRR_d, VE, 0, 0, 0, 0, vmsl, 0, IF_VEC) +-/* VECTOR NAND */ +- F(0xe76e, VNN, VRR_c, VE, 0, 0, 0, 0, vnn, 0, IF_VEC) +-/* VECTOR NOR */ +- F(0xe76b, VNO, VRR_c, V, 0, 0, 0, 0, vno, 0, IF_VEC) +-/* VECTOR NOT EXCLUSIVE OR */ +- F(0xe76c, VNX, VRR_c, VE, 0, 0, 0, 0, vnx, 0, IF_VEC) +-/* VECTOR OR */ +- F(0xe76a, VO, VRR_c, V, 0, 0, 0, 0, vo, 0, IF_VEC) +-/* VECTOR OR WITH COMPLEMENT */ +- F(0xe76f, VOC, VRR_c, VE, 0, 0, 0, 0, voc, 0, IF_VEC) +-/* VECTOR POPULATION COUNT */ +- F(0xe750, VPOPCT, VRR_a, V, 0, 0, 0, 0, vpopct, 0, IF_VEC) +-/* VECTOR ELEMENT ROTATE LEFT LOGICAL */ +- F(0xe773, VERLLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) +- F(0xe733, VERLL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) +-/* VECTOR ELEMENT ROTATE AND INSERT UNDER MASK */ +- F(0xe772, VERIM, VRI_d, V, 0, 0, 0, 0, verim, 0, IF_VEC) +-/* VECTOR ELEMENT SHIFT LEFT */ +- F(0xe770, VESLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) +- F(0xe730, VESL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) +-/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */ +- F(0xe77a, VESRAV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) +- F(0xe73a, VESRA, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) +-/* VECTOR ELEMENT SHIFT RIGHT LOGICAL */ +- F(0xe778, VESRLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) +- F(0xe738, VESRL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) +-/* VECTOR SHIFT LEFT */ +- F(0xe774, VSL, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC) +-/* VECTOR SHIFT LEFT BY BYTE */ +- F(0xe775, VSLB, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC) +-/* VECTOR SHIFT LEFT DOUBLE BY BYTE */ +- F(0xe777, VSLDB, VRI_d, V, 0, 0, 0, 0, vsldb, 0, IF_VEC) +-/* VECTOR SHIFT RIGHT ARITHMETIC */ +- F(0xe77e, VSRA, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC) +-/* VECTOR SHIFT RIGHT ARITHMETIC BY BYTE */ +- F(0xe77f, VSRAB, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC) +-/* VECTOR SHIFT RIGHT LOGICAL */ +- F(0xe77c, VSRL, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC) +-/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */ +- F(0xe77d, VSRLB, VRR_c, V, 0, 0, 
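/*
 * Sketch of the widening even/odd multiplies above, shown for the
 * logical word form (VMLE): the even-numbered lanes of the two sources
 * produce double-width lanes in the destination; VMLO uses the odd
 * lanes instead.
 */
#include <stdint.h>

static void vmle_word_sketch(const uint32_t a[4], const uint32_t b[4],
                             uint64_t d[2])
{
    d[0] = (uint64_t)a[0] * b[0];
    d[1] = (uint64_t)a[2] * b[2];
}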
0, 0, vsrl, 0, IF_VEC) +-/* VECTOR SUBTRACT */ +- F(0xe7f7, VS, VRR_c, V, 0, 0, 0, 0, vs, 0, IF_VEC) +-/* VECTOR SUBTRACT COMPUTE BORROW INDICATION */ +- F(0xe7f5, VSCBI, VRR_c, V, 0, 0, 0, 0, vscbi, 0, IF_VEC) +-/* VECTOR SUBTRACT WITH BORROW INDICATION */ +- F(0xe7bf, VSBI, VRR_d, V, 0, 0, 0, 0, vsbi, 0, IF_VEC) +-/* VECTOR SUBTRACT WITH BORROW COMPUTE BORROW INDICATION */ +- F(0xe7bd, VSBCBI, VRR_d, V, 0, 0, 0, 0, vsbcbi, 0, IF_VEC) +-/* VECTOR SUM ACROSS DOUBLEWORD */ +- F(0xe765, VSUMG, VRR_c, V, 0, 0, 0, 0, vsumg, 0, IF_VEC) +-/* VECTOR SUM ACROSS QUADWORD */ +- F(0xe767, VSUMQ, VRR_c, V, 0, 0, 0, 0, vsumq, 0, IF_VEC) +-/* VECTOR SUM ACROSS WORD */ +- F(0xe764, VSUM, VRR_c, V, 0, 0, 0, 0, vsum, 0, IF_VEC) +-/* VECTOR TEST UNDER MASK */ +- F(0xe7d8, VTM, VRR_a, V, 0, 0, 0, 0, vtm, 0, IF_VEC) +- +-/* === Vector String Instructions === */ +- +-/* VECTOR FIND ANY ELEMENT EQUAL */ +- F(0xe782, VFAE, VRR_b, V, 0, 0, 0, 0, vfae, 0, IF_VEC) +-/* VECTOR FIND ELEMENT EQUAL */ +- F(0xe780, VFEE, VRR_b, V, 0, 0, 0, 0, vfee, 0, IF_VEC) +-/* VECTOR FIND ELEMENT NOT EQUAL */ +- F(0xe781, VFENE, VRR_b, V, 0, 0, 0, 0, vfene, 0, IF_VEC) +-/* VECTOR ISOLATE STRING */ +- F(0xe75c, VISTR, VRR_a, V, 0, 0, 0, 0, vistr, 0, IF_VEC) +-/* VECTOR STRING RANGE COMPARE */ +- F(0xe78a, VSTRC, VRR_d, V, 0, 0, 0, 0, vstrc, 0, IF_VEC) +- +-/* === Vector Floating-Point Instructions */ +- +-/* VECTOR FP ADD */ +- F(0xe7e3, VFA, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) +-/* VECTOR FP COMPARE SCALAR */ +- F(0xe7cb, WFC, VRR_a, V, 0, 0, 0, 0, wfc, 0, IF_VEC) +-/* VECTOR FP COMPARE AND SIGNAL SCALAR */ +- F(0xe7ca, WFK, VRR_a, V, 0, 0, 0, 0, wfc, 0, IF_VEC) +-/* VECTOR FP COMPARE EQUAL */ +- F(0xe7e8, VFCE, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC) +-/* VECTOR FP COMPARE HIGH */ +- F(0xe7eb, VFCH, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC) +-/* VECTOR FP COMPARE HIGH OR EQUAL */ +- F(0xe7ea, VFCHE, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC) +-/* VECTOR FP CONVERT FROM FIXED 64-BIT */ +- F(0xe7c3, VCDG, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +-/* VECTOR FP CONVERT FROM LOGICAL 64-BIT */ +- F(0xe7c1, VCDLG, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +-/* VECTOR FP CONVERT TO FIXED 64-BIT */ +- F(0xe7c2, VCGD, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +-/* VECTOR FP CONVERT TO LOGICAL 64-BIT */ +- F(0xe7c0, VCLGD, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +-/* VECTOR FP DIVIDE */ +- F(0xe7e5, VFD, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) +-/* VECTOR LOAD FP INTEGER */ +- F(0xe7c7, VFI, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +-/* VECTOR FP LOAD LENGTHENED */ +- F(0xe7c4, VFLL, VRR_a, V, 0, 0, 0, 0, vfll, 0, IF_VEC) +-/* VECTOR FP LOAD ROUNDED */ +- F(0xe7c5, VFLR, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) +-/* VECTOR FP MAXIMUM */ +- F(0xe7ef, VFMAX, VRR_c, VE, 0, 0, 0, 0, vfmax, 0, IF_VEC) +-/* VECTOR FP MINIMUM */ +- F(0xe7ee, VFMIN, VRR_c, VE, 0, 0, 0, 0, vfmax, 0, IF_VEC) +-/* VECTOR FP MULTIPLY */ +- F(0xe7e7, VFM, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) +-/* VECTOR FP MULTIPLY AND ADD */ +- F(0xe78f, VFMA, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC) +-/* VECTOR FP MULTIPLY AND SUBTRACT */ +- F(0xe78e, VFMS, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC) +-/* VECTOR FP NEGATIVE MULTIPLY AND ADD */ +- F(0xe79f, VFNMA, VRR_e, VE, 0, 0, 0, 0, vfma, 0, IF_VEC) +-/* VECTOR FP NEGATIVE MULTIPLY AND SUBTRACT */ +- F(0xe79e, VFNMS, VRR_e, VE, 0, 0, 0, 0, vfma, 0, IF_VEC) +-/* VECTOR FP PERFORM SIGN OPERATION */ +- F(0xe7cc, VFPSO, VRR_a, V, 0, 0, 0, 0, vfpso, 0, IF_VEC) +-/* VECTOR FP SQUARE ROOT */ +- F(0xe7ce, VFSQ, VRR_a, V, 0, 0, 0, 0, vfsq, 0, IF_VEC) +-/* VECTOR FP 
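/*
 * Sketch of the compare semantics shared by VFCE/VFCH/VFCHE above:
 * each element of the result is an all-ones mask where the relation
 * holds and all zeros where it does not (NaN compares as false here).
 */
#include <stdint.h>

static uint64_t vfce64_sketch(double a, double b)
{
    return a == b ? UINT64_MAX : 0;
}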
SUBTRACT */ +- F(0xe7e2, VFS, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) +-/* VECTOR FP TEST DATA CLASS IMMEDIATE */ +- F(0xe74a, VFTCI, VRI_e, V, 0, 0, 0, 0, vftci, 0, IF_VEC) +- +-#ifndef CONFIG_USER_ONLY +-/* COMPARE AND SWAP AND PURGE */ +- E(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL, IF_PRIV) +- E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ, IF_PRIV) +-/* DIAGNOSE (KVM hypercall) */ +- F(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0, IF_PRIV | IF_IO) +-/* INSERT STORAGE KEY EXTENDED */ +- F(0xb229, ISKE, RRE, Z, 0, r2_o, new, r1_8, iske, 0, IF_PRIV) +-/* INVALIDATE DAT TABLE ENTRY */ +- F(0xb98e, IPDE, RRF_b, Z, r1_o, r2_o, 0, 0, idte, 0, IF_PRIV) +-/* INVALIDATE PAGE TABLE ENTRY */ +- F(0xb221, IPTE, RRF_a, Z, r1_o, r2_o, 0, 0, ipte, 0, IF_PRIV) +-/* LOAD CONTROL */ +- F(0xb700, LCTL, RS_a, Z, 0, a2, 0, 0, lctl, 0, IF_PRIV) +- F(0xeb2f, LCTLG, RSY_a, Z, 0, a2, 0, 0, lctlg, 0, IF_PRIV) +-/* LOAD PROGRAM PARAMETER */ +- F(0xb280, LPP, S, LPP, 0, m2_64, 0, 0, lpp, 0, IF_PRIV) +-/* LOAD PSW */ +- F(0x8200, LPSW, S, Z, 0, a2, 0, 0, lpsw, 0, IF_PRIV) +-/* LOAD PSW EXTENDED */ +- F(0xb2b2, LPSWE, S, Z, 0, a2, 0, 0, lpswe, 0, IF_PRIV) +-/* LOAD REAL ADDRESS */ +- F(0xb100, LRA, RX_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV) +- F(0xe313, LRAY, RXY_a, LD, 0, a2, r1, 0, lra, 0, IF_PRIV) +- F(0xe303, LRAG, RXY_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV) +-/* LOAD USING REAL ADDRESS */ +- E(0xb24b, LURA, RRE, Z, 0, ra2, new, r1_32, lura, 0, MO_TEUL, IF_PRIV) +- E(0xb905, LURAG, RRE, Z, 0, ra2, r1, 0, lura, 0, MO_TEQ, IF_PRIV) +-/* MOVE TO PRIMARY */ +- F(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0, IF_PRIV) +-/* MOVE TO SECONDARY */ +- F(0xdb00, MVCS, SS_d, Z, la1, a2, 0, 0, mvcs, 0, IF_PRIV) +-/* PURGE TLB */ +- F(0xb20d, PTLB, S, Z, 0, 0, 0, 0, ptlb, 0, IF_PRIV) +-/* RESET REFERENCE BIT EXTENDED */ +- F(0xb22a, RRBE, RRE, Z, 0, r2_o, 0, 0, rrbe, 0, IF_PRIV) +-/* SERVICE CALL LOGICAL PROCESSOR (PV hypercall) */ +- F(0xb220, SERVC, RRE, Z, r1_o, r2_o, 0, 0, servc, 0, IF_PRIV | IF_IO) +-/* SET ADDRESS SPACE CONTROL FAST */ +- F(0xb279, SACF, S, Z, 0, a2, 0, 0, sacf, 0, IF_PRIV) +-/* SET CLOCK */ +- F(0xb204, SCK, S, Z, la2, 0, 0, 0, sck, 0, IF_PRIV | IF_IO) +-/* SET CLOCK COMPARATOR */ +- F(0xb206, SCKC, S, Z, 0, m2_64a, 0, 0, sckc, 0, IF_PRIV | IF_IO) +-/* SET CLOCK PROGRAMMABLE FIELD */ +- F(0x0107, SCKPF, E, Z, 0, 0, 0, 0, sckpf, 0, IF_PRIV) +-/* SET CPU TIMER */ +- F(0xb208, SPT, S, Z, 0, m2_64a, 0, 0, spt, 0, IF_PRIV | IF_IO) +-/* SET PREFIX */ +- F(0xb210, SPX, S, Z, 0, m2_32ua, 0, 0, spx, 0, IF_PRIV) +-/* SET PSW KEY FROM ADDRESS */ +- F(0xb20a, SPKA, S, Z, 0, a2, 0, 0, spka, 0, IF_PRIV) +-/* SET STORAGE KEY EXTENDED */ +- F(0xb22b, SSKE, RRF_c, Z, r1_o, r2_o, 0, 0, sske, 0, IF_PRIV) +-/* SET SYSTEM MASK */ +- F(0x8000, SSM, S, Z, 0, m2_8u, 0, 0, ssm, 0, IF_PRIV) +-/* SIGNAL PROCESSOR */ +- F(0xae00, SIGP, RS_a, Z, 0, a2, 0, 0, sigp, 0, IF_PRIV | IF_IO) +-/* STORE CLOCK COMPARATOR */ +- F(0xb207, STCKC, S, Z, la2, 0, new, m1_64a, stckc, 0, IF_PRIV) +-/* STORE CONTROL */ +- F(0xb600, STCTL, RS_a, Z, 0, a2, 0, 0, stctl, 0, IF_PRIV) +- F(0xeb25, STCTG, RSY_a, Z, 0, a2, 0, 0, stctg, 0, IF_PRIV) +-/* STORE CPU ADDRESS */ +- F(0xb212, STAP, S, Z, la2, 0, new, m1_16a, stap, 0, IF_PRIV) +-/* STORE CPU ID */ +- F(0xb202, STIDP, S, Z, la2, 0, new, m1_64a, stidp, 0, IF_PRIV) +-/* STORE CPU TIMER */ +- F(0xb209, STPT, S, Z, la2, 0, new, m1_64a, stpt, 0, IF_PRIV | IF_IO) +-/* STORE FACILITY LIST */ +- F(0xb2b1, STFL, S, Z, 0, 0, 0, 0, stfl, 0, IF_PRIV) +-/* STORE PREFIX */ +- F(0xb211, 
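/*
 * Hedged sketch of SPKA above (architectural behaviour, not code from
 * this file): the new PSW key is taken from bits 56-59 of the
 * second-operand address, i.e. its second-lowest nibble.
 */
static unsigned spka_key_sketch(unsigned long a2)
{
    return (a2 >> 4) & 0xf;
}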
STPX, S, Z, la2, 0, new, m1_32a, stpx, 0, IF_PRIV) +-/* STORE SYSTEM INFORMATION */ +- F(0xb27d, STSI, S, Z, 0, a2, 0, 0, stsi, 0, IF_PRIV) +-/* STORE THEN AND SYSTEM MASK */ +- F(0xac00, STNSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV) +-/* STORE THEN OR SYSTEM MASK */ +- F(0xad00, STOSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV) +-/* STORE USING REAL ADDRESS */ +- E(0xb246, STURA, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEUL, IF_PRIV) +- E(0xb925, STURG, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEQ, IF_PRIV) +-/* TEST BLOCK */ +- F(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0, IF_PRIV) +-/* TEST PROTECTION */ +- C(0xe501, TPROT, SSE, Z, la1, a2, 0, 0, tprot, 0) +- +-/* CCW I/O Instructions */ +- F(0xb276, XSCH, S, Z, 0, 0, 0, 0, xsch, 0, IF_PRIV | IF_IO) +- F(0xb230, CSCH, S, Z, 0, 0, 0, 0, csch, 0, IF_PRIV | IF_IO) +- F(0xb231, HSCH, S, Z, 0, 0, 0, 0, hsch, 0, IF_PRIV | IF_IO) +- F(0xb232, MSCH, S, Z, 0, insn, 0, 0, msch, 0, IF_PRIV | IF_IO) +- F(0xb23b, RCHP, S, Z, 0, 0, 0, 0, rchp, 0, IF_PRIV | IF_IO) +- F(0xb238, RSCH, S, Z, 0, 0, 0, 0, rsch, 0, IF_PRIV | IF_IO) +- F(0xb237, SAL, S, Z, 0, 0, 0, 0, sal, 0, IF_PRIV | IF_IO) +- F(0xb23c, SCHM, S, Z, 0, insn, 0, 0, schm, 0, IF_PRIV | IF_IO) +- F(0xb274, SIGA, S, Z, 0, 0, 0, 0, siga, 0, IF_PRIV | IF_IO) +- F(0xb23a, STCPS, S, Z, 0, 0, 0, 0, stcps, 0, IF_PRIV | IF_IO) +- F(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0, IF_PRIV | IF_IO) +- F(0xb239, STCRW, S, Z, 0, insn, 0, 0, stcrw, 0, IF_PRIV | IF_IO) +- F(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0, IF_PRIV | IF_IO) +- F(0xb236, TPI , S, Z, la2, 0, 0, 0, tpi, 0, IF_PRIV | IF_IO) +- F(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0, IF_PRIV | IF_IO) +- /* ??? Not listed in PoO ninth edition, but there's a linux driver that +- uses it: "A CHSC subchannel is usually present on LPAR only." */ +- F(0xb25f, CHSC, RRE, Z, 0, insn, 0, 0, chsc, 0, IF_PRIV | IF_IO) +- +-/* zPCI Instructions */ +- /* None of these instructions are documented in the PoP, so this is all +- based upon target/s390x/kvm.c and Linux code and likely incomplete */ +- F(0xebd0, PCISTB, RSY_a, PCI, la2, 0, 0, 0, pcistb, 0, IF_PRIV | IF_IO) +- F(0xebd1, SIC, RSY_a, AIS, r1, r3, 0, 0, sic, 0, IF_PRIV | IF_IO) +- F(0xb9a0, CLP, RRF_c, PCI, 0, 0, 0, 0, clp, 0, IF_PRIV | IF_IO) +- F(0xb9d0, PCISTG, RRE, PCI, 0, 0, 0, 0, pcistg, 0, IF_PRIV | IF_IO) +- F(0xb9d2, PCILG, RRE, PCI, 0, 0, 0, 0, pcilg, 0, IF_PRIV | IF_IO) +- F(0xb9d3, RPCIT, RRE, PCI, 0, 0, 0, 0, rpcit, 0, IF_PRIV | IF_IO) +- F(0xe3d0, MPCIFC, RXY_a, PCI, la2, 0, 0, 0, mpcifc, 0, IF_PRIV | IF_IO) +- F(0xe3d4, STPCIFC, RXY_a, PCI, la2, 0, 0, 0, stpcifc, 0, IF_PRIV | IF_IO) +- +-#endif /* CONFIG_USER_ONLY */ +diff --git a/target/s390x/insn-format.def b/target/s390x/insn-format.def +deleted file mode 100644 +index 6253edbd19..0000000000 +--- a/target/s390x/insn-format.def ++++ /dev/null +@@ -1,81 +0,0 @@ +-/* Description of s390 insn formats. */ +-/* NAME F1, F2... */ +-F0(E) +-F1(I, I(1, 8, 8)) +-F2(RI_a, R(1, 8), I(2,16,16)) +-F2(RI_b, R(1, 8), I(2,16,16)) +-F2(RI_c, M(1, 8), I(2,16,16)) +-F3(RIE_a, R(1, 8), I(2,16,16), M(3,32)) +-F4(RIE_b, R(1, 8), R(2,12), M(3,32), I(4,16,16)) +-F4(RIE_c, R(1, 8), I(2,32, 8), M(3,12), I(4,16,16)) +-F3(RIE_d, R(1, 8), I(2,16,16), R(3,12)) +-F3(RIE_e, R(1, 8), I(2,16,16), R(3,12)) +-F5(RIE_f, R(1, 8), R(2,12), I(3,16,8), I(4,24,8), I(5,32,8)) +-F3(RIE_g, R(1, 8), I(2,16,16), M(3,12)) +-F2(RIL_a, R(1, 8), I(2,16,32)) +-F2(RIL_b, R(1, 8), I(2,16,32)) +-F2(RIL_c, M(1, 8), I(2,16,32)) +-F4(RIS, R(1, 8), I(2,32, 8), M(3,12), BD(4,16,20)) +-/* ??? 
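/*
 * Sketch (names hypothetical) of how a field spec such as R(2,28) in
 * insn-format.def reads: operand 2 is a 4-bit register number at bit
 * offset 28 from the leftmost instruction bit.  With the insn held
 * left-aligned in 64 bits, extraction is a shift and a mask.
 */
#include <stdint.h>

static unsigned field4_sketch(uint64_t insn_left_aligned, int beg)
{
    return (insn_left_aligned >> (64 - beg - 4)) & 0xf;
}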
The PoO does not call out subtypes _a and _b for RR, as it does +- for e.g. RX. Our checking requires this for e.g. BCR. */ +-F2(RR_a, R(1, 8), R(2,12)) +-F2(RR_b, M(1, 8), R(2,12)) +-F2(RRE, R(1,24), R(2,28)) +-F3(RRD, R(1,16), R(2,28), R(3,24)) +-F4(RRF_a, R(1,24), R(2,28), R(3,16), M(4,20)) +-F4(RRF_b, R(1,24), R(2,28), R(3,16), M(4,20)) +-F4(RRF_c, R(1,24), R(2,28), M(3,16), M(4,20)) +-F4(RRF_d, R(1,24), R(2,28), M(3,16), M(4,20)) +-F4(RRF_e, R(1,24), R(2,28), M(3,16), M(4,20)) +-F4(RRS, R(1, 8), R(2,12), M(3,32), BD(4,16,20)) +-F3(RS_a, R(1, 8), BD(2,16,20), R(3,12)) +-F3(RS_b, R(1, 8), BD(2,16,20), M(3,12)) +-F3(RSI, R(1, 8), I(2,16,16), R(3,12)) +-F2(RSL, L(1, 8, 4), BD(1,16,20)) +-F3(RSY_a, R(1, 8), BDL(2), R(3,12)) +-F3(RSY_b, R(1, 8), BDL(2), M(3,12)) +-F2(RX_a, R(1, 8), BXD(2)) +-F2(RX_b, M(1, 8), BXD(2)) +-F3(RXE, R(1, 8), BXD(2), M(3,32)) +-F3(RXF, R(1,32), BXD(2), R(3, 8)) +-F2(RXY_a, R(1, 8), BXDL(2)) +-F2(RXY_b, M(1, 8), BXDL(2)) +-F1(S, BD(2,16,20)) +-F2(SI, BD(1,16,20), I(2,8,8)) +-F2(SIL, BD(1,16,20), I(2,32,16)) +-F2(SIY, BDL(1), I(2, 8, 8)) +-F3(SS_a, L(1, 8, 8), BD(1,16,20), BD(2,32,36)) +-F4(SS_b, L(1, 8, 4), BD(1,16,20), L(2,12,4), BD(2,32,36)) +-F4(SS_c, L(1, 8, 4), BD(1,16,20), BD(2,32,36), I(3,12, 4)) +-/* ??? Odd man out. The L1 field here is really a register, but the +- easy way to compress the fields has R1 and B1 overlap. */ +-F4(SS_d, L(1, 8, 4), BD(1,16,20), BD(2,32,36), R(3,12)) +-F4(SS_e, R(1, 8), BD(2,16,20), R(3,12), BD(4,32,36)) +-F3(SS_f, BD(1,16,20), L(2,8,8), BD(2,32,36)) +-F2(SSE, BD(1,16,20), BD(2,32,36)) +-F3(SSF, BD(1,16,20), BD(2,32,36), R(3,8)) +-F3(VRI_a, V(1,8), I(2,16,16), M(3,32)) +-F4(VRI_b, V(1,8), I(2,16,8), I(3,24,8), M(4,32)) +-F4(VRI_c, V(1,8), V(3,12), I(2,16,16), M(4,32)) +-F5(VRI_d, V(1,8), V(2,12), V(3,16), I(4,24,8), M(5,32)) +-F5(VRI_e, V(1,8), V(2,12), I(3,16,12), M(5,28), M(4,32)) +-F5(VRI_f, V(1,8), V(2,12), V(3,16), M(5,24), I(4,28,8)) +-F5(VRI_g, V(1,8), V(2,12), I(4,16,8), M(5,24), I(3,28,8)) +-F3(VRI_h, V(1,8), I(2,16,16), I(3,32,4)) +-F4(VRI_i, V(1,8), R(2,12), M(4,24), I(3,28,8)) +-F5(VRR_a, V(1,8), V(2,12), M(5,24), M(4,28), M(3,32)) +-F5(VRR_b, V(1,8), V(2,12), V(3,16), M(5,24), M(4,32)) +-F6(VRR_c, V(1,8), V(2,12), V(3,16), M(6,24), M(5,28), M(4,32)) +-F6(VRR_d, V(1,8), V(2,12), V(3,16), M(5,20), M(6,24), V(4,32)) +-F6(VRR_e, V(1,8), V(2,12), V(3,16), M(6,20), M(5,28), V(4,32)) +-F3(VRR_f, V(1,8), R(2,12), R(3,16)) +-F1(VRR_g, V(1,12)) +-F3(VRR_h, V(1,12), V(2,16), M(3,24)) +-F3(VRR_i, R(1,8), V(2,12), M(3,24)) +-F4(VRS_a, V(1,8), V(3,12), BD(2,16,20), M(4,32)) +-F4(VRS_b, V(1,8), R(3,12), BD(2,16,20), M(4,32)) +-F4(VRS_c, R(1,8), V(3,12), BD(2,16,20), M(4,32)) +-F3(VRS_d, R(3,12), BD(2,16,20), V(1,32)) +-F4(VRV, V(1,8), V(2,12), BD(2,16,20), M(3,32)) +-F3(VRX, V(1,8), BXD(2), M(3,32)) +-F3(VSI, I(3,8,8), BD(2,16,20), V(1,32)) +diff --git a/target/s390x/int_helper.c b/target/s390x/int_helper.c +deleted file mode 100644 +index 658507dd6d..0000000000 +--- a/target/s390x/int_helper.c ++++ /dev/null +@@ -1,148 +0,0 @@ +-/* +- * S/390 integer helper routines +- * +- * Copyright (c) 2009 Ulrich Hecht +- * Copyright (c) 2009 Alexander Graf +- * +- * This library is free software; you can redistribute it and/or +- * modify it under the terms of the GNU Lesser General Public +- * License as published by the Free Software Foundation; either +- * version 2.1 of the License, or (at your option) any later version. 
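/*
 * Standalone sketch of the representability check used by the
 * 64/32 -> 32 division helpers below: divide-by-zero, signed overflow
 * and a quotient that does not fit in 32 bits all end in a
 * fixed-point-divide program interrupt.
 */
#include <stdint.h>
#include <stdbool.h>

static bool divs32_fits(int64_t a, int32_t b)
{
    if (b == 0 || (b == -1 && a == INT64_MIN)) {
        return false;                 /* divide exception / overflow */
    }
    int64_t q = a / b;
    return q == (int32_t)q;           /* quotient representable? */
}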
+- * +- * This library is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * Lesser General Public License for more details. +- * +- * You should have received a copy of the GNU Lesser General Public +- * License along with this library; if not, see . +- */ +- +-#include "qemu/osdep.h" +-#include "cpu.h" +-#include "internal.h" +-#include "tcg_s390x.h" +-#include "exec/exec-all.h" +-#include "qemu/host-utils.h" +-#include "exec/helper-proto.h" +- +-/* #define DEBUG_HELPER */ +-#ifdef DEBUG_HELPER +-#define HELPER_LOG(x...) qemu_log(x) +-#else +-#define HELPER_LOG(x...) +-#endif +- +-/* 64/32 -> 32 signed division */ +-int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64) +-{ +- int32_t ret, b = b64; +- int64_t q; +- +- if (b == 0) { +- tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); +- } +- +- ret = q = a / b; +- env->retxl = a % b; +- +- /* Catch non-representable quotient. */ +- if (ret != q) { +- tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); +- } +- +- return ret; +-} +- +-/* 64/32 -> 32 unsigned division */ +-uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64) +-{ +- uint32_t ret, b = b64; +- uint64_t q; +- +- if (b == 0) { +- tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); +- } +- +- ret = q = a / b; +- env->retxl = a % b; +- +- /* Catch non-representable quotient. */ +- if (ret != q) { +- tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); +- } +- +- return ret; +-} +- +-/* 64/64 -> 64 signed division */ +-int64_t HELPER(divs64)(CPUS390XState *env, int64_t a, int64_t b) +-{ +- /* Catch divide by zero, and non-representable quotient (MIN / -1). */ +- if (b == 0 || (b == -1 && a == (1ll << 63))) { +- tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); +- } +- env->retxl = a % b; +- return a / b; +-} +- +-/* 128 -> 64/64 unsigned division */ +-uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al, +- uint64_t b) +-{ +- uint64_t ret; +- /* Signal divide by zero. */ +- if (b == 0) { +- tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); +- } +- if (ah == 0) { +- /* 64 -> 64/64 case */ +- env->retxl = al % b; +- ret = al / b; +- } else { +- /* ??? Move i386 idivq helper to host-utils. */ +-#ifdef CONFIG_INT128 +- __uint128_t a = ((__uint128_t)ah << 64) | al; +- __uint128_t q = a / b; +- env->retxl = a % b; +- ret = q; +- if (ret != q) { +- tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); +- } +-#else +- /* 32-bit hosts would need special wrapper functionality - just abort if +- we encounter such a case; it's very unlikely anyways. */ +- cpu_abort(env_cpu(env), "128 -> 64/64 division not implemented\n"); +-#endif +- } +- return ret; +-} +- +-uint64_t HELPER(cvd)(int32_t reg) +-{ +- /* positive 0 */ +- uint64_t dec = 0x0c; +- int64_t bin = reg; +- int shift; +- +- if (bin < 0) { +- bin = -bin; +- dec = 0x0d; +- } +- +- for (shift = 4; (shift < 64) && bin; shift += 4) { +- dec |= (bin % 10) << shift; +- bin /= 10; +- } +- +- return dec; +-} +- +-uint64_t HELPER(popcnt)(uint64_t val) +-{ +- /* Note that we don't fold past bytes. 
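/*
 * Worked example for the CVD helper above, re-derived standalone: the
 * binary value becomes one decimal digit per nibble with the sign code
 * in the low nibble (0xc plus, 0xd minus), e.g. 127 -> 0x127c and
 * -4 -> 0x4d.
 */
#include <stdint.h>

static uint64_t cvd_sketch(int32_t reg)
{
    uint64_t dec = 0x0c;              /* sign code: plus */
    int64_t bin = reg;

    if (bin < 0) {
        bin = -bin;
        dec = 0x0d;                   /* sign code: minus */
    }
    for (int shift = 4; shift < 64 && bin; shift += 4) {
        dec |= (uint64_t)(bin % 10) << shift;
        bin /= 10;
    }
    return dec;
}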
*/ +- val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL); +- val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); +- val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL; +- return val; +-} +diff --git a/target/s390x/internal.h b/target/s390x/internal.h +deleted file mode 100644 +index 9256275376..0000000000 +--- a/target/s390x/internal.h ++++ /dev/null +@@ -1,388 +0,0 @@ +-/* +- * s390x internal definitions and helpers +- * +- * Copyright (c) 2009 Ulrich Hecht +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. +- */ +- +-#ifndef S390X_INTERNAL_H +-#define S390X_INTERNAL_H +- +-#include "cpu.h" +- +-#ifndef CONFIG_USER_ONLY +-typedef struct LowCore { +- /* prefix area: defined by architecture */ +- uint32_t ccw1[2]; /* 0x000 */ +- uint32_t ccw2[4]; /* 0x008 */ +- uint8_t pad1[0x80 - 0x18]; /* 0x018 */ +- uint32_t ext_params; /* 0x080 */ +- uint16_t cpu_addr; /* 0x084 */ +- uint16_t ext_int_code; /* 0x086 */ +- uint16_t svc_ilen; /* 0x088 */ +- uint16_t svc_code; /* 0x08a */ +- uint16_t pgm_ilen; /* 0x08c */ +- uint16_t pgm_code; /* 0x08e */ +- uint32_t data_exc_code; /* 0x090 */ +- uint16_t mon_class_num; /* 0x094 */ +- uint16_t per_perc_atmid; /* 0x096 */ +- uint64_t per_address; /* 0x098 */ +- uint8_t exc_access_id; /* 0x0a0 */ +- uint8_t per_access_id; /* 0x0a1 */ +- uint8_t op_access_id; /* 0x0a2 */ +- uint8_t ar_access_id; /* 0x0a3 */ +- uint8_t pad2[0xA8 - 0xA4]; /* 0x0a4 */ +- uint64_t trans_exc_code; /* 0x0a8 */ +- uint64_t monitor_code; /* 0x0b0 */ +- uint16_t subchannel_id; /* 0x0b8 */ +- uint16_t subchannel_nr; /* 0x0ba */ +- uint32_t io_int_parm; /* 0x0bc */ +- uint32_t io_int_word; /* 0x0c0 */ +- uint8_t pad3[0xc8 - 0xc4]; /* 0x0c4 */ +- uint32_t stfl_fac_list; /* 0x0c8 */ +- uint8_t pad4[0xe8 - 0xcc]; /* 0x0cc */ +- uint64_t mcic; /* 0x0e8 */ +- uint8_t pad5[0xf4 - 0xf0]; /* 0x0f0 */ +- uint32_t external_damage_code; /* 0x0f4 */ +- uint64_t failing_storage_address; /* 0x0f8 */ +- uint8_t pad6[0x110 - 0x100]; /* 0x100 */ +- uint64_t per_breaking_event_addr; /* 0x110 */ +- uint8_t pad7[0x120 - 0x118]; /* 0x118 */ +- PSW restart_old_psw; /* 0x120 */ +- PSW external_old_psw; /* 0x130 */ +- PSW svc_old_psw; /* 0x140 */ +- PSW program_old_psw; /* 0x150 */ +- PSW mcck_old_psw; /* 0x160 */ +- PSW io_old_psw; /* 0x170 */ +- uint8_t pad8[0x1a0 - 0x180]; /* 0x180 */ +- PSW restart_new_psw; /* 0x1a0 */ +- PSW external_new_psw; /* 0x1b0 */ +- PSW svc_new_psw; /* 0x1c0 */ +- PSW program_new_psw; /* 0x1d0 */ +- PSW mcck_new_psw; /* 0x1e0 */ +- PSW io_new_psw; /* 0x1f0 */ +- uint8_t pad13[0x11b0 - 0x200]; /* 0x200 */ +- +- uint64_t mcesad; /* 0x11B0 */ +- +- /* 64 bit extparam used for pfault, diag 250 etc */ +- uint64_t ext_params2; /* 0x11B8 */ +- +- uint8_t pad14[0x1200 - 0x11C0]; /* 0x11C0 */ +- +- /* System info area */ +- +- uint64_t floating_pt_save_area[16]; /* 0x1200 */ +- uint64_t gpregs_save_area[16]; /* 0x1280 */ +- uint32_t st_status_fixed_logout[4]; /* 0x1300 */ +- uint8_t pad15[0x1318 - 0x1310]; /* 0x1310 */ +- uint32_t prefixreg_save_area; /* 0x1318 */ +- uint32_t fpt_creg_save_area; /* 0x131c */ +- uint8_t pad16[0x1324 - 0x1320]; /* 0x1320 */ +- uint32_t tod_progreg_save_area; /* 0x1324 */ +- uint64_t cpu_timer_save_area; /* 0x1328 */ +- uint64_t clock_comp_save_area; /* 0x1330 */ +- uint8_t pad17[0x1340 - 0x1338]; /* 0x1338 */ +- uint32_t access_regs_save_area[16]; /* 0x1340 */ +- uint64_t cregs_save_area[16]; /* 0x1380 */ +- +- /* align to the 
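/*
 * Sketch of why the popcnt folding above stops at byte width: POPCNT
 * delivers a population count per byte, and after the third fold each
 * byte already holds the count of its own eight input bits.
 */
#include <stdint.h>

static uint64_t popcnt_bytes_sketch(uint64_t v)
{
    v = (v & 0x5555555555555555ULL) + ((v >> 1) & 0x5555555555555555ULL);
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
    return (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
}
/* popcnt_bytes_sketch(0xff01) == 0x0801: 8 set bits in byte 1, 1 in byte 0 */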
top of the prefix area */ +- +- uint8_t pad18[0x2000 - 0x1400]; /* 0x1400 */ +-} QEMU_PACKED LowCore; +-QEMU_BUILD_BUG_ON(sizeof(LowCore) != 8192); +-#endif /* CONFIG_USER_ONLY */ +- +-#define MAX_ILEN 6 +- +-/* While the PoO talks about ILC (a number between 1-3) what is actually +- stored in LowCore is shifted left one bit (an even between 2-6). As +- this is the actual length of the insn and therefore more useful, that +- is what we want to pass around and manipulate. To make sure that we +- have applied this distinction universally, rename the "ILC" to "ILEN". */ +-static inline int get_ilen(uint8_t opc) +-{ +- switch (opc >> 6) { +- case 0: +- return 2; +- case 1: +- case 2: +- return 4; +- default: +- return 6; +- } +-} +- +-/* Compute the ATMID field that is stored in the per_perc_atmid lowcore +- entry when a PER exception is triggered. */ +-static inline uint8_t get_per_atmid(CPUS390XState *env) +-{ +- return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) | +- (1 << 6) | +- ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) | +- ((env->psw.mask & PSW_MASK_DAT) ? (1 << 4) : 0) | +- ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) | +- ((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0); +-} +- +-static inline uint64_t wrap_address(CPUS390XState *env, uint64_t a) +-{ +- if (!(env->psw.mask & PSW_MASK_64)) { +- if (!(env->psw.mask & PSW_MASK_32)) { +- /* 24-Bit mode */ +- a &= 0x00ffffff; +- } else { +- /* 31-Bit mode */ +- a &= 0x7fffffff; +- } +- } +- return a; +-} +- +-/* CC optimization */ +- +-/* Instead of computing the condition codes after each x86 instruction, +- * QEMU just stores the result (called CC_DST), the type of operation +- * (called CC_OP) and whatever operands are needed (CC_SRC and possibly +- * CC_VR). When the condition codes are needed, the condition codes can +- * be calculated using this information. Condition codes are not generated +- * if they are only needed for conditional branches. 
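/*
 * Worked examples for wrap_address above: the PSW addressing-mode bits
 * decide how much of an address survives.
 *   24-bit mode: 0x12345678 -> 0x00345678
 *   31-bit mode: 0xfedcba98 -> 0x7edcba98
 *   64-bit mode: unchanged
 */
#include <stdint.h>
#include <stdbool.h>

static uint64_t wrap_sketch(uint64_t a, bool am64, bool am31)
{
    if (am64) {
        return a;
    }
    return a & (am31 ? 0x7fffffffULL : 0x00ffffffULL);
}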
+- */ +-enum cc_op { +- CC_OP_CONST0 = 0, /* CC is 0 */ +- CC_OP_CONST1, /* CC is 1 */ +- CC_OP_CONST2, /* CC is 2 */ +- CC_OP_CONST3, /* CC is 3 */ +- +- CC_OP_DYNAMIC, /* CC calculation defined by env->cc_op */ +- CC_OP_STATIC, /* CC value is env->cc_op */ +- +- CC_OP_NZ, /* env->cc_dst != 0 */ +- CC_OP_ADDU, /* dst != 0, src = carry out (0,1) */ +- CC_OP_SUBU, /* dst != 0, src = borrow out (0,-1) */ +- +- CC_OP_LTGT_32, /* signed less/greater than (32bit) */ +- CC_OP_LTGT_64, /* signed less/greater than (64bit) */ +- CC_OP_LTUGTU_32, /* unsigned less/greater than (32bit) */ +- CC_OP_LTUGTU_64, /* unsigned less/greater than (64bit) */ +- CC_OP_LTGT0_32, /* signed less/greater than 0 (32bit) */ +- CC_OP_LTGT0_64, /* signed less/greater than 0 (64bit) */ +- +- CC_OP_ADD_64, /* overflow on add (64bit) */ +- CC_OP_SUB_64, /* overflow on subtraction (64bit) */ +- CC_OP_ABS_64, /* sign eval on abs (64bit) */ +- CC_OP_NABS_64, /* sign eval on nabs (64bit) */ +- CC_OP_MULS_64, /* overflow on signed multiply (64bit) */ +- +- CC_OP_ADD_32, /* overflow on add (32bit) */ +- CC_OP_SUB_32, /* overflow on subtraction (32bit) */ +- CC_OP_ABS_32, /* sign eval on abs (64bit) */ +- CC_OP_NABS_32, /* sign eval on nabs (64bit) */ +- CC_OP_MULS_32, /* overflow on signed multiply (32bit) */ +- +- CC_OP_COMP_32, /* complement */ +- CC_OP_COMP_64, /* complement */ +- +- CC_OP_TM_32, /* test under mask (32bit) */ +- CC_OP_TM_64, /* test under mask (64bit) */ +- +- CC_OP_NZ_F32, /* FP dst != 0 (32bit) */ +- CC_OP_NZ_F64, /* FP dst != 0 (64bit) */ +- CC_OP_NZ_F128, /* FP dst != 0 (128bit) */ +- +- CC_OP_ICM, /* insert characters under mask */ +- CC_OP_SLA_32, /* Calculate shift left signed (32bit) */ +- CC_OP_SLA_64, /* Calculate shift left signed (64bit) */ +- CC_OP_FLOGR, /* find leftmost one */ +- CC_OP_LCBB, /* load count to block boundary */ +- CC_OP_VC, /* vector compare result */ +- CC_OP_MAX +-}; +- +-#ifndef CONFIG_USER_ONLY +- +-static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb, +- uint8_t *ar) +-{ +- hwaddr addr = 0; +- uint8_t reg; +- +- reg = ipb >> 28; +- if (reg > 0) { +- addr = env->regs[reg]; +- } +- addr += (ipb >> 16) & 0xfff; +- if (ar) { +- *ar = reg; +- } +- +- return addr; +-} +- +-/* Base/displacement are at the same locations. 
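/*
 * Minimal sketch (names hypothetical) of the lazy scheme the CC comment
 * above describes: record an operation tag and its operands when flags
 * change, and compute the 0-3 condition code only when it is read.
 */
#include <stdint.h>

enum { SK_STATIC, SK_LTGT0_32 };

struct lazy_cc {
    int op;
    int64_t dst;
};

static int calc_cc_sketch(struct lazy_cc c)
{
    switch (c.op) {
    case SK_LTGT0_32:   /* CC 0: zero, 1: negative, 2: positive */
        return (int32_t)c.dst == 0 ? 0 : ((int32_t)c.dst < 0 ? 1 : 2);
    default:            /* SK_STATIC: the CC value itself was stored */
        return (int)c.dst;
    }
}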
*/ +-#define decode_basedisp_rs decode_basedisp_s +- +-#endif /* CONFIG_USER_ONLY */ +- +-/* arch_dump.c */ +-int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, +- int cpuid, void *opaque); +- +- +-/* cc_helper.c */ +-const char *cc_name(enum cc_op cc_op); +-uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, +- uint64_t vr); +- +-/* cpu.c */ +-#ifndef CONFIG_USER_ONLY +-unsigned int s390_cpu_halt(S390CPU *cpu); +-void s390_cpu_unhalt(S390CPU *cpu); +-#else +-static inline unsigned int s390_cpu_halt(S390CPU *cpu) +-{ +- return 0; +-} +- +-static inline void s390_cpu_unhalt(S390CPU *cpu) +-{ +-} +-#endif /* CONFIG_USER_ONLY */ +- +- +-/* cpu_models.c */ +-void s390_cpu_model_class_register_props(ObjectClass *oc); +-void s390_realize_cpu_model(CPUState *cs, Error **errp); +-ObjectClass *s390_cpu_class_by_name(const char *name); +- +- +-/* excp_helper.c */ +-void s390x_cpu_debug_excp_handler(CPUState *cs); +-void s390_cpu_do_interrupt(CPUState *cpu); +-bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req); +-bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, +- MMUAccessType access_type, int mmu_idx, +- bool probe, uintptr_t retaddr); +-void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, +- MMUAccessType access_type, +- int mmu_idx, uintptr_t retaddr); +- +- +-/* fpu_helper.c */ +-uint32_t set_cc_nz_f32(float32 v); +-uint32_t set_cc_nz_f64(float64 v); +-uint32_t set_cc_nz_f128(float128 v); +-#define S390_IEEE_MASK_INVALID 0x80 +-#define S390_IEEE_MASK_DIVBYZERO 0x40 +-#define S390_IEEE_MASK_OVERFLOW 0x20 +-#define S390_IEEE_MASK_UNDERFLOW 0x10 +-#define S390_IEEE_MASK_INEXACT 0x08 +-#define S390_IEEE_MASK_QUANTUM 0x04 +-uint8_t s390_softfloat_exc_to_ieee(unsigned int exc); +-int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3); +-void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode); +-int float_comp_to_cc(CPUS390XState *env, int float_compare); +- +-#define DCMASK_ZERO 0x0c00 +-#define DCMASK_NORMAL 0x0300 +-#define DCMASK_SUBNORMAL 0x00c0 +-#define DCMASK_INFINITY 0x0030 +-#define DCMASK_QUIET_NAN 0x000c +-#define DCMASK_SIGNALING_NAN 0x0003 +-#define DCMASK_NAN 0x000f +-#define DCMASK_NEGATIVE 0x0555 +-uint16_t float32_dcmask(CPUS390XState *env, float32 f1); +-uint16_t float64_dcmask(CPUS390XState *env, float64 f1); +-uint16_t float128_dcmask(CPUS390XState *env, float128 f1); +- +- +-/* gdbstub.c */ +-int s390_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); +-int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); +-void s390_cpu_gdb_init(CPUState *cs); +- +- +-/* helper.c */ +-void s390_cpu_dump_state(CPUState *cpu, FILE *f, int flags); +-void do_restart_interrupt(CPUS390XState *env); +-#ifndef CONFIG_USER_ONLY +-void s390_cpu_recompute_watchpoints(CPUState *cs); +-void s390x_tod_timer(void *opaque); +-void s390x_cpu_timer(void *opaque); +-void s390_handle_wait(S390CPU *cpu); +-hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +-hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr); +-#define S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area) +-int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch); +-int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len); +-LowCore *cpu_map_lowcore(CPUS390XState *env); +-void cpu_unmap_lowcore(LowCore *lowcore); +-#endif /* CONFIG_USER_ONLY */ +- +- +-/* interrupt.c */ +-void trigger_pgm_exception(CPUS390XState *env, uint32_t code); +-void 
cpu_inject_clock_comparator(S390CPU *cpu); +-void cpu_inject_cpu_timer(S390CPU *cpu); +-void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr); +-int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr); +-bool s390_cpu_has_io_int(S390CPU *cpu); +-bool s390_cpu_has_ext_int(S390CPU *cpu); +-bool s390_cpu_has_mcck_int(S390CPU *cpu); +-bool s390_cpu_has_int(S390CPU *cpu); +-bool s390_cpu_has_restart_int(S390CPU *cpu); +-bool s390_cpu_has_stop_int(S390CPU *cpu); +-void cpu_inject_restart(S390CPU *cpu); +-void cpu_inject_stop(S390CPU *cpu); +- +- +-/* ioinst.c */ +-void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +-void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +-void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +-void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, +- uintptr_t ra); +-void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, +- uintptr_t ra); +-void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra); +-void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, +- uintptr_t ra); +-int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra); +-void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra); +-void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2, +- uint32_t ipb, uintptr_t ra); +-void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +-void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +-void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra); +- +- +-/* mem_helper.c */ +-target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr); +-void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, +- uintptr_t ra); +- +- +-/* mmu_helper.c */ +-int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, +- target_ulong *raddr, int *flags, uint64_t *tec); +-int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw, +- target_ulong *addr, int *flags, uint64_t *tec); +- +- +-/* misc_helper.c */ +-int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3); +-void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, +- uintptr_t ra); +- +- +-/* translate.c */ +-void s390x_translate_init(void); +- +- +-/* sigp.c */ +-int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3); +-void do_stop_interrupt(CPUS390XState *env); +- +-#endif /* S390X_INTERNAL_H */ +diff --git a/target/s390x/interrupt.c b/target/s390x/interrupt.c +index 9b4d08f2be..5195f060ec 100644 +--- a/target/s390x/interrupt.c ++++ b/target/s390x/interrupt.c +@@ -9,13 +9,13 @@ + + #include "qemu/osdep.h" + #include "cpu.h" +-#include "kvm_s390x.h" +-#include "internal.h" ++#include "kvm/kvm_s390x.h" ++#include "s390x-internal.h" + #include "exec/exec-all.h" + #include "sysemu/kvm.h" + #include "sysemu/tcg.h" + #include "hw/s390x/ioinst.h" +-#include "tcg_s390x.h" ++#include "tcg/tcg_s390x.h" + #if !defined(CONFIG_USER_ONLY) + #include "hw/s390x/s390_flic.h" + #endif +diff --git a/target/s390x/ioinst.c b/target/s390x/ioinst.c +index 1ee11522e1..4eb0a7a9f8 100644 +--- a/target/s390x/ioinst.c ++++ b/target/s390x/ioinst.c +@@ -12,7 +12,7 @@ + #include "qemu/osdep.h" + + #include "cpu.h" +-#include "internal.h" ++#include "s390x-internal.h" + #include "hw/s390x/ioinst.h" + #include "trace.h" + #include "hw/s390x/s390-pci-bus.h" +diff --git a/target/s390x/kvm-stub.c b/target/s390x/kvm-stub.c +deleted file mode 100644 +index 
8a308cfebb..0000000000 +--- a/target/s390x/kvm-stub.c ++++ /dev/null +@@ -1,121 +0,0 @@ +-/* +- * QEMU KVM support -- s390x specific function stubs. +- * +- * Copyright (c) 2009 Ulrich Hecht +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. +- */ +- +-#include "qemu/osdep.h" +-#include "cpu.h" +-#include "kvm_s390x.h" +- +-void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code) +-{ +-} +- +-int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf, +- int len, bool is_write) +-{ +- return -ENOSYS; +-} +- +-void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code) +-{ +-} +- +-int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state) +-{ +- return -ENOSYS; +-} +- +-void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu) +-{ +-} +- +-int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu) +-{ +- return 0; +-} +- +-int kvm_s390_get_hpage_1m(void) +-{ +- return 0; +-} +- +-int kvm_s390_get_ri(void) +-{ +- return 0; +-} +- +-int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low) +-{ +- return -ENOSYS; +-} +- +-int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low) +-{ +- return -ENOSYS; +-} +- +-int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low) +-{ +- return -ENOSYS; +-} +- +-int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low) +-{ +- return -ENOSYS; +-} +- +-void kvm_s390_enable_css_support(S390CPU *cpu) +-{ +-} +- +-int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch, +- int vq, bool assign) +-{ +- return -ENOSYS; +-} +- +-void kvm_s390_cmma_reset(void) +-{ +-} +- +-void kvm_s390_reset_vcpu_initial(S390CPU *cpu) +-{ +-} +- +-void kvm_s390_reset_vcpu_clear(S390CPU *cpu) +-{ +-} +- +-void kvm_s390_reset_vcpu_normal(S390CPU *cpu) +-{ +-} +- +-int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit) +-{ +- return 0; +-} +- +-void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp) +-{ +-} +- +-void kvm_s390_crypto_reset(void) +-{ +-} +- +-void kvm_s390_stop_interrupt(S390CPU *cpu) +-{ +-} +- +-void kvm_s390_restart_interrupt(S390CPU *cpu) +-{ +-} +- +-void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info) +-{ +-} +diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c +deleted file mode 100644 +index 2388924587..0000000000 +--- a/target/s390x/kvm.c ++++ /dev/null +@@ -1,2564 +0,0 @@ +-/* +- * QEMU S390x KVM implementation +- * +- * Copyright (c) 2009 Alexander Graf +- * Copyright IBM Corp. 2012 +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License as published by +- * the Free Software Foundation; either version 2 of the License, or +- * (at your option) any later version. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * General Public License for more details. +- * +- * You should have received a copy of the GNU General Public License +- * along with this program; if not, see . 
+- */ +- +-#include "qemu/osdep.h" +-#include +- +-#include +-#include +- +-#include "qemu-common.h" +-#include "cpu.h" +-#include "internal.h" +-#include "kvm_s390x.h" +-#include "sysemu/kvm_int.h" +-#include "qemu/cutils.h" +-#include "qapi/error.h" +-#include "qemu/error-report.h" +-#include "qemu/timer.h" +-#include "qemu/units.h" +-#include "qemu/main-loop.h" +-#include "qemu/mmap-alloc.h" +-#include "qemu/log.h" +-#include "sysemu/sysemu.h" +-#include "sysemu/hw_accel.h" +-#include "sysemu/runstate.h" +-#include "sysemu/device_tree.h" +-#include "exec/gdbstub.h" +-#include "exec/ram_addr.h" +-#include "trace.h" +-#include "hw/s390x/s390-pci-inst.h" +-#include "hw/s390x/s390-pci-bus.h" +-#include "hw/s390x/ipl.h" +-#include "hw/s390x/ebcdic.h" +-#include "exec/memattrs.h" +-#include "hw/s390x/s390-virtio-ccw.h" +-#include "hw/s390x/s390-virtio-hcall.h" +-#include "hw/s390x/pv.h" +- +-#ifndef DEBUG_KVM +-#define DEBUG_KVM 0 +-#endif +- +-#define DPRINTF(fmt, ...) do { \ +- if (DEBUG_KVM) { \ +- fprintf(stderr, fmt, ## __VA_ARGS__); \ +- } \ +-} while (0) +- +-#define kvm_vm_check_mem_attr(s, attr) \ +- kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr) +- +-#define IPA0_DIAG 0x8300 +-#define IPA0_SIGP 0xae00 +-#define IPA0_B2 0xb200 +-#define IPA0_B9 0xb900 +-#define IPA0_EB 0xeb00 +-#define IPA0_E3 0xe300 +- +-#define PRIV_B2_SCLP_CALL 0x20 +-#define PRIV_B2_CSCH 0x30 +-#define PRIV_B2_HSCH 0x31 +-#define PRIV_B2_MSCH 0x32 +-#define PRIV_B2_SSCH 0x33 +-#define PRIV_B2_STSCH 0x34 +-#define PRIV_B2_TSCH 0x35 +-#define PRIV_B2_TPI 0x36 +-#define PRIV_B2_SAL 0x37 +-#define PRIV_B2_RSCH 0x38 +-#define PRIV_B2_STCRW 0x39 +-#define PRIV_B2_STCPS 0x3a +-#define PRIV_B2_RCHP 0x3b +-#define PRIV_B2_SCHM 0x3c +-#define PRIV_B2_CHSC 0x5f +-#define PRIV_B2_SIGA 0x74 +-#define PRIV_B2_XSCH 0x76 +- +-#define PRIV_EB_SQBS 0x8a +-#define PRIV_EB_PCISTB 0xd0 +-#define PRIV_EB_SIC 0xd1 +- +-#define PRIV_B9_EQBS 0x9c +-#define PRIV_B9_CLP 0xa0 +-#define PRIV_B9_PCISTG 0xd0 +-#define PRIV_B9_PCILG 0xd2 +-#define PRIV_B9_RPCIT 0xd3 +- +-#define PRIV_E3_MPCIFC 0xd0 +-#define PRIV_E3_STPCIFC 0xd4 +- +-#define DIAG_TIMEREVENT 0x288 +-#define DIAG_IPL 0x308 +-#define DIAG_SET_CONTROL_PROGRAM_CODES 0x318 +-#define DIAG_KVM_HYPERCALL 0x500 +-#define DIAG_KVM_BREAKPOINT 0x501 +- +-#define ICPT_INSTRUCTION 0x04 +-#define ICPT_PROGRAM 0x08 +-#define ICPT_EXT_INT 0x14 +-#define ICPT_WAITPSW 0x1c +-#define ICPT_SOFT_INTERCEPT 0x24 +-#define ICPT_CPU_STOP 0x28 +-#define ICPT_OPEREXC 0x2c +-#define ICPT_IO 0x40 +-#define ICPT_PV_INSTR 0x68 +-#define ICPT_PV_INSTR_NOTIFICATION 0x6c +- +-#define NR_LOCAL_IRQS 32 +-/* +- * Needs to be big enough to contain max_cpus emergency signals +- * and in addition NR_LOCAL_IRQS interrupts +- */ +-#define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \ +- (max_cpus + NR_LOCAL_IRQS)) +-/* +- * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages +- * as the dirty bitmap must be managed by bitops that take an int as +- * position indicator. This would end at an unaligned address +- * (0x7fffff00000). As future variants might provide larger pages +- * and to make all addresses properly aligned, let us split at 4TB. +- */ +-#define KVM_SLOT_MAX_BYTES (4UL * TiB) +- +-static CPUWatchpoint hw_watchpoint; +-/* +- * We don't use a list because this structure is also used to transmit the +- * hardware breakpoints to the kernel. 
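/*
 * The arithmetic behind the 4 TiB slot split above, as a sketch: a
 * signed int bit index addresses at most 2^31 dirty-bitmap bits, i.e.
 * 2^31 pages of 4 KiB, and KVM's page limit sits just below that at an
 * unaligned boundary; 4 TiB is a power of two safely underneath.
 */
_Static_assert((4ULL << 40) / 4096 <= (1ULL << 31),
               "4 TiB of 4 KiB pages stays within an int bit index");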
+- */ +-static struct kvm_hw_breakpoint *hw_breakpoints; +-static int nb_hw_breakpoints; +- +-const KVMCapabilityInfo kvm_arch_required_capabilities[] = { +- KVM_CAP_LAST_INFO +-}; +- +-static int cap_sync_regs; +-static int cap_async_pf; +-static int cap_mem_op; +-static int cap_s390_irq; +-static int cap_ri; +-static int cap_hpage_1m; +-static int cap_vcpu_resets; +-static int cap_protected; +- +-static int active_cmma; +- +-static int kvm_s390_query_mem_limit(uint64_t *memory_limit) +-{ +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_MEM_CTRL, +- .attr = KVM_S390_VM_MEM_LIMIT_SIZE, +- .addr = (uint64_t) memory_limit, +- }; +- +- return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); +-} +- +-int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit) +-{ +- int rc; +- +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_MEM_CTRL, +- .attr = KVM_S390_VM_MEM_LIMIT_SIZE, +- .addr = (uint64_t) &new_limit, +- }; +- +- if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) { +- return 0; +- } +- +- rc = kvm_s390_query_mem_limit(hw_limit); +- if (rc) { +- return rc; +- } else if (*hw_limit < new_limit) { +- return -E2BIG; +- } +- +- return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); +-} +- +-int kvm_s390_cmma_active(void) +-{ +- return active_cmma; +-} +- +-static bool kvm_s390_cmma_available(void) +-{ +- static bool initialized, value; +- +- if (!initialized) { +- initialized = true; +- value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) && +- kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA); +- } +- return value; +-} +- +-void kvm_s390_cmma_reset(void) +-{ +- int rc; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_MEM_CTRL, +- .attr = KVM_S390_VM_MEM_CLR_CMMA, +- }; +- +- if (!kvm_s390_cmma_active()) { +- return; +- } +- +- rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); +- trace_kvm_clear_cmma(rc); +-} +- +-static void kvm_s390_enable_cmma(void) +-{ +- int rc; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_MEM_CTRL, +- .attr = KVM_S390_VM_MEM_ENABLE_CMMA, +- }; +- +- if (cap_hpage_1m) { +- warn_report("CMM will not be enabled because it is not " +- "compatible with huge memory backings."); +- return; +- } +- rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); +- active_cmma = !rc; +- trace_kvm_enable_cmma(rc); +-} +- +-static void kvm_s390_set_attr(uint64_t attr) +-{ +- struct kvm_device_attr attribute = { +- .group = KVM_S390_VM_CRYPTO, +- .attr = attr, +- }; +- +- int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute); +- +- if (ret) { +- error_report("Failed to set crypto device attribute %lu: %s", +- attr, strerror(-ret)); +- } +-} +- +-static void kvm_s390_init_aes_kw(void) +-{ +- uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW; +- +- if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap", +- NULL)) { +- attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW; +- } +- +- if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { +- kvm_s390_set_attr(attr); +- } +-} +- +-static void kvm_s390_init_dea_kw(void) +-{ +- uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW; +- +- if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap", +- NULL)) { +- attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW; +- } +- +- if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { +- kvm_s390_set_attr(attr); +- } +-} +- +-void kvm_s390_crypto_reset(void) +-{ +- if (s390_has_feat(S390_FEAT_MSA_EXT_3)) { +- kvm_s390_init_aes_kw(); +- kvm_s390_init_dea_kw(); +- } +-} 
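/*
 * Sketch of the idiom the crypto and CMMA code above shares: VM-wide
 * s390 knobs are addressed as (group, attr) pairs, probed with
 * KVM_HAS_DEVICE_ATTR before being set so that older kernels are
 * tolerated (error handling elided; fd plumbing hypothetical).
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void set_vm_attr_sketch(int vm_fd, uint32_t group, uint64_t attr_id)
{
    struct kvm_device_attr attr = { .group = group, .attr = attr_id };

    if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0) {
        ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    }
}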
+- +-void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp) +-{ +- if (pagesize == 4 * KiB) { +- return; +- } +- +- if (!hpage_1m_allowed()) { +- error_setg(errp, "This QEMU machine does not support huge page " +- "mappings"); +- return; +- } +- +- if (pagesize != 1 * MiB) { +- error_setg(errp, "Memory backing with 2G pages was specified, " +- "but KVM does not support this memory backing"); +- return; +- } +- +- if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_HPAGE_1M, 0)) { +- error_setg(errp, "Memory backing with 1M pages was specified, " +- "but KVM does not support this memory backing"); +- return; +- } +- +- cap_hpage_1m = 1; +-} +- +-int kvm_s390_get_hpage_1m(void) +-{ +- return cap_hpage_1m; +-} +- +-static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque) +-{ +- MachineClass *mc = MACHINE_CLASS(oc); +- +- mc->default_cpu_type = S390_CPU_TYPE_NAME("host"); +-} +- +-int kvm_arch_init(MachineState *ms, KVMState *s) +-{ +- object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE, +- false, NULL); +- +- if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) { +- error_report("KVM is missing capability KVM_CAP_DEVICE_CTRL - " +- "please use kernel 3.15 or newer"); +- return -1; +- } +- if (!kvm_check_extension(s, KVM_CAP_S390_COW)) { +- error_report("KVM is missing capability KVM_CAP_S390_COW - " +- "unsupported environment"); +- return -1; +- } +- +- cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS); +- cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF); +- cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP); +- cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ); +- cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS); +- cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED); +- +- kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0); +- kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0); +- kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0); +- if (ri_allowed()) { +- if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) { +- cap_ri = 1; +- } +- } +- if (cpu_model_allowed()) { +- kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0); +- } +- +- /* +- * The migration interface for ais was introduced with kernel 4.13 +- * but the capability itself had been active since 4.12. As migration +- * support is considered necessary, we only try to enable this for +- * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available. +- */ +- if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() && +- kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) { +- kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); +- } +- +- kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES); +- return 0; +-} +- +-int kvm_arch_irqchip_create(KVMState *s) +-{ +- return 0; +-} +- +-unsigned long kvm_arch_vcpu_id(CPUState *cpu) +-{ +- return cpu->cpu_index; +-} +- +-int kvm_arch_init_vcpu(CPUState *cs) +-{ +- unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus; +- S390CPU *cpu = S390_CPU(cs); +- kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state); +- cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE(max_cpus)); +- return 0; +-} +- +-int kvm_arch_destroy_vcpu(CPUState *cs) +-{ +- S390CPU *cpu = S390_CPU(cs); +- +- g_free(cpu->irqstate); +- cpu->irqstate = NULL; +- +- return 0; +-} +- +-static void kvm_s390_reset_vcpu(S390CPU *cpu, unsigned long type) +-{ +- CPUState *cs = CPU(cpu); +- +- /* +- * The reset call is needed here to reset in-kernel vcpu data that +- * we can't access directly from QEMU (i.e. with older kernels +- * which don't support sync_regs/ONE_REG). 
Before this ioctl +- * cpu_synchronize_state() is called in common kvm code +- * (kvm-all). +- */ +- if (kvm_vcpu_ioctl(cs, type)) { +- error_report("CPU reset failed on CPU %i type %lx", +- cs->cpu_index, type); +- } +-} +- +-void kvm_s390_reset_vcpu_initial(S390CPU *cpu) +-{ +- kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET); +-} +- +-void kvm_s390_reset_vcpu_clear(S390CPU *cpu) +-{ +- if (cap_vcpu_resets) { +- kvm_s390_reset_vcpu(cpu, KVM_S390_CLEAR_RESET); +- } else { +- kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET); +- } +-} +- +-void kvm_s390_reset_vcpu_normal(S390CPU *cpu) +-{ +- if (cap_vcpu_resets) { +- kvm_s390_reset_vcpu(cpu, KVM_S390_NORMAL_RESET); +- } +-} +- +-static int can_sync_regs(CPUState *cs, int regs) +-{ +- return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs; +-} +- +-int kvm_arch_put_registers(CPUState *cs, int level) +-{ +- S390CPU *cpu = S390_CPU(cs); +- CPUS390XState *env = &cpu->env; +- struct kvm_sregs sregs; +- struct kvm_regs regs; +- struct kvm_fpu fpu = {}; +- int r; +- int i; +- +- /* always save the PSW and the GPRS*/ +- cs->kvm_run->psw_addr = env->psw.addr; +- cs->kvm_run->psw_mask = env->psw.mask; +- +- if (can_sync_regs(cs, KVM_SYNC_GPRS)) { +- for (i = 0; i < 16; i++) { +- cs->kvm_run->s.regs.gprs[i] = env->regs[i]; +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS; +- } +- } else { +- for (i = 0; i < 16; i++) { +- regs.gprs[i] = env->regs[i]; +- } +- r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs); +- if (r < 0) { +- return r; +- } +- } +- +- if (can_sync_regs(cs, KVM_SYNC_VRS)) { +- for (i = 0; i < 32; i++) { +- cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0]; +- cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1]; +- } +- cs->kvm_run->s.regs.fpc = env->fpc; +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS; +- } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) { +- for (i = 0; i < 16; i++) { +- cs->kvm_run->s.regs.fprs[i] = *get_freg(env, i); +- } +- cs->kvm_run->s.regs.fpc = env->fpc; +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS; +- } else { +- /* Floating point */ +- for (i = 0; i < 16; i++) { +- fpu.fprs[i] = *get_freg(env, i); +- } +- fpu.fpc = env->fpc; +- +- r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu); +- if (r < 0) { +- return r; +- } +- } +- +- /* Do we need to save more than that? */ +- if (level == KVM_PUT_RUNTIME_STATE) { +- return 0; +- } +- +- if (can_sync_regs(cs, KVM_SYNC_ARCH0)) { +- cs->kvm_run->s.regs.cputm = env->cputm; +- cs->kvm_run->s.regs.ckc = env->ckc; +- cs->kvm_run->s.regs.todpr = env->todpr; +- cs->kvm_run->s.regs.gbea = env->gbea; +- cs->kvm_run->s.regs.pp = env->pp; +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0; +- } else { +- /* +- * These ONE_REGS are not protected by a capability. As they are only +- * necessary for migration we just trace a possible error, but don't +- * return with an error return code. 
+- */ +- kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm); +- kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc); +- kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr); +- kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea); +- kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp); +- } +- +- if (can_sync_regs(cs, KVM_SYNC_RICCB)) { +- memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64); +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB; +- } +- +- /* pfault parameters */ +- if (can_sync_regs(cs, KVM_SYNC_PFAULT)) { +- cs->kvm_run->s.regs.pft = env->pfault_token; +- cs->kvm_run->s.regs.pfs = env->pfault_select; +- cs->kvm_run->s.regs.pfc = env->pfault_compare; +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT; +- } else if (cap_async_pf) { +- r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token); +- if (r < 0) { +- return r; +- } +- r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare); +- if (r < 0) { +- return r; +- } +- r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select); +- if (r < 0) { +- return r; +- } +- } +- +- /* access registers and control registers*/ +- if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) { +- for (i = 0; i < 16; i++) { +- cs->kvm_run->s.regs.acrs[i] = env->aregs[i]; +- cs->kvm_run->s.regs.crs[i] = env->cregs[i]; +- } +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS; +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS; +- } else { +- for (i = 0; i < 16; i++) { +- sregs.acrs[i] = env->aregs[i]; +- sregs.crs[i] = env->cregs[i]; +- } +- r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs); +- if (r < 0) { +- return r; +- } +- } +- +- if (can_sync_regs(cs, KVM_SYNC_GSCB)) { +- memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32); +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB; +- } +- +- if (can_sync_regs(cs, KVM_SYNC_BPBC)) { +- cs->kvm_run->s.regs.bpbc = env->bpbc; +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC; +- } +- +- if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) { +- cs->kvm_run->s.regs.etoken = env->etoken; +- cs->kvm_run->s.regs.etoken_extension = env->etoken_extension; +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN; +- } +- +- if (can_sync_regs(cs, KVM_SYNC_DIAG318)) { +- cs->kvm_run->s.regs.diag318 = env->diag318_info; +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318; +- } +- +- /* Finally the prefix */ +- if (can_sync_regs(cs, KVM_SYNC_PREFIX)) { +- cs->kvm_run->s.regs.prefix = env->psa; +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX; +- } else { +- /* prefix is only supported via sync regs */ +- } +- return 0; +-} +- +-int kvm_arch_get_registers(CPUState *cs) +-{ +- S390CPU *cpu = S390_CPU(cs); +- CPUS390XState *env = &cpu->env; +- struct kvm_sregs sregs; +- struct kvm_regs regs; +- struct kvm_fpu fpu; +- int i, r; +- +- /* get the PSW */ +- env->psw.addr = cs->kvm_run->psw_addr; +- env->psw.mask = cs->kvm_run->psw_mask; +- +- /* the GPRS */ +- if (can_sync_regs(cs, KVM_SYNC_GPRS)) { +- for (i = 0; i < 16; i++) { +- env->regs[i] = cs->kvm_run->s.regs.gprs[i]; +- } +- } else { +- r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs); +- if (r < 0) { +- return r; +- } +- for (i = 0; i < 16; i++) { +- env->regs[i] = regs.gprs[i]; +- } +- } +- +- /* The ACRS and CRS */ +- if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) { +- for (i = 0; i < 16; i++) { +- env->aregs[i] = cs->kvm_run->s.regs.acrs[i]; +- env->cregs[i] = cs->kvm_run->s.regs.crs[i]; +- } +- } else { +- r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs); +- if (r < 0) { +- return r; +- } +- for (i = 0; i < 16; i++) { +- env->aregs[i] = sregs.acrs[i]; +- env->cregs[i] = 
sregs.crs[i]; +- } +- } +- +- /* Floating point and vector registers */ +- if (can_sync_regs(cs, KVM_SYNC_VRS)) { +- for (i = 0; i < 32; i++) { +- env->vregs[i][0] = cs->kvm_run->s.regs.vrs[i][0]; +- env->vregs[i][1] = cs->kvm_run->s.regs.vrs[i][1]; +- } +- env->fpc = cs->kvm_run->s.regs.fpc; +- } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) { +- for (i = 0; i < 16; i++) { +- *get_freg(env, i) = cs->kvm_run->s.regs.fprs[i]; +- } +- env->fpc = cs->kvm_run->s.regs.fpc; +- } else { +- r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu); +- if (r < 0) { +- return r; +- } +- for (i = 0; i < 16; i++) { +- *get_freg(env, i) = fpu.fprs[i]; +- } +- env->fpc = fpu.fpc; +- } +- +- /* The prefix */ +- if (can_sync_regs(cs, KVM_SYNC_PREFIX)) { +- env->psa = cs->kvm_run->s.regs.prefix; +- } +- +- if (can_sync_regs(cs, KVM_SYNC_ARCH0)) { +- env->cputm = cs->kvm_run->s.regs.cputm; +- env->ckc = cs->kvm_run->s.regs.ckc; +- env->todpr = cs->kvm_run->s.regs.todpr; +- env->gbea = cs->kvm_run->s.regs.gbea; +- env->pp = cs->kvm_run->s.regs.pp; +- } else { +- /* +- * These ONE_REGS are not protected by a capability. As they are only +- * necessary for migration we just trace a possible error, but don't +- * return with an error return code. +- */ +- kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm); +- kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc); +- kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr); +- kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea); +- kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp); +- } +- +- if (can_sync_regs(cs, KVM_SYNC_RICCB)) { +- memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64); +- } +- +- if (can_sync_regs(cs, KVM_SYNC_GSCB)) { +- memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32); +- } +- +- if (can_sync_regs(cs, KVM_SYNC_BPBC)) { +- env->bpbc = cs->kvm_run->s.regs.bpbc; +- } +- +- if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) { +- env->etoken = cs->kvm_run->s.regs.etoken; +- env->etoken_extension = cs->kvm_run->s.regs.etoken_extension; +- } +- +- /* pfault parameters */ +- if (can_sync_regs(cs, KVM_SYNC_PFAULT)) { +- env->pfault_token = cs->kvm_run->s.regs.pft; +- env->pfault_select = cs->kvm_run->s.regs.pfs; +- env->pfault_compare = cs->kvm_run->s.regs.pfc; +- } else if (cap_async_pf) { +- r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token); +- if (r < 0) { +- return r; +- } +- r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare); +- if (r < 0) { +- return r; +- } +- r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select); +- if (r < 0) { +- return r; +- } +- } +- +- if (can_sync_regs(cs, KVM_SYNC_DIAG318)) { +- env->diag318_info = cs->kvm_run->s.regs.diag318; +- } +- +- return 0; +-} +- +-int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low) +-{ +- int r; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_TOD, +- .attr = KVM_S390_VM_TOD_LOW, +- .addr = (uint64_t)tod_low, +- }; +- +- r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); +- if (r) { +- return r; +- } +- +- attr.attr = KVM_S390_VM_TOD_HIGH; +- attr.addr = (uint64_t)tod_high; +- return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); +-} +- +-int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low) +-{ +- int r; +- struct kvm_s390_vm_tod_clock gtod; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_TOD, +- .attr = KVM_S390_VM_TOD_EXT, +- .addr = (uint64_t)&gtod, +- }; +- +- r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); +- *tod_high = gtod.epoch_idx; +- *tod_low = gtod.tod; +- +- return r; +-} +- +-int
kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low) +-{ +- int r; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_TOD, +- .attr = KVM_S390_VM_TOD_LOW, +- .addr = (uint64_t)&tod_low, +- }; +- +- r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); +- if (r) { +- return r; +- } +- +- attr.attr = KVM_S390_VM_TOD_HIGH; +- attr.addr = (uint64_t)&tod_high; +- return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); +-} +- +-int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low) +-{ +- struct kvm_s390_vm_tod_clock gtod = { +- .epoch_idx = tod_high, +- .tod = tod_low, +- }; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_TOD, +- .attr = KVM_S390_VM_TOD_EXT, +- .addr = (uint64_t)&gtod, +- }; +- +- return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); +-} +- +-/** +- * kvm_s390_mem_op: +- * @addr: the logical start address in guest memory +- * @ar: the access register number +- * @hostbuf: buffer in host memory. NULL = do only checks w/o copying +- * @len: length that should be transferred +- * @is_write: true = write, false = read +- * Returns: 0 on success, non-zero if an exception or error occurred +- * +- * Use KVM ioctl to read/write from/to guest memory. An access exception +- * is injected into the vCPU in case of translation errors. +- */ +-int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf, +- int len, bool is_write) +-{ +- struct kvm_s390_mem_op mem_op = { +- .gaddr = addr, +- .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION, +- .size = len, +- .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE +- : KVM_S390_MEMOP_LOGICAL_READ, +- .buf = (uint64_t)hostbuf, +- .ar = ar, +- }; +- int ret; +- +- if (!cap_mem_op) { +- return -ENOSYS; +- } +- if (!hostbuf) { +- mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY; +- } +- +- ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op); +- if (ret < 0) { +- warn_report("KVM_S390_MEM_OP failed: %s", strerror(-ret)); +- } +- return ret; +-} +- +-int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf, +- int len, bool is_write) +-{ +- struct kvm_s390_mem_op mem_op = { +- .sida_offset = offset, +- .size = len, +- .op = is_write ?
KVM_S390_MEMOP_SIDA_WRITE +- : KVM_S390_MEMOP_SIDA_READ, +- .buf = (uint64_t)hostbuf, +- }; +- int ret; +- +- if (!cap_mem_op || !cap_protected) { +- return -ENOSYS; +- } +- +- ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op); +- if (ret < 0) { +- error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret)); +- abort(); +- } +- return ret; +-} +- +-static uint8_t const *sw_bp_inst; +-static uint8_t sw_bp_ilen; +- +-static void determine_sw_breakpoint_instr(void) +-{ +- /* DIAG 501 is used for sw breakpoints with old kernels */ +- static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01}; +- /* Instruction 0x0000 is used for sw breakpoints with recent kernels */ +- static const uint8_t instr_0x0000[] = {0x00, 0x00}; +- +- if (sw_bp_inst) { +- return; +- } +- if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) { +- sw_bp_inst = diag_501; +- sw_bp_ilen = sizeof(diag_501); +- DPRINTF("KVM: will use 4-byte sw breakpoints.\n"); +- } else { +- sw_bp_inst = instr_0x0000; +- sw_bp_ilen = sizeof(instr_0x0000); +- DPRINTF("KVM: will use 2-byte sw breakpoints.\n"); +- } +-} +- +-int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +-{ +- determine_sw_breakpoint_instr(); +- +- if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, +- sw_bp_ilen, 0) || +- cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) { +- return -EINVAL; +- } +- return 0; +-} +- +-int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +-{ +- uint8_t t[MAX_ILEN]; +- +- if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) { +- return -EINVAL; +- } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) { +- return -EINVAL; +- } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, +- sw_bp_ilen, 1)) { +- return -EINVAL; +- } +- +- return 0; +-} +- +-static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr, +- int len, int type) +-{ +- int n; +- +- for (n = 0; n < nb_hw_breakpoints; n++) { +- if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type && +- (hw_breakpoints[n].len == len || len == -1)) { +- return &hw_breakpoints[n]; +- } +- } +- +- return NULL; +-} +- +-static int insert_hw_breakpoint(target_ulong addr, int len, int type) +-{ +- int size; +- +- if (find_hw_breakpoint(addr, len, type)) { +- return -EEXIST; +- } +- +- size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint); +- +- if (!hw_breakpoints) { +- nb_hw_breakpoints = 0; +- hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size); +- } else { +- hw_breakpoints = +- (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size); +- } +- +- if (!hw_breakpoints) { +- nb_hw_breakpoints = 0; +- return -ENOMEM; +- } +- +- hw_breakpoints[nb_hw_breakpoints].addr = addr; +- hw_breakpoints[nb_hw_breakpoints].len = len; +- hw_breakpoints[nb_hw_breakpoints].type = type; +- +- nb_hw_breakpoints++; +- +- return 0; +-} +- +-int kvm_arch_insert_hw_breakpoint(target_ulong addr, +- target_ulong len, int type) +-{ +- switch (type) { +- case GDB_BREAKPOINT_HW: +- type = KVM_HW_BP; +- break; +- case GDB_WATCHPOINT_WRITE: +- if (len < 1) { +- return -EINVAL; +- } +- type = KVM_HW_WP_WRITE; +- break; +- default: +- return -ENOSYS; +- } +- return insert_hw_breakpoint(addr, len, type); +-} +- +-int kvm_arch_remove_hw_breakpoint(target_ulong addr, +- target_ulong len, int type) +-{ +- int size; +- struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type); +- +- if (bp == NULL) { +- return -ENOENT; +- } +- +- nb_hw_breakpoints--; +- if 
(nb_hw_breakpoints > 0) { +- /* +- * In order to trim the array, move the last element to the position to +- * be removed - if necessary. +- */ +- if (bp != &hw_breakpoints[nb_hw_breakpoints]) { +- *bp = hw_breakpoints[nb_hw_breakpoints]; +- } +- size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint); +- hw_breakpoints = +- (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size); +- } else { +- g_free(hw_breakpoints); +- hw_breakpoints = NULL; +- } +- +- return 0; +-} +- +-void kvm_arch_remove_all_hw_breakpoints(void) +-{ +- nb_hw_breakpoints = 0; +- g_free(hw_breakpoints); +- hw_breakpoints = NULL; +-} +- +-void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) +-{ +- int i; +- +- if (nb_hw_breakpoints > 0) { +- dbg->arch.nr_hw_bp = nb_hw_breakpoints; +- dbg->arch.hw_bp = hw_breakpoints; +- +- for (i = 0; i < nb_hw_breakpoints; ++i) { +- hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu, +- hw_breakpoints[i].addr); +- } +- dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; +- } else { +- dbg->arch.nr_hw_bp = 0; +- dbg->arch.hw_bp = NULL; +- } +-} +- +-void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) +-{ +-} +- +-MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) +-{ +- return MEMTXATTRS_UNSPECIFIED; +-} +- +-int kvm_arch_process_async_events(CPUState *cs) +-{ +- return cs->halted; +-} +- +-static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq, +- struct kvm_s390_interrupt *interrupt) +-{ +- int r = 0; +- +- interrupt->type = irq->type; +- switch (irq->type) { +- case KVM_S390_INT_VIRTIO: +- interrupt->parm = irq->u.ext.ext_params; +- /* fall through */ +- case KVM_S390_INT_PFAULT_INIT: +- case KVM_S390_INT_PFAULT_DONE: +- interrupt->parm64 = irq->u.ext.ext_params2; +- break; +- case KVM_S390_PROGRAM_INT: +- interrupt->parm = irq->u.pgm.code; +- break; +- case KVM_S390_SIGP_SET_PREFIX: +- interrupt->parm = irq->u.prefix.address; +- break; +- case KVM_S390_INT_SERVICE: +- interrupt->parm = irq->u.ext.ext_params; +- break; +- case KVM_S390_MCHK: +- interrupt->parm = irq->u.mchk.cr14; +- interrupt->parm64 = irq->u.mchk.mcic; +- break; +- case KVM_S390_INT_EXTERNAL_CALL: +- interrupt->parm = irq->u.extcall.code; +- break; +- case KVM_S390_INT_EMERGENCY: +- interrupt->parm = irq->u.emerg.code; +- break; +- case KVM_S390_SIGP_STOP: +- case KVM_S390_RESTART: +- break; /* These types have no parameters */ +- case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: +- interrupt->parm = irq->u.io.subchannel_id << 16; +- interrupt->parm |= irq->u.io.subchannel_nr; +- interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32; +- interrupt->parm64 |= irq->u.io.io_int_word; +- break; +- default: +- r = -EINVAL; +- break; +- } +- return r; +-} +- +-static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq) +-{ +- struct kvm_s390_interrupt kvmint = {}; +- int r; +- +- r = s390_kvm_irq_to_interrupt(irq, &kvmint); +- if (r < 0) { +- fprintf(stderr, "%s called with bogus interrupt\n", __func__); +- exit(1); +- } +- +- r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint); +- if (r < 0) { +- fprintf(stderr, "KVM failed to inject interrupt\n"); +- exit(1); +- } +-} +- +-void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq) +-{ +- CPUState *cs = CPU(cpu); +- int r; +- +- if (cap_s390_irq) { +- r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq); +- if (!r) { +- return; +- } +- error_report("KVM failed to inject interrupt %llx", irq->type); +- exit(1); +- } +- +- inject_vcpu_irq_legacy(cs, irq); +-} +- +-void 
kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq) +-{ +- struct kvm_s390_interrupt kvmint = {}; +- int r; +- +- r = s390_kvm_irq_to_interrupt(irq, &kvmint); +- if (r < 0) { +- fprintf(stderr, "%s called with bogus interrupt\n", __func__); +- exit(1); +- } +- +- r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint); +- if (r < 0) { +- fprintf(stderr, "KVM failed to inject interrupt\n"); +- exit(1); +- } +-} +- +-void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code) +-{ +- struct kvm_s390_irq irq = { +- .type = KVM_S390_PROGRAM_INT, +- .u.pgm.code = code, +- }; +- qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n", +- cpu->env.psw.addr); +- kvm_s390_vcpu_interrupt(cpu, &irq); +-} +- +-void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code) +-{ +- struct kvm_s390_irq irq = { +- .type = KVM_S390_PROGRAM_INT, +- .u.pgm.code = code, +- .u.pgm.trans_exc_code = te_code, +- .u.pgm.exc_access_id = te_code & 3, +- }; +- +- kvm_s390_vcpu_interrupt(cpu, &irq); +-} +- +-static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run, +- uint16_t ipbh0) +-{ +- CPUS390XState *env = &cpu->env; +- uint64_t sccb; +- uint32_t code; +- int r; +- +- sccb = env->regs[ipbh0 & 0xf]; +- code = env->regs[(ipbh0 & 0xf0) >> 4]; +- +- switch (run->s390_sieic.icptcode) { +- case ICPT_PV_INSTR_NOTIFICATION: +- g_assert(s390_is_pv()); +- /* The notification intercepts are currently handled by KVM */ +- error_report("unexpected SCLP PV notification"); +- exit(1); +- break; +- case ICPT_PV_INSTR: +- g_assert(s390_is_pv()); +- sclp_service_call_protected(env, sccb, code); +- /* Setting the CC is done by the Ultravisor. */ +- break; +- case ICPT_INSTRUCTION: +- g_assert(!s390_is_pv()); +- r = sclp_service_call(env, sccb, code); +- if (r < 0) { +- kvm_s390_program_interrupt(cpu, -r); +- return; +- } +- setcc(cpu, r); +- } +-} +- +-static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) +-{ +- CPUS390XState *env = &cpu->env; +- int rc = 0; +- uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16; +- +- switch (ipa1) { +- case PRIV_B2_XSCH: +- ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED); +- break; +- case PRIV_B2_CSCH: +- ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED); +- break; +- case PRIV_B2_HSCH: +- ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED); +- break; +- case PRIV_B2_MSCH: +- ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED); +- break; +- case PRIV_B2_SSCH: +- ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED); +- break; +- case PRIV_B2_STCRW: +- ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED); +- break; +- case PRIV_B2_STSCH: +- ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED); +- break; +- case PRIV_B2_TSCH: +- /* We should only get tsch via KVM_EXIT_S390_TSCH. */ +- fprintf(stderr, "Spurious tsch intercept\n"); +- break; +- case PRIV_B2_CHSC: +- ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED); +- break; +- case PRIV_B2_TPI: +- /* This should have been handled by kvm already. */ +- fprintf(stderr, "Spurious tpi intercept\n"); +- break; +- case PRIV_B2_SCHM: +- ioinst_handle_schm(cpu, env->regs[1], env->regs[2], +- run->s390_sieic.ipb, RA_IGNORED); +- break; +- case PRIV_B2_RSCH: +- ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED); +- break; +- case PRIV_B2_RCHP: +- ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED); +- break; +- case PRIV_B2_STCPS: +- /* We do not provide this instruction, it is suppressed. 
*/ +- break; +- case PRIV_B2_SAL: +- ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED); +- break; +- case PRIV_B2_SIGA: +- /* Not provided, set CC = 3 for subchannel not operational */ +- setcc(cpu, 3); +- break; +- case PRIV_B2_SCLP_CALL: +- kvm_sclp_service_call(cpu, run, ipbh0); +- break; +- default: +- rc = -1; +- DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1); +- break; +- } +- +- return rc; +-} +- +-static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run, +- uint8_t *ar) +-{ +- CPUS390XState *env = &cpu->env; +- uint32_t x2 = (run->s390_sieic.ipa & 0x000f); +- uint32_t base2 = run->s390_sieic.ipb >> 28; +- uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) + +- ((run->s390_sieic.ipb & 0xff00) << 4); +- +- if (disp2 & 0x80000) { +- disp2 += 0xfff00000; +- } +- if (ar) { +- *ar = base2; +- } +- +- return (base2 ? env->regs[base2] : 0) + +- (x2 ? env->regs[x2] : 0) + (long)(int)disp2; +-} +- +-static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run, +- uint8_t *ar) +-{ +- CPUS390XState *env = &cpu->env; +- uint32_t base2 = run->s390_sieic.ipb >> 28; +- uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) + +- ((run->s390_sieic.ipb & 0xff00) << 4); +- +- if (disp2 & 0x80000) { +- disp2 += 0xfff00000; +- } +- if (ar) { +- *ar = base2; +- } +- +- return (base2 ? env->regs[base2] : 0) + (long)(int)disp2; +-} +- +-static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run) +-{ +- uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; +- +- if (s390_has_feat(S390_FEAT_ZPCI)) { +- return clp_service_call(cpu, r2, RA_IGNORED); +- } else { +- return -1; +- } +-} +- +-static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run) +-{ +- uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20; +- uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; +- +- if (s390_has_feat(S390_FEAT_ZPCI)) { +- return pcilg_service_call(cpu, r1, r2, RA_IGNORED); +- } else { +- return -1; +- } +-} +- +-static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run) +-{ +- uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20; +- uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; +- +- if (s390_has_feat(S390_FEAT_ZPCI)) { +- return pcistg_service_call(cpu, r1, r2, RA_IGNORED); +- } else { +- return -1; +- } +-} +- +-static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run) +-{ +- uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; +- uint64_t fiba; +- uint8_t ar; +- +- if (s390_has_feat(S390_FEAT_ZPCI)) { +- fiba = get_base_disp_rxy(cpu, run, &ar); +- +- return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED); +- } else { +- return -1; +- } +-} +- +-static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run) +-{ +- CPUS390XState *env = &cpu->env; +- uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; +- uint8_t r3 = run->s390_sieic.ipa & 0x000f; +- uint8_t isc; +- uint16_t mode; +- int r; +- +- mode = env->regs[r1] & 0xffff; +- isc = (env->regs[r3] >> 27) & 0x7; +- r = css_do_sic(env, isc, mode); +- if (r) { +- kvm_s390_program_interrupt(cpu, -r); +- } +- +- return 0; +-} +- +-static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run) +-{ +- uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20; +- uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; +- +- if (s390_has_feat(S390_FEAT_ZPCI)) { +- return rpcit_service_call(cpu, r1, r2, RA_IGNORED); +- } else { +- return -1; +- } +-} +- +-static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run) +-{ +- uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; +- uint8_t r3 = 
run->s390_sieic.ipa & 0x000f; +- uint64_t gaddr; +- uint8_t ar; +- +- if (s390_has_feat(S390_FEAT_ZPCI)) { +- gaddr = get_base_disp_rsy(cpu, run, &ar); +- +- return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED); +- } else { +- return -1; +- } +-} +- +-static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run) +-{ +- uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; +- uint64_t fiba; +- uint8_t ar; +- +- if (s390_has_feat(S390_FEAT_ZPCI)) { +- fiba = get_base_disp_rxy(cpu, run, &ar); +- +- return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED); +- } else { +- return -1; +- } +-} +- +-static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) +-{ +- int r = 0; +- +- switch (ipa1) { +- case PRIV_B9_CLP: +- r = kvm_clp_service_call(cpu, run); +- break; +- case PRIV_B9_PCISTG: +- r = kvm_pcistg_service_call(cpu, run); +- break; +- case PRIV_B9_PCILG: +- r = kvm_pcilg_service_call(cpu, run); +- break; +- case PRIV_B9_RPCIT: +- r = kvm_rpcit_service_call(cpu, run); +- break; +- case PRIV_B9_EQBS: +- /* just inject exception */ +- r = -1; +- break; +- default: +- r = -1; +- DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1); +- break; +- } +- +- return r; +-} +- +-static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl) +-{ +- int r = 0; +- +- switch (ipbl) { +- case PRIV_EB_PCISTB: +- r = kvm_pcistb_service_call(cpu, run); +- break; +- case PRIV_EB_SIC: +- r = kvm_sic_service_call(cpu, run); +- break; +- case PRIV_EB_SQBS: +- /* just inject exception */ +- r = -1; +- break; +- default: +- r = -1; +- DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl); +- break; +- } +- +- return r; +-} +- +-static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl) +-{ +- int r = 0; +- +- switch (ipbl) { +- case PRIV_E3_MPCIFC: +- r = kvm_mpcifc_service_call(cpu, run); +- break; +- case PRIV_E3_STPCIFC: +- r = kvm_stpcifc_service_call(cpu, run); +- break; +- default: +- r = -1; +- DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl); +- break; +- } +- +- return r; +-} +- +-static int handle_hypercall(S390CPU *cpu, struct kvm_run *run) +-{ +- CPUS390XState *env = &cpu->env; +- int ret; +- +- ret = s390_virtio_hypercall(env); +- if (ret == -EINVAL) { +- kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); +- return 0; +- } +- +- return ret; +-} +- +-static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run) +-{ +- uint64_t r1, r3; +- int rc; +- +- r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; +- r3 = run->s390_sieic.ipa & 0x000f; +- rc = handle_diag_288(&cpu->env, r1, r3); +- if (rc) { +- kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); +- } +-} +- +-static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run) +-{ +- uint64_t r1, r3; +- +- r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; +- r3 = run->s390_sieic.ipa & 0x000f; +- handle_diag_308(&cpu->env, r1, r3, RA_IGNORED); +-} +- +-static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run) +-{ +- CPUS390XState *env = &cpu->env; +- unsigned long pc; +- +- pc = env->psw.addr - sw_bp_ilen; +- if (kvm_find_sw_breakpoint(CPU(cpu), pc)) { +- env->psw.addr = pc; +- return EXCP_DEBUG; +- } +- +- return -ENOENT; +-} +- +-void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info) +-{ +- CPUS390XState *env = &S390_CPU(cs)->env; +- +- /* Feat bit is set only if KVM supports sync for diag318 */ +- if (s390_has_feat(S390_FEAT_DIAG_318)) { +- env->diag318_info = diag318_info; +- cs->kvm_run->s.regs.diag318 = diag318_info; +- cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318; +- } +-} +- +-static void 
handle_diag_318(S390CPU *cpu, struct kvm_run *run) +-{ +- uint64_t reg = (run->s390_sieic.ipa & 0x00f0) >> 4; +- uint64_t diag318_info = run->s.regs.gprs[reg]; +- CPUState *t; +- +- /* +- * DIAG 318 can only be enabled with KVM support. As such, let's +- * ensure a guest cannot execute this instruction erroneously. +- */ +- if (!s390_has_feat(S390_FEAT_DIAG_318)) { +- kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); +- return; +- } +- +- CPU_FOREACH(t) { +- run_on_cpu(t, s390_do_cpu_set_diag318, +- RUN_ON_CPU_HOST_ULONG(diag318_info)); +- } +-} +- +-#define DIAG_KVM_CODE_MASK 0x000000000000ffff +- +-static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb) +-{ +- int r = 0; +- uint16_t func_code; +- +- /* +- * For any diagnose call we support, bits 48-63 of the resulting +- * address specify the function code; the remainder is ignored. +- */ +- func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK; +- switch (func_code) { +- case DIAG_TIMEREVENT: +- kvm_handle_diag_288(cpu, run); +- break; +- case DIAG_IPL: +- kvm_handle_diag_308(cpu, run); +- break; +- case DIAG_SET_CONTROL_PROGRAM_CODES: +- handle_diag_318(cpu, run); +- break; +- case DIAG_KVM_HYPERCALL: +- r = handle_hypercall(cpu, run); +- break; +- case DIAG_KVM_BREAKPOINT: +- r = handle_sw_breakpoint(cpu, run); +- break; +- default: +- DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code); +- kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); +- break; +- } +- +- return r; +-} +- +-static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb) +-{ +- CPUS390XState *env = &cpu->env; +- const uint8_t r1 = ipa1 >> 4; +- const uint8_t r3 = ipa1 & 0x0f; +- int ret; +- uint8_t order; +- +- /* get order code */ +- order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK; +- +- ret = handle_sigp(env, order, r1, r3); +- setcc(cpu, ret); +- return 0; +-} +- +-static int handle_instruction(S390CPU *cpu, struct kvm_run *run) +-{ +- unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00); +- uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff; +- int r = -1; +- +- DPRINTF("handle_instruction 0x%x 0x%x\n", +- run->s390_sieic.ipa, run->s390_sieic.ipb); +- switch (ipa0) { +- case IPA0_B2: +- r = handle_b2(cpu, run, ipa1); +- break; +- case IPA0_B9: +- r = handle_b9(cpu, run, ipa1); +- break; +- case IPA0_EB: +- r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff); +- break; +- case IPA0_E3: +- r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff); +- break; +- case IPA0_DIAG: +- r = handle_diag(cpu, run, run->s390_sieic.ipb); +- break; +- case IPA0_SIGP: +- r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb); +- break; +- } +- +- if (r < 0) { +- r = 0; +- kvm_s390_program_interrupt(cpu, PGM_OPERATION); +- } +- +- return r; +-} +- +-static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason, +- int pswoffset) +-{ +- CPUState *cs = CPU(cpu); +- +- s390_cpu_halt(cpu); +- cpu->env.crash_reason = reason; +- qemu_system_guest_panicked(cpu_get_crash_info(cs)); +-} +- +-/* try to detect pgm check loops */ +-static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run) +-{ +- CPUState *cs = CPU(cpu); +- PSW oldpsw, newpsw; +- +- newpsw.mask = ldq_phys(cs->as, cpu->env.psa + +- offsetof(LowCore, program_new_psw)); +- newpsw.addr = ldq_phys(cs->as, cpu->env.psa + +- offsetof(LowCore, program_new_psw) + 8); +- oldpsw.mask = run->psw_mask; +- oldpsw.addr = run->psw_addr; +- /* +- * Avoid endless loops of operation exceptions, if the pgm new +- * PSW will cause a new operation exception. 
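+- * (s390x instructions are 2, 4 or 6 bytes long, so a pgm new PSW at most +- * 6 bytes before the old one points back into the faulting instruction.)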
+- * The heuristic checks if the pgm new psw is within 6 bytes before +- * the faulting psw address (with same DAT, AS settings) and the +- * new psw is not a wait psw and the fault was not triggered by +- * problem state. In that case go into crashed state. +- */ +- +- if (oldpsw.addr - newpsw.addr <= 6 && +- !(newpsw.mask & PSW_MASK_WAIT) && +- !(oldpsw.mask & PSW_MASK_PSTATE) && +- (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) && +- (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) { +- unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP, +- offsetof(LowCore, program_new_psw)); +- return EXCP_HALTED; +- } +- return 0; +-} +- +-static int handle_intercept(S390CPU *cpu) +-{ +- CPUState *cs = CPU(cpu); +- struct kvm_run *run = cs->kvm_run; +- int icpt_code = run->s390_sieic.icptcode; +- int r = 0; +- +- DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code, (long)run->psw_addr); +- switch (icpt_code) { +- case ICPT_INSTRUCTION: +- case ICPT_PV_INSTR: +- case ICPT_PV_INSTR_NOTIFICATION: +- r = handle_instruction(cpu, run); +- break; +- case ICPT_PROGRAM: +- unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP, +- offsetof(LowCore, program_new_psw)); +- r = EXCP_HALTED; +- break; +- case ICPT_EXT_INT: +- unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP, +- offsetof(LowCore, external_new_psw)); +- r = EXCP_HALTED; +- break; +- case ICPT_WAITPSW: +- /* disabled wait, since enabled wait is handled in kernel */ +- s390_handle_wait(cpu); +- r = EXCP_HALTED; +- break; +- case ICPT_CPU_STOP: +- do_stop_interrupt(&cpu->env); +- r = EXCP_HALTED; +- break; +- case ICPT_OPEREXC: +- /* check for break points */ +- r = handle_sw_breakpoint(cpu, run); +- if (r == -ENOENT) { +- /* Then check for potential pgm check loops */ +- r = handle_oper_loop(cpu, run); +- if (r == 0) { +- kvm_s390_program_interrupt(cpu, PGM_OPERATION); +- } +- } +- break; +- case ICPT_SOFT_INTERCEPT: +- fprintf(stderr, "KVM unimplemented icpt SOFT\n"); +- exit(1); +- break; +- case ICPT_IO: +- fprintf(stderr, "KVM unimplemented icpt IO\n"); +- exit(1); +- break; +- default: +- fprintf(stderr, "Unknown intercept code: %d\n", icpt_code); +- exit(1); +- break; +- } +- +- return r; +-} +- +-static int handle_tsch(S390CPU *cpu) +-{ +- CPUState *cs = CPU(cpu); +- struct kvm_run *run = cs->kvm_run; +- int ret; +- +- ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb, +- RA_IGNORED); +- if (ret < 0) { +- /* +- * Failure. +- * If an I/O interrupt had been dequeued, we have to reinject it. +- */ +- if (run->s390_tsch.dequeued) { +- s390_io_interrupt(run->s390_tsch.subchannel_id, +- run->s390_tsch.subchannel_nr, +- run->s390_tsch.io_int_parm, +- run->s390_tsch.io_int_word); +- } +- ret = 0; +- } +- return ret; +-} +- +-static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar) +-{ +- const MachineState *ms = MACHINE(qdev_get_machine()); +- uint16_t conf_cpus = 0, reserved_cpus = 0; +- SysIB_322 sysib; +- int del, i; +- +- if (s390_is_pv()) { +- s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib)); +- } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) { +- return; +- } +- /* Shift the stack of Extended Names to prepare for our own data */ +- memmove(&sysib.ext_names[1], &sysib.ext_names[0], +- sizeof(sysib.ext_names[0]) * (sysib.count - 1)); +- /* First virt level, that doesn't provide Ext Names delimits stack. It is +- * assumed it's not capable of managing Extended Names for lower levels. 
+- */ +- for (del = 1; del < sysib.count; del++) { +- if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) { +- break; +- } +- } +- if (del < sysib.count) { +- memset(sysib.ext_names[del], 0, +- sizeof(sysib.ext_names[0]) * (sysib.count - del)); +- } +- +- /* count the cpus and split them into configured and reserved ones */ +- for (i = 0; i < ms->possible_cpus->len; i++) { +- if (ms->possible_cpus->cpus[i].cpu) { +- conf_cpus++; +- } else { +- reserved_cpus++; +- } +- } +- sysib.vm[0].total_cpus = conf_cpus + reserved_cpus; +- sysib.vm[0].conf_cpus = conf_cpus; +- sysib.vm[0].reserved_cpus = reserved_cpus; +- +- /* Insert short machine name in EBCDIC, padded with blanks */ +- if (qemu_name) { +- memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name)); +- ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name), +- strlen(qemu_name))); +- } +- sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */ +- /* If hypervisor specifies zero Extended Name in STSI322 SYSIB, it's +- * considered by s390 as not capable of providing any Extended Name. +- * Therefore if no name was specified on qemu invocation, we go with the +- * same "KVMguest" default, which KVM has filled into short name field. +- */ +- strpadcpy((char *)sysib.ext_names[0], +- sizeof(sysib.ext_names[0]), +- qemu_name ?: "KVMguest", '\0'); +- +- /* Insert UUID */ +- memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid)); +- +- if (s390_is_pv()) { +- s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib)); +- } else { +- s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib)); +- } +-} +- +-static int handle_stsi(S390CPU *cpu) +-{ +- CPUState *cs = CPU(cpu); +- struct kvm_run *run = cs->kvm_run; +- +- switch (run->s390_stsi.fc) { +- case 3: +- if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) { +- return 0; +- } +- /* Only sysib 3.2.2 needs post-handling for now. 
*/ +- insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar); +- return 0; +- default: +- return 0; +- } +-} +- +-static int kvm_arch_handle_debug_exit(S390CPU *cpu) +-{ +- CPUState *cs = CPU(cpu); +- struct kvm_run *run = cs->kvm_run; +- +- int ret = 0; +- struct kvm_debug_exit_arch *arch_info = &run->debug.arch; +- +- switch (arch_info->type) { +- case KVM_HW_WP_WRITE: +- if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) { +- cs->watchpoint_hit = &hw_watchpoint; +- hw_watchpoint.vaddr = arch_info->addr; +- hw_watchpoint.flags = BP_MEM_WRITE; +- ret = EXCP_DEBUG; +- } +- break; +- case KVM_HW_BP: +- if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) { +- ret = EXCP_DEBUG; +- } +- break; +- case KVM_SINGLESTEP: +- if (cs->singlestep_enabled) { +- ret = EXCP_DEBUG; +- } +- break; +- default: +- ret = -ENOSYS; +- } +- +- return ret; +-} +- +-int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) +-{ +- S390CPU *cpu = S390_CPU(cs); +- int ret = 0; +- +- qemu_mutex_lock_iothread(); +- +- kvm_cpu_synchronize_state(cs); +- +- switch (run->exit_reason) { +- case KVM_EXIT_S390_SIEIC: +- ret = handle_intercept(cpu); +- break; +- case KVM_EXIT_S390_RESET: +- s390_ipl_reset_request(cs, S390_RESET_REIPL); +- break; +- case KVM_EXIT_S390_TSCH: +- ret = handle_tsch(cpu); +- break; +- case KVM_EXIT_S390_STSI: +- ret = handle_stsi(cpu); +- break; +- case KVM_EXIT_DEBUG: +- ret = kvm_arch_handle_debug_exit(cpu); +- break; +- default: +- fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason); +- break; +- } +- qemu_mutex_unlock_iothread(); +- +- if (ret == 0) { +- ret = EXCP_INTERRUPT; +- } +- return ret; +-} +- +-bool kvm_arch_stop_on_emulation_error(CPUState *cpu) +-{ +- return true; +-} +- +-void kvm_s390_enable_css_support(S390CPU *cpu) +-{ +- int r; +- +- /* Activate host kernel channel subsystem support. */ +- r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0); +- assert(r == 0); +-} +- +-void kvm_arch_init_irq_routing(KVMState *s) +-{ +- /* +- * Note that while irqchip capabilities generally imply that cpustates +- * are handled in-kernel, it is not true for s390 (yet); therefore, we +- * have to override the common code kvm_halt_in_kernel_allowed setting. 
+- */ +- if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) { +- kvm_gsi_routing_allowed = true; +- kvm_halt_in_kernel_allowed = false; +- } +-} +- +-int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch, +- int vq, bool assign) +-{ +- struct kvm_ioeventfd kick = { +- .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY | +- KVM_IOEVENTFD_FLAG_DATAMATCH, +- .fd = event_notifier_get_fd(notifier), +- .datamatch = vq, +- .addr = sch, +- .len = 8, +- }; +- trace_kvm_assign_subch_ioeventfd(kick.fd, kick.addr, assign, +- kick.datamatch); +- if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) { +- return -ENOSYS; +- } +- if (!assign) { +- kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN; +- } +- return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick); +-} +- +-int kvm_s390_get_ri(void) +-{ +- return cap_ri; +-} +- +-int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state) +-{ +- struct kvm_mp_state mp_state = {}; +- int ret; +- +- /* the kvm part might not have been initialized yet */ +- if (CPU(cpu)->kvm_state == NULL) { +- return 0; +- } +- +- switch (cpu_state) { +- case S390_CPU_STATE_STOPPED: +- mp_state.mp_state = KVM_MP_STATE_STOPPED; +- break; +- case S390_CPU_STATE_CHECK_STOP: +- mp_state.mp_state = KVM_MP_STATE_CHECK_STOP; +- break; +- case S390_CPU_STATE_OPERATING: +- mp_state.mp_state = KVM_MP_STATE_OPERATING; +- break; +- case S390_CPU_STATE_LOAD: +- mp_state.mp_state = KVM_MP_STATE_LOAD; +- break; +- default: +- error_report("Requested CPU state is not a valid S390 CPU state: %u", +- cpu_state); +- exit(1); +- } +- +- ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); +- if (ret) { +- trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state, +- strerror(-ret)); +- } +- +- return ret; +-} +- +-void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu) +-{ +- unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus; +- struct kvm_s390_irq_state irq_state = { +- .buf = (uint64_t) cpu->irqstate, +- .len = VCPU_IRQ_BUF_SIZE(max_cpus), +- }; +- CPUState *cs = CPU(cpu); +- int32_t bytes; +- +- if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) { +- return; +- } +- +- bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state); +- if (bytes < 0) { +- cpu->irqstate_saved_size = 0; +- error_report("Migration of interrupt state failed"); +- return; +- } +- +- cpu->irqstate_saved_size = bytes; +-} +- +-int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu) +-{ +- CPUState *cs = CPU(cpu); +- struct kvm_s390_irq_state irq_state = { +- .buf = (uint64_t) cpu->irqstate, +- .len = cpu->irqstate_saved_size, +- }; +- int r; +- +- if (cpu->irqstate_saved_size == 0) { +- return 0; +- } +- +- if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) { +- return -ENOSYS; +- } +- +- r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state); +- if (r) { +- error_report("Setting interrupt state failed %d", r); +- } +- return r; +-} +- +-int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, +- uint64_t address, uint32_t data, PCIDevice *dev) +-{ +- S390PCIBusDevice *pbdev; +- uint32_t vec = data & ZPCI_MSI_VEC_MASK; +- +- if (!dev) { +- DPRINTF("add_msi_route no pci device\n"); +- return -ENODEV; +- } +- +- pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id); +- if (!pbdev) { +- DPRINTF("add_msi_route no zpci device\n"); +- return -ENODEV; +- } +- +- route->type = KVM_IRQ_ROUTING_S390_ADAPTER; +- route->flags = 0; +- route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr; +- route->u.adapter.ind_addr = 
pbdev->routes.adapter.ind_addr; +- route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset; +- route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec; +- route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id; +- return 0; +-} +- +-int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, +- int vector, PCIDevice *dev) +-{ +- return 0; +-} +- +-int kvm_arch_release_virq_post(int virq) +-{ +- return 0; +-} +- +-int kvm_arch_msi_data_to_gsi(uint32_t data) +-{ +- abort(); +-} +- +-static int query_cpu_subfunc(S390FeatBitmap features) +-{ +- struct kvm_s390_vm_cpu_subfunc prop = {}; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_CPU_MODEL, +- .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC, +- .addr = (uint64_t) &prop, +- }; +- int rc; +- +- rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); +- if (rc) { +- return rc; +- } +- +- /* +- * We're going to add all subfunctions now, if the corresponding feature +- * is available that unlocks the query functions. +- */ +- s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo); +- if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) { +- s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff); +- } +- if (test_bit(S390_FEAT_MSA, features)) { +- s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac); +- s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc); +- s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km); +- s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd); +- s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd); +- } +- if (test_bit(S390_FEAT_MSA_EXT_3, features)) { +- s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo); +- } +- if (test_bit(S390_FEAT_MSA_EXT_4, features)) { +- s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr); +- s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf); +- s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo); +- s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc); +- } +- if (test_bit(S390_FEAT_MSA_EXT_5, features)) { +- s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno); +- } +- if (test_bit(S390_FEAT_MSA_EXT_8, features)) { +- s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma); +- } +- if (test_bit(S390_FEAT_MSA_EXT_9, features)) { +- s390_add_from_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa); +- } +- if (test_bit(S390_FEAT_ESORT_BASE, features)) { +- s390_add_from_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl); +- } +- if (test_bit(S390_FEAT_DEFLATE_BASE, features)) { +- s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc); +- } +- return 0; +-} +- +-static int configure_cpu_subfunc(const S390FeatBitmap features) +-{ +- struct kvm_s390_vm_cpu_subfunc prop = {}; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_CPU_MODEL, +- .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC, +- .addr = (uint64_t) &prop, +- }; +- +- if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, +- KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) { +- /* hardware support might be missing, IBC will handle most of this */ +- return 0; +- } +- +- s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo); +- if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) { +- s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff); +- } +- if (test_bit(S390_FEAT_MSA, features)) { +- s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac); +- 
s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc); +- s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km); +- s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd); +- s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd); +- } +- if (test_bit(S390_FEAT_MSA_EXT_3, features)) { +- s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo); +- } +- if (test_bit(S390_FEAT_MSA_EXT_4, features)) { +- s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr); +- s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf); +- s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo); +- s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc); +- } +- if (test_bit(S390_FEAT_MSA_EXT_5, features)) { +- s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno); +- } +- if (test_bit(S390_FEAT_MSA_EXT_8, features)) { +- s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma); +- } +- if (test_bit(S390_FEAT_MSA_EXT_9, features)) { +- s390_fill_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa); +- } +- if (test_bit(S390_FEAT_ESORT_BASE, features)) { +- s390_fill_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl); +- } +- if (test_bit(S390_FEAT_DEFLATE_BASE, features)) { +- s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc); +- } +- return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); +-} +- +-static int kvm_to_feat[][2] = { +- { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP }, +- { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 }, +- { KVM_S390_VM_CPU_FEAT_64BSCAO , S390_FEAT_SIE_64BSCAO }, +- { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF }, +- { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE }, +- { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS }, +- { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB }, +- { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI }, +- { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS }, +- { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY }, +- { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA }, +- { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI}, +- { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF}, +- { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS}, +-}; +- +-static int query_cpu_feat(S390FeatBitmap features) +-{ +- struct kvm_s390_vm_cpu_feat prop = {}; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_CPU_MODEL, +- .attr = KVM_S390_VM_CPU_MACHINE_FEAT, +- .addr = (uint64_t) &prop, +- }; +- int rc; +- int i; +- +- rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); +- if (rc) { +- return rc; +- } +- +- for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) { +- if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) { +- set_bit(kvm_to_feat[i][1], features); +- } +- } +- return 0; +-} +- +-static int configure_cpu_feat(const S390FeatBitmap features) +-{ +- struct kvm_s390_vm_cpu_feat prop = {}; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_CPU_MODEL, +- .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT, +- .addr = (uint64_t) &prop, +- }; +- int i; +- +- for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) { +- if (test_bit(kvm_to_feat[i][1], features)) { +- set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat); +- } +- } +- return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); +-} +- +-bool kvm_s390_cpu_models_supported(void) +-{ +- if (!cpu_model_allowed()) { +- /* compatibility machines interfere with the cpu model */ +- return false; +- } +- return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, +- KVM_S390_VM_CPU_MACHINE) && +- kvm_vm_check_attr(kvm_state, 
KVM_S390_VM_CPU_MODEL, +- KVM_S390_VM_CPU_PROCESSOR) && +- kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, +- KVM_S390_VM_CPU_MACHINE_FEAT) && +- kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, +- KVM_S390_VM_CPU_PROCESSOR_FEAT) && +- kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, +- KVM_S390_VM_CPU_MACHINE_SUBFUNC); +-} +- +-void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp) +-{ +- struct kvm_s390_vm_cpu_machine prop = {}; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_CPU_MODEL, +- .attr = KVM_S390_VM_CPU_MACHINE, +- .addr = (uint64_t) &prop, +- }; +- uint16_t unblocked_ibc = 0, cpu_type = 0; +- int rc; +- +- memset(model, 0, sizeof(*model)); +- +- if (!kvm_s390_cpu_models_supported()) { +- error_setg(errp, "KVM doesn't support CPU models"); +- return; +- } +- +- /* query the basic cpu model properties */ +- rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); +- if (rc) { +- error_setg(errp, "KVM: Error querying host CPU model: %d", rc); +- return; +- } +- +- cpu_type = cpuid_type(prop.cpuid); +- if (has_ibc(prop.ibc)) { +- model->lowest_ibc = lowest_ibc(prop.ibc); +- unblocked_ibc = unblocked_ibc(prop.ibc); +- } +- model->cpu_id = cpuid_id(prop.cpuid); +- model->cpu_id_format = cpuid_format(prop.cpuid); +- model->cpu_ver = 0xff; +- +- /* get supported cpu features indicated via STFL(E) */ +- s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL, +- (uint8_t *) prop.fac_mask); +- /* dat-enhancement facility 2 has no bit but was introduced with stfle */ +- if (test_bit(S390_FEAT_STFLE, model->features)) { +- set_bit(S390_FEAT_DAT_ENH_2, model->features); +- } +- /* get supported cpu features indicated e.g. via SCLP */ +- rc = query_cpu_feat(model->features); +- if (rc) { +- error_setg(errp, "KVM: Error querying CPU features: %d", rc); +- return; +- } +- /* get supported cpu subfunctions indicated via query / test bit */ +- rc = query_cpu_subfunc(model->features); +- if (rc) { +- error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc); +- return; +- } +- +- /* PTFF subfunctions might be indicated although kernel support missing */ +- if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) { +- clear_bit(S390_FEAT_PTFF_QSIE, model->features); +- clear_bit(S390_FEAT_PTFF_QTOUE, model->features); +- clear_bit(S390_FEAT_PTFF_STOE, model->features); +- clear_bit(S390_FEAT_PTFF_STOUE, model->features); +- } +- +- /* with cpu model support, CMM is only indicated if really available */ +- if (kvm_s390_cmma_available()) { +- set_bit(S390_FEAT_CMM, model->features); +- } else { +- /* no cmm -> no cmm nt */ +- clear_bit(S390_FEAT_CMM_NT, model->features); +- } +- +- /* bpb needs kernel support for migration, VSIE and reset */ +- if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) { +- clear_bit(S390_FEAT_BPB, model->features); +- } +- +- /* +- * If we have support for protected virtualization, indicate +- * the protected virtualization IPL unpack facility. +- */ +- if (cap_protected) { +- set_bit(S390_FEAT_UNPACK, model->features); +- } +- +- /* We emulate a zPCI bus and AEN, therefore we don't need HW support */ +- set_bit(S390_FEAT_ZPCI, model->features); +- set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features); +- +- if (s390_known_cpu_type(cpu_type)) { +- /* we want the exact model, even if some features are missing */ +- model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc), +- ibc_ec_ga(unblocked_ibc), NULL); +- } else { +- /* model unknown, e.g. 
too new - search using features */ +- model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc), +- ibc_ec_ga(unblocked_ibc), +- model->features); +- } +- if (!model->def) { +- error_setg(errp, "KVM: host CPU model could not be identified"); +- return; +- } +- /* for now, we can only provide the AP feature with HW support */ +- if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, +- KVM_S390_VM_CRYPTO_ENABLE_APIE)) { +- set_bit(S390_FEAT_AP, model->features); +- } +- +- /* +- * Extended-Length SCCB is handled entirely within QEMU. +- * For PV guests this is completely fenced by the Ultravisor, as Service +- * Call error checking and STFLE interpretation are handled via SIE. +- */ +- set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features); +- +- if (kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) { +- set_bit(S390_FEAT_DIAG_318, model->features); +- } +- +- /* strip of features that are not part of the maximum model */ +- bitmap_and(model->features, model->features, model->def->full_feat, +- S390_FEAT_MAX); +-} +- +-static void kvm_s390_configure_apie(bool interpret) +-{ +- uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE : +- KVM_S390_VM_CRYPTO_DISABLE_APIE; +- +- if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { +- kvm_s390_set_attr(attr); +- } +-} +- +-void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp) +-{ +- struct kvm_s390_vm_cpu_processor prop = { +- .fac_list = { 0 }, +- }; +- struct kvm_device_attr attr = { +- .group = KVM_S390_VM_CPU_MODEL, +- .attr = KVM_S390_VM_CPU_PROCESSOR, +- .addr = (uint64_t) &prop, +- }; +- int rc; +- +- if (!model) { +- /* compatibility handling if cpu models are disabled */ +- if (kvm_s390_cmma_available()) { +- kvm_s390_enable_cmma(); +- } +- return; +- } +- if (!kvm_s390_cpu_models_supported()) { +- error_setg(errp, "KVM doesn't support CPU models"); +- return; +- } +- prop.cpuid = s390_cpuid_from_cpu_model(model); +- prop.ibc = s390_ibc_from_cpu_model(model); +- /* configure cpu features indicated via STFL(e) */ +- s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL, +- (uint8_t *) prop.fac_list); +- rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); +- if (rc) { +- error_setg(errp, "KVM: Error configuring the CPU model: %d", rc); +- return; +- } +- /* configure cpu features indicated e.g. 
via SCLP */ +- rc = configure_cpu_feat(model->features); +- if (rc) { +- error_setg(errp, "KVM: Error configuring CPU features: %d", rc); +- return; +- } +- /* configure cpu subfunctions indicated via query / test bit */ +- rc = configure_cpu_subfunc(model->features); +- if (rc) { +- error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc); +- return; +- } +- /* enable CMM via CMMA */ +- if (test_bit(S390_FEAT_CMM, model->features)) { +- kvm_s390_enable_cmma(); +- } +- +- if (test_bit(S390_FEAT_AP, model->features)) { +- kvm_s390_configure_apie(true); +- } +-} +- +-void kvm_s390_restart_interrupt(S390CPU *cpu) +-{ +- struct kvm_s390_irq irq = { +- .type = KVM_S390_RESTART, +- }; +- +- kvm_s390_vcpu_interrupt(cpu, &irq); +-} +- +-void kvm_s390_stop_interrupt(S390CPU *cpu) +-{ +- struct kvm_s390_irq irq = { +- .type = KVM_S390_SIGP_STOP, +- }; +- +- kvm_s390_vcpu_interrupt(cpu, &irq); +-} +- +-bool kvm_arch_cpu_check_are_resettable(void) +-{ +- return true; +-} +diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c +new file mode 100644 +index 0000000000..5b1fdb55c4 +--- /dev/null ++++ b/target/s390x/kvm/kvm.c +@@ -0,0 +1,2564 @@ ++/* ++ * QEMU S390x KVM implementation ++ * ++ * Copyright (c) 2009 Alexander Graf ++ * Copyright IBM Corp. 2012 ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, see . ++ */ ++ ++#include "qemu/osdep.h" ++#include ++ ++#include ++#include ++ ++#include "qemu-common.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "kvm_s390x.h" ++#include "sysemu/kvm_int.h" ++#include "qemu/cutils.h" ++#include "qapi/error.h" ++#include "qemu/error-report.h" ++#include "qemu/timer.h" ++#include "qemu/units.h" ++#include "qemu/main-loop.h" ++#include "qemu/mmap-alloc.h" ++#include "qemu/log.h" ++#include "sysemu/sysemu.h" ++#include "sysemu/hw_accel.h" ++#include "sysemu/runstate.h" ++#include "sysemu/device_tree.h" ++#include "exec/gdbstub.h" ++#include "exec/ram_addr.h" ++#include "trace.h" ++#include "hw/s390x/s390-pci-inst.h" ++#include "hw/s390x/s390-pci-bus.h" ++#include "hw/s390x/ipl.h" ++#include "hw/s390x/ebcdic.h" ++#include "exec/memattrs.h" ++#include "hw/s390x/s390-virtio-ccw.h" ++#include "hw/s390x/s390-virtio-hcall.h" ++#include "hw/s390x/pv.h" ++ ++#ifndef DEBUG_KVM ++#define DEBUG_KVM 0 ++#endif ++ ++#define DPRINTF(fmt, ...) 
do { \ ++ if (DEBUG_KVM) { \ ++ fprintf(stderr, fmt, ## __VA_ARGS__); \ ++ } \ ++} while (0) ++ ++#define kvm_vm_check_mem_attr(s, attr) \ ++ kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr) ++ ++#define IPA0_DIAG 0x8300 ++#define IPA0_SIGP 0xae00 ++#define IPA0_B2 0xb200 ++#define IPA0_B9 0xb900 ++#define IPA0_EB 0xeb00 ++#define IPA0_E3 0xe300 ++ ++#define PRIV_B2_SCLP_CALL 0x20 ++#define PRIV_B2_CSCH 0x30 ++#define PRIV_B2_HSCH 0x31 ++#define PRIV_B2_MSCH 0x32 ++#define PRIV_B2_SSCH 0x33 ++#define PRIV_B2_STSCH 0x34 ++#define PRIV_B2_TSCH 0x35 ++#define PRIV_B2_TPI 0x36 ++#define PRIV_B2_SAL 0x37 ++#define PRIV_B2_RSCH 0x38 ++#define PRIV_B2_STCRW 0x39 ++#define PRIV_B2_STCPS 0x3a ++#define PRIV_B2_RCHP 0x3b ++#define PRIV_B2_SCHM 0x3c ++#define PRIV_B2_CHSC 0x5f ++#define PRIV_B2_SIGA 0x74 ++#define PRIV_B2_XSCH 0x76 ++ ++#define PRIV_EB_SQBS 0x8a ++#define PRIV_EB_PCISTB 0xd0 ++#define PRIV_EB_SIC 0xd1 ++ ++#define PRIV_B9_EQBS 0x9c ++#define PRIV_B9_CLP 0xa0 ++#define PRIV_B9_PCISTG 0xd0 ++#define PRIV_B9_PCILG 0xd2 ++#define PRIV_B9_RPCIT 0xd3 ++ ++#define PRIV_E3_MPCIFC 0xd0 ++#define PRIV_E3_STPCIFC 0xd4 ++ ++#define DIAG_TIMEREVENT 0x288 ++#define DIAG_IPL 0x308 ++#define DIAG_SET_CONTROL_PROGRAM_CODES 0x318 ++#define DIAG_KVM_HYPERCALL 0x500 ++#define DIAG_KVM_BREAKPOINT 0x501 ++ ++#define ICPT_INSTRUCTION 0x04 ++#define ICPT_PROGRAM 0x08 ++#define ICPT_EXT_INT 0x14 ++#define ICPT_WAITPSW 0x1c ++#define ICPT_SOFT_INTERCEPT 0x24 ++#define ICPT_CPU_STOP 0x28 ++#define ICPT_OPEREXC 0x2c ++#define ICPT_IO 0x40 ++#define ICPT_PV_INSTR 0x68 ++#define ICPT_PV_INSTR_NOTIFICATION 0x6c ++ ++#define NR_LOCAL_IRQS 32 ++/* ++ * Needs to be big enough to contain max_cpus emergency signals ++ * and in addition NR_LOCAL_IRQS interrupts ++ */ ++#define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \ ++ (max_cpus + NR_LOCAL_IRQS)) ++/* ++ * KVM does only support memory slots up to KVM_MEM_MAX_NR_PAGES pages ++ * as the dirty bitmap must be managed by bitops that take an int as ++ * position indicator. This would end at an unaligned address ++ * (0x7fffff00000). As future variants might provide larger pages ++ * and to make all addresses properly aligned, let us split at 4TB. ++ */ ++#define KVM_SLOT_MAX_BYTES (4UL * TiB) ++ ++static CPUWatchpoint hw_watchpoint; ++/* ++ * We don't use a list because this structure is also used to transmit the ++ * hardware breakpoints to the kernel. 
++ */ ++static struct kvm_hw_breakpoint *hw_breakpoints; ++static int nb_hw_breakpoints; ++ ++const KVMCapabilityInfo kvm_arch_required_capabilities[] = { ++ KVM_CAP_LAST_INFO ++}; ++ ++static int cap_sync_regs; ++static int cap_async_pf; ++static int cap_mem_op; ++static int cap_s390_irq; ++static int cap_ri; ++static int cap_hpage_1m; ++static int cap_vcpu_resets; ++static int cap_protected; ++ ++static int active_cmma; ++ ++static int kvm_s390_query_mem_limit(uint64_t *memory_limit) ++{ ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_MEM_CTRL, ++ .attr = KVM_S390_VM_MEM_LIMIT_SIZE, ++ .addr = (uint64_t) memory_limit, ++ }; ++ ++ return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); ++} ++ ++int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit) ++{ ++ int rc; ++ ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_MEM_CTRL, ++ .attr = KVM_S390_VM_MEM_LIMIT_SIZE, ++ .addr = (uint64_t) &new_limit, ++ }; ++ ++ if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) { ++ return 0; ++ } ++ ++ rc = kvm_s390_query_mem_limit(hw_limit); ++ if (rc) { ++ return rc; ++ } else if (*hw_limit < new_limit) { ++ return -E2BIG; ++ } ++ ++ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); ++} ++ ++int kvm_s390_cmma_active(void) ++{ ++ return active_cmma; ++} ++ ++static bool kvm_s390_cmma_available(void) ++{ ++ static bool initialized, value; ++ ++ if (!initialized) { ++ initialized = true; ++ value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) && ++ kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA); ++ } ++ return value; ++} ++ ++void kvm_s390_cmma_reset(void) ++{ ++ int rc; ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_MEM_CTRL, ++ .attr = KVM_S390_VM_MEM_CLR_CMMA, ++ }; ++ ++ if (!kvm_s390_cmma_active()) { ++ return; ++ } ++ ++ rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); ++ trace_kvm_clear_cmma(rc); ++} ++ ++static void kvm_s390_enable_cmma(void) ++{ ++ int rc; ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_MEM_CTRL, ++ .attr = KVM_S390_VM_MEM_ENABLE_CMMA, ++ }; ++ ++ if (cap_hpage_1m) { ++ warn_report("CMM will not be enabled because it is not " ++ "compatible with huge memory backings."); ++ return; ++ } ++ rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); ++ active_cmma = !rc; ++ trace_kvm_enable_cmma(rc); ++} ++ ++static void kvm_s390_set_attr(uint64_t attr) ++{ ++ struct kvm_device_attr attribute = { ++ .group = KVM_S390_VM_CRYPTO, ++ .attr = attr, ++ }; ++ ++ int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute); ++ ++ if (ret) { ++ error_report("Failed to set crypto device attribute %lu: %s", ++ attr, strerror(-ret)); ++ } ++} ++ ++static void kvm_s390_init_aes_kw(void) ++{ ++ uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW; ++ ++ if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap", ++ NULL)) { ++ attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW; ++ } ++ ++ if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { ++ kvm_s390_set_attr(attr); ++ } ++} ++ ++static void kvm_s390_init_dea_kw(void) ++{ ++ uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW; ++ ++ if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap", ++ NULL)) { ++ attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW; ++ } ++ ++ if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { ++ kvm_s390_set_attr(attr); ++ } ++} ++ ++void kvm_s390_crypto_reset(void) ++{ ++ if (s390_has_feat(S390_FEAT_MSA_EXT_3)) { ++ kvm_s390_init_aes_kw(); ++ kvm_s390_init_dea_kw(); ++ } ++} 
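The helpers above all share one idiom: a struct kvm_device_attr names an attribute group, an attribute within it and a payload address, and is handed to KVM_GET_DEVICE_ATTR or KVM_SET_DEVICE_ATTR after probing with kvm_vm_check_attr(). A minimal sketch of that pattern, assuming the QEMU wrappers already used in this file; the helper name query_u64_attr is hypothetical:

    /* Query a 64-bit VM device attribute, e.g. the memory limit above. */
    static int query_u64_attr(KVMState *s, uint32_t group, uint64_t id,
                              uint64_t *value)
    {
        struct kvm_device_attr attr = {
            .group = group,           /* e.g. KVM_S390_VM_MEM_CTRL */
            .attr = id,               /* e.g. KVM_S390_VM_MEM_LIMIT_SIZE */
            .addr = (uint64_t)value,  /* kernel writes the result here */
        };

        /* Probe first; kernels without the attribute reject the ioctl. */
        if (!kvm_vm_check_attr(s, group, id)) {
            return -ENOSYS;
        }
        return kvm_vm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
    }

The probe/use split is what lets kvm_s390_set_mem_limit() and the CMMA and crypto helpers above degrade silently on older kernels instead of failing the ioctl.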
++ ++void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp) ++{ ++ if (pagesize == 4 * KiB) { ++ return; ++ } ++ ++ if (!hpage_1m_allowed()) { ++ error_setg(errp, "This QEMU machine does not support huge page " ++ "mappings"); ++ return; ++ } ++ ++ if (pagesize != 1 * MiB) { ++ error_setg(errp, "Memory backing with 2G pages was specified, " ++ "but KVM does not support this memory backing"); ++ return; ++ } ++ ++ if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_HPAGE_1M, 0)) { ++ error_setg(errp, "Memory backing with 1M pages was specified, " ++ "but KVM does not support this memory backing"); ++ return; ++ } ++ ++ cap_hpage_1m = 1; ++} ++ ++int kvm_s390_get_hpage_1m(void) ++{ ++ return cap_hpage_1m; ++} ++ ++static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque) ++{ ++ MachineClass *mc = MACHINE_CLASS(oc); ++ ++ mc->default_cpu_type = S390_CPU_TYPE_NAME("host"); ++} ++ ++int kvm_arch_init(MachineState *ms, KVMState *s) ++{ ++ object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE, ++ false, NULL); ++ ++ if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) { ++ error_report("KVM is missing capability KVM_CAP_DEVICE_CTRL - " ++ "please use kernel 3.15 or newer"); ++ return -1; ++ } ++ if (!kvm_check_extension(s, KVM_CAP_S390_COW)) { ++ error_report("KVM is missing capability KVM_CAP_S390_COW - " ++ "unsupported environment"); ++ return -1; ++ } ++ ++ cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS); ++ cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF); ++ cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP); ++ cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ); ++ cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS); ++ cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED); ++ ++ kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0); ++ kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0); ++ kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0); ++ if (ri_allowed()) { ++ if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) { ++ cap_ri = 1; ++ } ++ } ++ if (cpu_model_allowed()) { ++ kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0); ++ } ++ ++ /* ++ * The migration interface for ais was introduced with kernel 4.13 ++ * but the capability itself had been active since 4.12. As migration ++ * support is considered necessary, we only try to enable this for ++ * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available. ++ */ ++ if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() && ++ kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) { ++ kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); ++ } ++ ++ kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES); ++ return 0; ++} ++ ++int kvm_arch_irqchip_create(KVMState *s) ++{ ++ return 0; ++} ++ ++unsigned long kvm_arch_vcpu_id(CPUState *cpu) ++{ ++ return cpu->cpu_index; ++} ++ ++int kvm_arch_init_vcpu(CPUState *cs) ++{ ++ unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus; ++ S390CPU *cpu = S390_CPU(cs); ++ kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state); ++ cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE(max_cpus)); ++ return 0; ++} ++ ++int kvm_arch_destroy_vcpu(CPUState *cs) ++{ ++ S390CPU *cpu = S390_CPU(cs); ++ ++ g_free(cpu->irqstate); ++ cpu->irqstate = NULL; ++ ++ return 0; ++} ++ ++static void kvm_s390_reset_vcpu(S390CPU *cpu, unsigned long type) ++{ ++ CPUState *cs = CPU(cpu); ++ ++ /* ++ * The reset call is needed here to reset in-kernel vcpu data that ++ * we can't access directly from QEMU (i.e. with older kernels ++ * which don't support sync_regs/ONE_REG). 
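[Illustration, not part of the patch: kvm_s390_set_max_pagesize() above accepts 4K backing unconditionally, rejects anything that is neither 4K nor 1M, and only enables 1M backing when the kernel capability can be turned on. A standalone sketch of that decision tree, with Error reporting reduced to stderr and the KVM_CAP_S390_HPAGE_1M enablement replaced by a hypothetical kernel_supports_hpage_1m flag.]

#include <stdint.h>
#include <stdio.h>

#define KiB (1024ULL)
#define MiB (1024ULL * KiB)

/* returns 0 if the backing page size would be accepted, -1 otherwise */
static int check_max_pagesize(uint64_t pagesize, int hpage_1m_allowed,
                              int kernel_supports_hpage_1m)
{
    if (pagesize == 4 * KiB) {
        return 0;                      /* base pages always work */
    }
    if (!hpage_1m_allowed) {
        fprintf(stderr, "machine does not allow huge page mappings\n");
        return -1;
    }
    if (pagesize != 1 * MiB) {         /* anything else is treated as 2G */
        fprintf(stderr, "2G backing not supported by KVM\n");
        return -1;
    }
    if (!kernel_supports_hpage_1m) {
        fprintf(stderr, "1M backing not supported by KVM\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", check_max_pagesize(4 * KiB, 0, 0));    /* 0 */
    printf("%d\n", check_max_pagesize(1 * MiB, 1, 1));    /* 0 */
    printf("%d\n", check_max_pagesize(2048 * MiB, 1, 1)); /* -1 */
    return 0;
}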
Before this ioctl ++ * cpu_synchronize_state() is called in common kvm code ++ * (kvm-all). ++ */ ++ if (kvm_vcpu_ioctl(cs, type)) { ++ error_report("CPU reset failed on CPU %i type %lx", ++ cs->cpu_index, type); ++ } ++} ++ ++void kvm_s390_reset_vcpu_initial(S390CPU *cpu) ++{ ++ kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET); ++} ++ ++void kvm_s390_reset_vcpu_clear(S390CPU *cpu) ++{ ++ if (cap_vcpu_resets) { ++ kvm_s390_reset_vcpu(cpu, KVM_S390_CLEAR_RESET); ++ } else { ++ kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET); ++ } ++} ++ ++void kvm_s390_reset_vcpu_normal(S390CPU *cpu) ++{ ++ if (cap_vcpu_resets) { ++ kvm_s390_reset_vcpu(cpu, KVM_S390_NORMAL_RESET); ++ } ++} ++ ++static int can_sync_regs(CPUState *cs, int regs) ++{ ++ return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs; ++} ++ ++int kvm_arch_put_registers(CPUState *cs, int level) ++{ ++ S390CPU *cpu = S390_CPU(cs); ++ CPUS390XState *env = &cpu->env; ++ struct kvm_sregs sregs; ++ struct kvm_regs regs; ++ struct kvm_fpu fpu = {}; ++ int r; ++ int i; ++ ++ /* always save the PSW and the GPRS*/ ++ cs->kvm_run->psw_addr = env->psw.addr; ++ cs->kvm_run->psw_mask = env->psw.mask; ++ ++ if (can_sync_regs(cs, KVM_SYNC_GPRS)) { ++ for (i = 0; i < 16; i++) { ++ cs->kvm_run->s.regs.gprs[i] = env->regs[i]; ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS; ++ } ++ } else { ++ for (i = 0; i < 16; i++) { ++ regs.gprs[i] = env->regs[i]; ++ } ++ r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs); ++ if (r < 0) { ++ return r; ++ } ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_VRS)) { ++ for (i = 0; i < 32; i++) { ++ cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0]; ++ cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1]; ++ } ++ cs->kvm_run->s.regs.fpc = env->fpc; ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS; ++ } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) { ++ for (i = 0; i < 16; i++) { ++ cs->kvm_run->s.regs.fprs[i] = *get_freg(env, i); ++ } ++ cs->kvm_run->s.regs.fpc = env->fpc; ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS; ++ } else { ++ /* Floating point */ ++ for (i = 0; i < 16; i++) { ++ fpu.fprs[i] = *get_freg(env, i); ++ } ++ fpu.fpc = env->fpc; ++ ++ r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu); ++ if (r < 0) { ++ return r; ++ } ++ } ++ ++ /* Do we need to save more than that? */ ++ if (level == KVM_PUT_RUNTIME_STATE) { ++ return 0; ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_ARCH0)) { ++ cs->kvm_run->s.regs.cputm = env->cputm; ++ cs->kvm_run->s.regs.ckc = env->ckc; ++ cs->kvm_run->s.regs.todpr = env->todpr; ++ cs->kvm_run->s.regs.gbea = env->gbea; ++ cs->kvm_run->s.regs.pp = env->pp; ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0; ++ } else { ++ /* ++ * These ONE_REGS are not protected by a capability. As they are only ++ * necessary for migration we just trace a possible error, but don't ++ * return with an error return code.
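[Illustration, not part of the patch: can_sync_regs() above takes a whole set of KVM_SYNC_* flags, and the test (kvm_valid_regs & regs) == regs only passes when the kernel offers every requested register class through the shared run page. A toy demonstration of that subset check, with made-up flag values.]

#include <stdint.h>
#include <stdio.h>

/* hypothetical subset of the KVM_SYNC_* flags used above */
#define SYNC_GPRS (1u << 0)
#define SYNC_ACRS (1u << 1)
#define SYNC_CRS  (1u << 2)

/* every requested class must be present in the kernel's valid mask */
static int can_sync(uint32_t kvm_valid_regs, uint32_t regs)
{
    return (kvm_valid_regs & regs) == regs;
}

int main(void)
{
    uint32_t valid = SYNC_GPRS | SYNC_ACRS;  /* what the kernel offers */

    printf("%d\n", can_sync(valid, SYNC_GPRS));            /* 1 */
    printf("%d\n", can_sync(valid, SYNC_ACRS | SYNC_CRS)); /* 0: CRS missing */
    return 0;
}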
++ */ ++ kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm); ++ kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc); ++ kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr); ++ kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea); ++ kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp); ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_RICCB)) { ++ memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64); ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB; ++ } ++ ++ /* pfault parameters */ ++ if (can_sync_regs(cs, KVM_SYNC_PFAULT)) { ++ cs->kvm_run->s.regs.pft = env->pfault_token; ++ cs->kvm_run->s.regs.pfs = env->pfault_select; ++ cs->kvm_run->s.regs.pfc = env->pfault_compare; ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT; ++ } else if (cap_async_pf) { ++ r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token); ++ if (r < 0) { ++ return r; ++ } ++ r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare); ++ if (r < 0) { ++ return r; ++ } ++ r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select); ++ if (r < 0) { ++ return r; ++ } ++ } ++ ++ /* access registers and control registers*/ ++ if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) { ++ for (i = 0; i < 16; i++) { ++ cs->kvm_run->s.regs.acrs[i] = env->aregs[i]; ++ cs->kvm_run->s.regs.crs[i] = env->cregs[i]; ++ } ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS; ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS; ++ } else { ++ for (i = 0; i < 16; i++) { ++ sregs.acrs[i] = env->aregs[i]; ++ sregs.crs[i] = env->cregs[i]; ++ } ++ r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs); ++ if (r < 0) { ++ return r; ++ } ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_GSCB)) { ++ memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32); ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB; ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_BPBC)) { ++ cs->kvm_run->s.regs.bpbc = env->bpbc; ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC; ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) { ++ cs->kvm_run->s.regs.etoken = env->etoken; ++ cs->kvm_run->s.regs.etoken_extension = env->etoken_extension; ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN; ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_DIAG318)) { ++ cs->kvm_run->s.regs.diag318 = env->diag318_info; ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318; ++ } ++ ++ /* Finally the prefix */ ++ if (can_sync_regs(cs, KVM_SYNC_PREFIX)) { ++ cs->kvm_run->s.regs.prefix = env->psa; ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX; ++ } else { ++ /* prefix is only supported via sync regs */ ++ } ++ return 0; ++} ++ ++int kvm_arch_get_registers(CPUState *cs) ++{ ++ S390CPU *cpu = S390_CPU(cs); ++ CPUS390XState *env = &cpu->env; ++ struct kvm_sregs sregs; ++ struct kvm_regs regs; ++ struct kvm_fpu fpu; ++ int i, r; ++ ++ /* get the PSW */ ++ env->psw.addr = cs->kvm_run->psw_addr; ++ env->psw.mask = cs->kvm_run->psw_mask; ++ ++ /* the GPRS */ ++ if (can_sync_regs(cs, KVM_SYNC_GPRS)) { ++ for (i = 0; i < 16; i++) { ++ env->regs[i] = cs->kvm_run->s.regs.gprs[i]; ++ } ++ } else { ++ r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs); ++ if (r < 0) { ++ return r; ++ } ++ for (i = 0; i < 16; i++) { ++ env->regs[i] = regs.gprs[i]; ++ } ++ } ++ ++ /* The ACRS and CRS */ ++ if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) { ++ for (i = 0; i < 16; i++) { ++ env->aregs[i] = cs->kvm_run->s.regs.acrs[i]; ++ env->cregs[i] = cs->kvm_run->s.regs.crs[i]; ++ } ++ } else { ++ r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs); ++ if (r < 0) { ++ return r; ++ } ++ for (i = 0; i < 16; i++) { ++ env->aregs[i] = sregs.acrs[i]; ++ env->cregs[i] =
sregs.crs[i]; ++ } ++ } ++ ++ /* Floating point and vector registers */ ++ if (can_sync_regs(cs, KVM_SYNC_VRS)) { ++ for (i = 0; i < 32; i++) { ++ env->vregs[i][0] = cs->kvm_run->s.regs.vrs[i][0]; ++ env->vregs[i][1] = cs->kvm_run->s.regs.vrs[i][1]; ++ } ++ env->fpc = cs->kvm_run->s.regs.fpc; ++ } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) { ++ for (i = 0; i < 16; i++) { ++ *get_freg(env, i) = cs->kvm_run->s.regs.fprs[i]; ++ } ++ env->fpc = cs->kvm_run->s.regs.fpc; ++ } else { ++ r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu); ++ if (r < 0) { ++ return r; ++ } ++ for (i = 0; i < 16; i++) { ++ *get_freg(env, i) = fpu.fprs[i]; ++ } ++ env->fpc = fpu.fpc; ++ } ++ ++ /* The prefix */ ++ if (can_sync_regs(cs, KVM_SYNC_PREFIX)) { ++ env->psa = cs->kvm_run->s.regs.prefix; ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_ARCH0)) { ++ env->cputm = cs->kvm_run->s.regs.cputm; ++ env->ckc = cs->kvm_run->s.regs.ckc; ++ env->todpr = cs->kvm_run->s.regs.todpr; ++ env->gbea = cs->kvm_run->s.regs.gbea; ++ env->pp = cs->kvm_run->s.regs.pp; ++ } else { ++ /* ++ * These ONE_REGS are not protected by a capability. As they are only ++ * necessary for migration we just trace a possible error, but don't ++ * return with an error return code. ++ */ ++ kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm); ++ kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc); ++ kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr); ++ kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea); ++ kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp); ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_RICCB)) { ++ memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64); ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_GSCB)) { ++ memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32); ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_BPBC)) { ++ env->bpbc = cs->kvm_run->s.regs.bpbc; ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) { ++ env->etoken = cs->kvm_run->s.regs.etoken; ++ env->etoken_extension = cs->kvm_run->s.regs.etoken_extension; ++ } ++ ++ /* pfault parameters */ ++ if (can_sync_regs(cs, KVM_SYNC_PFAULT)) { ++ env->pfault_token = cs->kvm_run->s.regs.pft; ++ env->pfault_select = cs->kvm_run->s.regs.pfs; ++ env->pfault_compare = cs->kvm_run->s.regs.pfc; ++ } else if (cap_async_pf) { ++ r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token); ++ if (r < 0) { ++ return r; ++ } ++ r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare); ++ if (r < 0) { ++ return r; ++ } ++ r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select); ++ if (r < 0) { ++ return r; ++ } ++ } ++ ++ if (can_sync_regs(cs, KVM_SYNC_DIAG318)) { ++ env->diag318_info = cs->kvm_run->s.regs.diag318; ++ } ++ ++ return 0; ++} ++ ++int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low) ++{ ++ int r; ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_TOD, ++ .attr = KVM_S390_VM_TOD_LOW, ++ .addr = (uint64_t)tod_low, ++ }; ++ ++ r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); ++ if (r) { ++ return r; ++ } ++ ++ attr.attr = KVM_S390_VM_TOD_HIGH; ++ attr.addr = (uint64_t)tod_high; ++ return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); ++} ++ ++int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low) ++{ ++ int r; ++ struct kvm_s390_vm_tod_clock gtod; ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_TOD, ++ .attr = KVM_S390_VM_TOD_EXT, ++ .addr = (uint64_t)&gtod, ++ }; ++ ++ r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); ++ *tod_high = gtod.epoch_idx; ++ *tod_low = gtod.tod; ++ ++ return r; ++} ++ ++int
kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low) ++{ ++ int r; ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_TOD, ++ .attr = KVM_S390_VM_TOD_LOW, ++ .addr = (uint64_t)&tod_low, ++ }; ++ ++ r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); ++ if (r) { ++ return r; ++ } ++ ++ attr.attr = KVM_S390_VM_TOD_HIGH; ++ attr.addr = (uint64_t)&tod_high; ++ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); ++} ++ ++int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low) ++{ ++ struct kvm_s390_vm_tod_clock gtod = { ++ .epoch_idx = tod_high, ++ .tod = tod_low, ++ }; ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_TOD, ++ .attr = KVM_S390_VM_TOD_EXT, ++ .addr = (uint64_t)&gtod, ++ }; ++ ++ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); ++} ++ ++/** ++ * kvm_s390_mem_op: ++ * @addr: the logical start address in guest memory ++ * @ar: the access register number ++ * @hostbuf: buffer in host memory. NULL = do only checks w/o copying ++ * @len: length that should be transferred ++ * @is_write: true = write, false = read ++ * Returns: 0 on success, non-zero if an exception or error occurred ++ * ++ * Use KVM ioctl to read/write from/to guest memory. An access exception ++ * is injected into the vCPU in case of translation errors. ++ */ ++int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf, ++ int len, bool is_write) ++{ ++ struct kvm_s390_mem_op mem_op = { ++ .gaddr = addr, ++ .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION, ++ .size = len, ++ .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE ++ : KVM_S390_MEMOP_LOGICAL_READ, ++ .buf = (uint64_t)hostbuf, ++ .ar = ar, ++ }; ++ int ret; ++ ++ if (!cap_mem_op) { ++ return -ENOSYS; ++ } ++ if (!hostbuf) { ++ mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY; ++ } ++ ++ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op); ++ if (ret < 0) { ++ warn_report("KVM_S390_MEM_OP failed: %s", strerror(-ret)); ++ } ++ return ret; ++} ++ ++int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf, ++ int len, bool is_write) ++{ ++ struct kvm_s390_mem_op mem_op = { ++ .sida_offset = offset, ++ .size = len, ++ .op = is_write ?
KVM_S390_MEMOP_SIDA_WRITE ++ : KVM_S390_MEMOP_SIDA_READ, ++ .buf = (uint64_t)hostbuf, ++ }; ++ int ret; ++ ++ if (!cap_mem_op || !cap_protected) { ++ return -ENOSYS; ++ } ++ ++ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op); ++ if (ret < 0) { ++ error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret)); ++ abort(); ++ } ++ return ret; ++} ++ ++static uint8_t const *sw_bp_inst; ++static uint8_t sw_bp_ilen; ++ ++static void determine_sw_breakpoint_instr(void) ++{ ++ /* DIAG 501 is used for sw breakpoints with old kernels */ ++ static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01}; ++ /* Instruction 0x0000 is used for sw breakpoints with recent kernels */ ++ static const uint8_t instr_0x0000[] = {0x00, 0x00}; ++ ++ if (sw_bp_inst) { ++ return; ++ } ++ if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) { ++ sw_bp_inst = diag_501; ++ sw_bp_ilen = sizeof(diag_501); ++ DPRINTF("KVM: will use 4-byte sw breakpoints.\n"); ++ } else { ++ sw_bp_inst = instr_0x0000; ++ sw_bp_ilen = sizeof(instr_0x0000); ++ DPRINTF("KVM: will use 2-byte sw breakpoints.\n"); ++ } ++} ++ ++int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) ++{ ++ determine_sw_breakpoint_instr(); ++ ++ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, ++ sw_bp_ilen, 0) || ++ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) { ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) ++{ ++ uint8_t t[MAX_ILEN]; ++ ++ if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) { ++ return -EINVAL; ++ } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) { ++ return -EINVAL; ++ } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, ++ sw_bp_ilen, 1)) { ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr, ++ int len, int type) ++{ ++ int n; ++ ++ for (n = 0; n < nb_hw_breakpoints; n++) { ++ if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type && ++ (hw_breakpoints[n].len == len || len == -1)) { ++ return &hw_breakpoints[n]; ++ } ++ } ++ ++ return NULL; ++} ++ ++static int insert_hw_breakpoint(target_ulong addr, int len, int type) ++{ ++ int size; ++ ++ if (find_hw_breakpoint(addr, len, type)) { ++ return -EEXIST; ++ } ++ ++ size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint); ++ ++ if (!hw_breakpoints) { ++ nb_hw_breakpoints = 0; ++ hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size); ++ } else { ++ hw_breakpoints = ++ (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size); ++ } ++ ++ if (!hw_breakpoints) { ++ nb_hw_breakpoints = 0; ++ return -ENOMEM; ++ } ++ ++ hw_breakpoints[nb_hw_breakpoints].addr = addr; ++ hw_breakpoints[nb_hw_breakpoints].len = len; ++ hw_breakpoints[nb_hw_breakpoints].type = type; ++ ++ nb_hw_breakpoints++; ++ ++ return 0; ++} ++ ++int kvm_arch_insert_hw_breakpoint(target_ulong addr, ++ target_ulong len, int type) ++{ ++ switch (type) { ++ case GDB_BREAKPOINT_HW: ++ type = KVM_HW_BP; ++ break; ++ case GDB_WATCHPOINT_WRITE: ++ if (len < 1) { ++ return -EINVAL; ++ } ++ type = KVM_HW_WP_WRITE; ++ break; ++ default: ++ return -ENOSYS; ++ } ++ return insert_hw_breakpoint(addr, len, type); ++} ++ ++int kvm_arch_remove_hw_breakpoint(target_ulong addr, ++ target_ulong len, int type) ++{ ++ int size; ++ struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type); ++ ++ if (bp == NULL) { ++ return -ENOENT; ++ } ++ ++ nb_hw_breakpoints--; ++ if 
(nb_hw_breakpoints > 0) { ++ /* ++ * In order to trim the array, move the last element to the position to ++ * be removed - if necessary. ++ */ ++ if (bp != &hw_breakpoints[nb_hw_breakpoints]) { ++ *bp = hw_breakpoints[nb_hw_breakpoints]; ++ } ++ size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint); ++ hw_breakpoints = ++ (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size); ++ } else { ++ g_free(hw_breakpoints); ++ hw_breakpoints = NULL; ++ } ++ ++ return 0; ++} ++ ++void kvm_arch_remove_all_hw_breakpoints(void) ++{ ++ nb_hw_breakpoints = 0; ++ g_free(hw_breakpoints); ++ hw_breakpoints = NULL; ++} ++ ++void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) ++{ ++ int i; ++ ++ if (nb_hw_breakpoints > 0) { ++ dbg->arch.nr_hw_bp = nb_hw_breakpoints; ++ dbg->arch.hw_bp = hw_breakpoints; ++ ++ for (i = 0; i < nb_hw_breakpoints; ++i) { ++ hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu, ++ hw_breakpoints[i].addr); ++ } ++ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; ++ } else { ++ dbg->arch.nr_hw_bp = 0; ++ dbg->arch.hw_bp = NULL; ++ } ++} ++ ++void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) ++{ ++} ++ ++MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) ++{ ++ return MEMTXATTRS_UNSPECIFIED; ++} ++ ++int kvm_arch_process_async_events(CPUState *cs) ++{ ++ return cs->halted; ++} ++ ++static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq, ++ struct kvm_s390_interrupt *interrupt) ++{ ++ int r = 0; ++ ++ interrupt->type = irq->type; ++ switch (irq->type) { ++ case KVM_S390_INT_VIRTIO: ++ interrupt->parm = irq->u.ext.ext_params; ++ /* fall through */ ++ case KVM_S390_INT_PFAULT_INIT: ++ case KVM_S390_INT_PFAULT_DONE: ++ interrupt->parm64 = irq->u.ext.ext_params2; ++ break; ++ case KVM_S390_PROGRAM_INT: ++ interrupt->parm = irq->u.pgm.code; ++ break; ++ case KVM_S390_SIGP_SET_PREFIX: ++ interrupt->parm = irq->u.prefix.address; ++ break; ++ case KVM_S390_INT_SERVICE: ++ interrupt->parm = irq->u.ext.ext_params; ++ break; ++ case KVM_S390_MCHK: ++ interrupt->parm = irq->u.mchk.cr14; ++ interrupt->parm64 = irq->u.mchk.mcic; ++ break; ++ case KVM_S390_INT_EXTERNAL_CALL: ++ interrupt->parm = irq->u.extcall.code; ++ break; ++ case KVM_S390_INT_EMERGENCY: ++ interrupt->parm = irq->u.emerg.code; ++ break; ++ case KVM_S390_SIGP_STOP: ++ case KVM_S390_RESTART: ++ break; /* These types have no parameters */ ++ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: ++ interrupt->parm = irq->u.io.subchannel_id << 16; ++ interrupt->parm |= irq->u.io.subchannel_nr; ++ interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32; ++ interrupt->parm64 |= irq->u.io.io_int_word; ++ break; ++ default: ++ r = -EINVAL; ++ break; ++ } ++ return r; ++} ++ ++static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq) ++{ ++ struct kvm_s390_interrupt kvmint = {}; ++ int r; ++ ++ r = s390_kvm_irq_to_interrupt(irq, &kvmint); ++ if (r < 0) { ++ fprintf(stderr, "%s called with bogus interrupt\n", __func__); ++ exit(1); ++ } ++ ++ r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint); ++ if (r < 0) { ++ fprintf(stderr, "KVM failed to inject interrupt\n"); ++ exit(1); ++ } ++} ++ ++void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq) ++{ ++ CPUState *cs = CPU(cpu); ++ int r; ++ ++ if (cap_s390_irq) { ++ r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq); ++ if (!r) { ++ return; ++ } ++ error_report("KVM failed to inject interrupt %llx", irq->type); ++ exit(1); ++ } ++ ++ inject_vcpu_irq_legacy(cs, irq); ++} ++ ++void 
kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq) ++{ ++ struct kvm_s390_interrupt kvmint = {}; ++ int r; ++ ++ r = s390_kvm_irq_to_interrupt(irq, &kvmint); ++ if (r < 0) { ++ fprintf(stderr, "%s called with bogus interrupt\n", __func__); ++ exit(1); ++ } ++ ++ r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint); ++ if (r < 0) { ++ fprintf(stderr, "KVM failed to inject interrupt\n"); ++ exit(1); ++ } ++} ++ ++void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code) ++{ ++ struct kvm_s390_irq irq = { ++ .type = KVM_S390_PROGRAM_INT, ++ .u.pgm.code = code, ++ }; ++ qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n", ++ cpu->env.psw.addr); ++ kvm_s390_vcpu_interrupt(cpu, &irq); ++} ++ ++void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code) ++{ ++ struct kvm_s390_irq irq = { ++ .type = KVM_S390_PROGRAM_INT, ++ .u.pgm.code = code, ++ .u.pgm.trans_exc_code = te_code, ++ .u.pgm.exc_access_id = te_code & 3, ++ }; ++ ++ kvm_s390_vcpu_interrupt(cpu, &irq); ++} ++ ++static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run, ++ uint16_t ipbh0) ++{ ++ CPUS390XState *env = &cpu->env; ++ uint64_t sccb; ++ uint32_t code; ++ int r; ++ ++ sccb = env->regs[ipbh0 & 0xf]; ++ code = env->regs[(ipbh0 & 0xf0) >> 4]; ++ ++ switch (run->s390_sieic.icptcode) { ++ case ICPT_PV_INSTR_NOTIFICATION: ++ g_assert(s390_is_pv()); ++ /* The notification intercepts are currently handled by KVM */ ++ error_report("unexpected SCLP PV notification"); ++ exit(1); ++ break; ++ case ICPT_PV_INSTR: ++ g_assert(s390_is_pv()); ++ sclp_service_call_protected(env, sccb, code); ++ /* Setting the CC is done by the Ultravisor. */ ++ break; ++ case ICPT_INSTRUCTION: ++ g_assert(!s390_is_pv()); ++ r = sclp_service_call(env, sccb, code); ++ if (r < 0) { ++ kvm_s390_program_interrupt(cpu, -r); ++ return; ++ } ++ setcc(cpu, r); ++ } ++} ++ ++static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) ++{ ++ CPUS390XState *env = &cpu->env; ++ int rc = 0; ++ uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16; ++ ++ switch (ipa1) { ++ case PRIV_B2_XSCH: ++ ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED); ++ break; ++ case PRIV_B2_CSCH: ++ ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED); ++ break; ++ case PRIV_B2_HSCH: ++ ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED); ++ break; ++ case PRIV_B2_MSCH: ++ ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED); ++ break; ++ case PRIV_B2_SSCH: ++ ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED); ++ break; ++ case PRIV_B2_STCRW: ++ ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED); ++ break; ++ case PRIV_B2_STSCH: ++ ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED); ++ break; ++ case PRIV_B2_TSCH: ++ /* We should only get tsch via KVM_EXIT_S390_TSCH. */ ++ fprintf(stderr, "Spurious tsch intercept\n"); ++ break; ++ case PRIV_B2_CHSC: ++ ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED); ++ break; ++ case PRIV_B2_TPI: ++ /* This should have been handled by kvm already. */ ++ fprintf(stderr, "Spurious tpi intercept\n"); ++ break; ++ case PRIV_B2_SCHM: ++ ioinst_handle_schm(cpu, env->regs[1], env->regs[2], ++ run->s390_sieic.ipb, RA_IGNORED); ++ break; ++ case PRIV_B2_RSCH: ++ ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED); ++ break; ++ case PRIV_B2_RCHP: ++ ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED); ++ break; ++ case PRIV_B2_STCPS: ++ /* We do not provide this instruction, it is suppressed. 
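[Illustration, not part of the patch: kvm_sclp_service_call() and handle_b2() above pick register numbers out of the intercepted instruction words that KVM reports in s390_sieic. A toy sketch of that field extraction; the example encoding (B2 20 for SERVICE CALL, register nibbles in the high halfword of ipb) is a plausible sample, not taken from a real trace.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t ipa = 0xb220;      /* first two instruction bytes, B2 20 */
    uint32_t ipb = 0x00340000;  /* operand byte 0x34: registers 3 and 4 */

    uint8_t ipa1 = ipa & 0x00ff;               /* selects the 0xb2 sub-handler */
    uint16_t ipbh0 = (ipb & 0xffff0000) >> 16; /* high halfword of ipb */
    unsigned code_reg = (ipbh0 & 0xf0) >> 4;   /* register with the command code */
    unsigned sccb_reg = ipbh0 & 0xf;           /* register with the SCCB address */

    printf("ipa1=0x%02x code in r%u, sccb in r%u\n",
           (unsigned)ipa1, code_reg, sccb_reg);
    return 0;
}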
*/ ++ break; ++ case PRIV_B2_SAL: ++ ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED); ++ break; ++ case PRIV_B2_SIGA: ++ /* Not provided, set CC = 3 for subchannel not operational */ ++ setcc(cpu, 3); ++ break; ++ case PRIV_B2_SCLP_CALL: ++ kvm_sclp_service_call(cpu, run, ipbh0); ++ break; ++ default: ++ rc = -1; ++ DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1); ++ break; ++ } ++ ++ return rc; ++} ++ ++static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run, ++ uint8_t *ar) ++{ ++ CPUS390XState *env = &cpu->env; ++ uint32_t x2 = (run->s390_sieic.ipa & 0x000f); ++ uint32_t base2 = run->s390_sieic.ipb >> 28; ++ uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) + ++ ((run->s390_sieic.ipb & 0xff00) << 4); ++ ++ if (disp2 & 0x80000) { ++ disp2 += 0xfff00000; ++ } ++ if (ar) { ++ *ar = base2; ++ } ++ ++ return (base2 ? env->regs[base2] : 0) + ++ (x2 ? env->regs[x2] : 0) + (long)(int)disp2; ++} ++ ++static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run, ++ uint8_t *ar) ++{ ++ CPUS390XState *env = &cpu->env; ++ uint32_t base2 = run->s390_sieic.ipb >> 28; ++ uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) + ++ ((run->s390_sieic.ipb & 0xff00) << 4); ++ ++ if (disp2 & 0x80000) { ++ disp2 += 0xfff00000; ++ } ++ if (ar) { ++ *ar = base2; ++ } ++ ++ return (base2 ? env->regs[base2] : 0) + (long)(int)disp2; ++} ++ ++static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run) ++{ ++ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; ++ ++ if (s390_has_feat(S390_FEAT_ZPCI)) { ++ return clp_service_call(cpu, r2, RA_IGNORED); ++ } else { ++ return -1; ++ } ++} ++ ++static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run) ++{ ++ uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20; ++ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; ++ ++ if (s390_has_feat(S390_FEAT_ZPCI)) { ++ return pcilg_service_call(cpu, r1, r2, RA_IGNORED); ++ } else { ++ return -1; ++ } ++} ++ ++static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run) ++{ ++ uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20; ++ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; ++ ++ if (s390_has_feat(S390_FEAT_ZPCI)) { ++ return pcistg_service_call(cpu, r1, r2, RA_IGNORED); ++ } else { ++ return -1; ++ } ++} ++ ++static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run) ++{ ++ uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; ++ uint64_t fiba; ++ uint8_t ar; ++ ++ if (s390_has_feat(S390_FEAT_ZPCI)) { ++ fiba = get_base_disp_rxy(cpu, run, &ar); ++ ++ return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED); ++ } else { ++ return -1; ++ } ++} ++ ++static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run) ++{ ++ CPUS390XState *env = &cpu->env; ++ uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; ++ uint8_t r3 = run->s390_sieic.ipa & 0x000f; ++ uint8_t isc; ++ uint16_t mode; ++ int r; ++ ++ mode = env->regs[r1] & 0xffff; ++ isc = (env->regs[r3] >> 27) & 0x7; ++ r = css_do_sic(env, isc, mode); ++ if (r) { ++ kvm_s390_program_interrupt(cpu, -r); ++ } ++ ++ return 0; ++} ++ ++static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run) ++{ ++ uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20; ++ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; ++ ++ if (s390_has_feat(S390_FEAT_ZPCI)) { ++ return rpcit_service_call(cpu, r1, r2, RA_IGNORED); ++ } else { ++ return -1; ++ } ++} ++ ++static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run) ++{ ++ uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; ++ uint8_t r3 = 
run->s390_sieic.ipa & 0x000f; ++ uint64_t gaddr; ++ uint8_t ar; ++ ++ if (s390_has_feat(S390_FEAT_ZPCI)) { ++ gaddr = get_base_disp_rsy(cpu, run, &ar); ++ ++ return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED); ++ } else { ++ return -1; ++ } ++} ++ ++static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run) ++{ ++ uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; ++ uint64_t fiba; ++ uint8_t ar; ++ ++ if (s390_has_feat(S390_FEAT_ZPCI)) { ++ fiba = get_base_disp_rxy(cpu, run, &ar); ++ ++ return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED); ++ } else { ++ return -1; ++ } ++} ++ ++static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) ++{ ++ int r = 0; ++ ++ switch (ipa1) { ++ case PRIV_B9_CLP: ++ r = kvm_clp_service_call(cpu, run); ++ break; ++ case PRIV_B9_PCISTG: ++ r = kvm_pcistg_service_call(cpu, run); ++ break; ++ case PRIV_B9_PCILG: ++ r = kvm_pcilg_service_call(cpu, run); ++ break; ++ case PRIV_B9_RPCIT: ++ r = kvm_rpcit_service_call(cpu, run); ++ break; ++ case PRIV_B9_EQBS: ++ /* just inject exception */ ++ r = -1; ++ break; ++ default: ++ r = -1; ++ DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1); ++ break; ++ } ++ ++ return r; ++} ++ ++static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl) ++{ ++ int r = 0; ++ ++ switch (ipbl) { ++ case PRIV_EB_PCISTB: ++ r = kvm_pcistb_service_call(cpu, run); ++ break; ++ case PRIV_EB_SIC: ++ r = kvm_sic_service_call(cpu, run); ++ break; ++ case PRIV_EB_SQBS: ++ /* just inject exception */ ++ r = -1; ++ break; ++ default: ++ r = -1; ++ DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl); ++ break; ++ } ++ ++ return r; ++} ++ ++static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl) ++{ ++ int r = 0; ++ ++ switch (ipbl) { ++ case PRIV_E3_MPCIFC: ++ r = kvm_mpcifc_service_call(cpu, run); ++ break; ++ case PRIV_E3_STPCIFC: ++ r = kvm_stpcifc_service_call(cpu, run); ++ break; ++ default: ++ r = -1; ++ DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl); ++ break; ++ } ++ ++ return r; ++} ++ ++static int handle_hypercall(S390CPU *cpu, struct kvm_run *run) ++{ ++ CPUS390XState *env = &cpu->env; ++ int ret; ++ ++ ret = s390_virtio_hypercall(env); ++ if (ret == -EINVAL) { ++ kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); ++ return 0; ++ } ++ ++ return ret; ++} ++ ++static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run) ++{ ++ uint64_t r1, r3; ++ int rc; ++ ++ r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; ++ r3 = run->s390_sieic.ipa & 0x000f; ++ rc = handle_diag_288(&cpu->env, r1, r3); ++ if (rc) { ++ kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); ++ } ++} ++ ++static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run) ++{ ++ uint64_t r1, r3; ++ ++ r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; ++ r3 = run->s390_sieic.ipa & 0x000f; ++ handle_diag_308(&cpu->env, r1, r3, RA_IGNORED); ++} ++ ++static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run) ++{ ++ CPUS390XState *env = &cpu->env; ++ unsigned long pc; ++ ++ pc = env->psw.addr - sw_bp_ilen; ++ if (kvm_find_sw_breakpoint(CPU(cpu), pc)) { ++ env->psw.addr = pc; ++ return EXCP_DEBUG; ++ } ++ ++ return -ENOENT; ++} ++ ++void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info) ++{ ++ CPUS390XState *env = &S390_CPU(cs)->env; ++ ++ /* Feat bit is set only if KVM supports sync for diag318 */ ++ if (s390_has_feat(S390_FEAT_DIAG_318)) { ++ env->diag318_info = diag318_info; ++ cs->kvm_run->s.regs.diag318 = diag318_info; ++ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318; ++ } ++} ++ ++static void 
handle_diag_318(S390CPU *cpu, struct kvm_run *run) ++{ ++ uint64_t reg = (run->s390_sieic.ipa & 0x00f0) >> 4; ++ uint64_t diag318_info = run->s.regs.gprs[reg]; ++ CPUState *t; ++ ++ /* ++ * DIAG 318 can only be enabled with KVM support. As such, let's ++ * ensure a guest cannot execute this instruction erroneously. ++ */ ++ if (!s390_has_feat(S390_FEAT_DIAG_318)) { ++ kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); ++ return; ++ } ++ ++ CPU_FOREACH(t) { ++ run_on_cpu(t, s390_do_cpu_set_diag318, ++ RUN_ON_CPU_HOST_ULONG(diag318_info)); ++ } ++} ++ ++#define DIAG_KVM_CODE_MASK 0x000000000000ffff ++ ++static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb) ++{ ++ int r = 0; ++ uint16_t func_code; ++ ++ /* ++ * For any diagnose call we support, bits 48-63 of the resulting ++ * address specify the function code; the remainder is ignored. ++ */ ++ func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK; ++ switch (func_code) { ++ case DIAG_TIMEREVENT: ++ kvm_handle_diag_288(cpu, run); ++ break; ++ case DIAG_IPL: ++ kvm_handle_diag_308(cpu, run); ++ break; ++ case DIAG_SET_CONTROL_PROGRAM_CODES: ++ handle_diag_318(cpu, run); ++ break; ++ case DIAG_KVM_HYPERCALL: ++ r = handle_hypercall(cpu, run); ++ break; ++ case DIAG_KVM_BREAKPOINT: ++ r = handle_sw_breakpoint(cpu, run); ++ break; ++ default: ++ DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code); ++ kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); ++ break; ++ } ++ ++ return r; ++} ++ ++static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb) ++{ ++ CPUS390XState *env = &cpu->env; ++ const uint8_t r1 = ipa1 >> 4; ++ const uint8_t r3 = ipa1 & 0x0f; ++ int ret; ++ uint8_t order; ++ ++ /* get order code */ ++ order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK; ++ ++ ret = handle_sigp(env, order, r1, r3); ++ setcc(cpu, ret); ++ return 0; ++} ++ ++static int handle_instruction(S390CPU *cpu, struct kvm_run *run) ++{ ++ unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00); ++ uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff; ++ int r = -1; ++ ++ DPRINTF("handle_instruction 0x%x 0x%x\n", ++ run->s390_sieic.ipa, run->s390_sieic.ipb); ++ switch (ipa0) { ++ case IPA0_B2: ++ r = handle_b2(cpu, run, ipa1); ++ break; ++ case IPA0_B9: ++ r = handle_b9(cpu, run, ipa1); ++ break; ++ case IPA0_EB: ++ r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff); ++ break; ++ case IPA0_E3: ++ r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff); ++ break; ++ case IPA0_DIAG: ++ r = handle_diag(cpu, run, run->s390_sieic.ipb); ++ break; ++ case IPA0_SIGP: ++ r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb); ++ break; ++ } ++ ++ if (r < 0) { ++ r = 0; ++ kvm_s390_program_interrupt(cpu, PGM_OPERATION); ++ } ++ ++ return r; ++} ++ ++static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason, ++ int pswoffset) ++{ ++ CPUState *cs = CPU(cpu); ++ ++ s390_cpu_halt(cpu); ++ cpu->env.crash_reason = reason; ++ qemu_system_guest_panicked(cpu_get_crash_info(cs)); ++} ++ ++/* try to detect pgm check loops */ ++static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run) ++{ ++ CPUState *cs = CPU(cpu); ++ PSW oldpsw, newpsw; ++ ++ newpsw.mask = ldq_phys(cs->as, cpu->env.psa + ++ offsetof(LowCore, program_new_psw)); ++ newpsw.addr = ldq_phys(cs->as, cpu->env.psa + ++ offsetof(LowCore, program_new_psw) + 8); ++ oldpsw.mask = run->psw_mask; ++ oldpsw.addr = run->psw_addr; ++ /* ++ * Avoid endless loops of operation exceptions, if the pgm new ++ * PSW will cause a new operation exception. 
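[Illustration, not part of the patch: handle_diag() above only looks at bits 48-63 of the computed operand address, so whatever base register and displacement the guest used, the low 16 bits alone select the DIAGNOSE function. A toy demonstration with made-up register contents; 0x308 is the IPL function code dispatched above.]

#include <stdint.h>
#include <stdio.h>

#define DIAG_KVM_CODE_MASK 0x000000000000ffffULL

int main(void)
{
    uint64_t base = 0x12340000; /* pretend contents of the base register */
    uint64_t disp = 0x308;      /* displacement from the instruction */
    uint64_t addr = base + disp;

    /* only bits 48-63 matter for the function code */
    uint16_t func_code = addr & DIAG_KVM_CODE_MASK;

    printf("address 0x%llx -> function code 0x%x\n",
           (unsigned long long)addr, (unsigned)func_code); /* 0x308 */
    return 0;
}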
++ * The heuristic checks if the pgm new psw is within 6 bytes before ++ * the faulting psw address (with same DAT, AS settings) and the ++ * new psw is not a wait psw and the fault was not triggered by ++ * problem state. In that case go into crashed state. ++ */ ++ ++ if (oldpsw.addr - newpsw.addr <= 6 && ++ !(newpsw.mask & PSW_MASK_WAIT) && ++ !(oldpsw.mask & PSW_MASK_PSTATE) && ++ (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) && ++ (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) { ++ unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP, ++ offsetof(LowCore, program_new_psw)); ++ return EXCP_HALTED; ++ } ++ return 0; ++} ++ ++static int handle_intercept(S390CPU *cpu) ++{ ++ CPUState *cs = CPU(cpu); ++ struct kvm_run *run = cs->kvm_run; ++ int icpt_code = run->s390_sieic.icptcode; ++ int r = 0; ++ ++ DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code, (long)run->psw_addr); ++ switch (icpt_code) { ++ case ICPT_INSTRUCTION: ++ case ICPT_PV_INSTR: ++ case ICPT_PV_INSTR_NOTIFICATION: ++ r = handle_instruction(cpu, run); ++ break; ++ case ICPT_PROGRAM: ++ unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP, ++ offsetof(LowCore, program_new_psw)); ++ r = EXCP_HALTED; ++ break; ++ case ICPT_EXT_INT: ++ unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP, ++ offsetof(LowCore, external_new_psw)); ++ r = EXCP_HALTED; ++ break; ++ case ICPT_WAITPSW: ++ /* disabled wait, since enabled wait is handled in kernel */ ++ s390_handle_wait(cpu); ++ r = EXCP_HALTED; ++ break; ++ case ICPT_CPU_STOP: ++ do_stop_interrupt(&cpu->env); ++ r = EXCP_HALTED; ++ break; ++ case ICPT_OPEREXC: ++ /* check for break points */ ++ r = handle_sw_breakpoint(cpu, run); ++ if (r == -ENOENT) { ++ /* Then check for potential pgm check loops */ ++ r = handle_oper_loop(cpu, run); ++ if (r == 0) { ++ kvm_s390_program_interrupt(cpu, PGM_OPERATION); ++ } ++ } ++ break; ++ case ICPT_SOFT_INTERCEPT: ++ fprintf(stderr, "KVM unimplemented icpt SOFT\n"); ++ exit(1); ++ break; ++ case ICPT_IO: ++ fprintf(stderr, "KVM unimplemented icpt IO\n"); ++ exit(1); ++ break; ++ default: ++ fprintf(stderr, "Unknown intercept code: %d\n", icpt_code); ++ exit(1); ++ break; ++ } ++ ++ return r; ++} ++ ++static int handle_tsch(S390CPU *cpu) ++{ ++ CPUState *cs = CPU(cpu); ++ struct kvm_run *run = cs->kvm_run; ++ int ret; ++ ++ ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb, ++ RA_IGNORED); ++ if (ret < 0) { ++ /* ++ * Failure. ++ * If an I/O interrupt had been dequeued, we have to reinject it. ++ */ ++ if (run->s390_tsch.dequeued) { ++ s390_io_interrupt(run->s390_tsch.subchannel_id, ++ run->s390_tsch.subchannel_nr, ++ run->s390_tsch.io_int_parm, ++ run->s390_tsch.io_int_word); ++ } ++ ret = 0; ++ } ++ return ret; ++} ++ ++static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar) ++{ ++ const MachineState *ms = MACHINE(qdev_get_machine()); ++ uint16_t conf_cpus = 0, reserved_cpus = 0; ++ SysIB_322 sysib; ++ int del, i; ++ ++ if (s390_is_pv()) { ++ s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib)); ++ } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) { ++ return; ++ } ++ /* Shift the stack of Extended Names to prepare for our own data */ ++ memmove(&sysib.ext_names[1], &sysib.ext_names[0], ++ sizeof(sysib.ext_names[0]) * (sysib.count - 1)); ++ /* First virt level, that doesn't provide Ext Names delimits stack. It is ++ * assumed it's not capable of managing Extended Names for lower levels. 
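[Illustration, not part of the patch: a reduced standalone version of the loop heuristic in handle_oper_loop() earlier. If the program-check new PSW points at, or within 6 bytes before, the faulting address and is not a wait PSW, delivering the exception would fault again immediately, so the guest is treated as crashed. The DAT/ASC mask comparison is omitted for brevity and the PSW bit values are illustrative.]

#include <stdint.h>
#include <stdio.h>

#define PSW_MASK_WAIT   0x0002000000000000ULL /* illustrative bit positions */
#define PSW_MASK_PSTATE 0x0001000000000000ULL

typedef struct { uint64_t mask, addr; } PSW;

static int looks_like_opint_loop(PSW oldpsw, PSW newpsw)
{
    /* unsigned subtraction: a new PSW past the fault address wraps and fails */
    return oldpsw.addr - newpsw.addr <= 6 &&
           !(newpsw.mask & PSW_MASK_WAIT) &&
           !(oldpsw.mask & PSW_MASK_PSTATE);
}

int main(void)
{
    PSW oldpsw = { .mask = 0, .addr = 0x1000 };
    PSW newpsw = { .mask = 0, .addr = 0x1000 }; /* points back at the fault */

    printf("loop: %d\n", looks_like_opint_loop(oldpsw, newpsw)); /* 1 */
    newpsw.addr = 0x2000;                       /* sane handler elsewhere */
    printf("loop: %d\n", looks_like_opint_loop(oldpsw, newpsw)); /* 0 */
    return 0;
}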
++ */ ++ for (del = 1; del < sysib.count; del++) { ++ if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) { ++ break; ++ } ++ } ++ if (del < sysib.count) { ++ memset(sysib.ext_names[del], 0, ++ sizeof(sysib.ext_names[0]) * (sysib.count - del)); ++ } ++ ++ /* count the cpus and split them into configured and reserved ones */ ++ for (i = 0; i < ms->possible_cpus->len; i++) { ++ if (ms->possible_cpus->cpus[i].cpu) { ++ conf_cpus++; ++ } else { ++ reserved_cpus++; ++ } ++ } ++ sysib.vm[0].total_cpus = conf_cpus + reserved_cpus; ++ sysib.vm[0].conf_cpus = conf_cpus; ++ sysib.vm[0].reserved_cpus = reserved_cpus; ++ ++ /* Insert short machine name in EBCDIC, padded with blanks */ ++ if (qemu_name) { ++ memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name)); ++ ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name), ++ strlen(qemu_name))); ++ } ++ sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */ ++ /* If hypervisor specifies zero Extended Name in STSI322 SYSIB, it's ++ * considered by s390 as not capable of providing any Extended Name. ++ * Therefore if no name was specified on qemu invocation, we go with the ++ * same "KVMguest" default, which KVM has filled into short name field. ++ */ ++ strpadcpy((char *)sysib.ext_names[0], ++ sizeof(sysib.ext_names[0]), ++ qemu_name ?: "KVMguest", '\0'); ++ ++ /* Insert UUID */ ++ memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid)); ++ ++ if (s390_is_pv()) { ++ s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib)); ++ } else { ++ s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib)); ++ } ++} ++ ++static int handle_stsi(S390CPU *cpu) ++{ ++ CPUState *cs = CPU(cpu); ++ struct kvm_run *run = cs->kvm_run; ++ ++ switch (run->s390_stsi.fc) { ++ case 3: ++ if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) { ++ return 0; ++ } ++ /* Only sysib 3.2.2 needs post-handling for now. 
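[Illustration, not part of the patch: a toy model of the Extended Name handling in insert_stsi_3_2_2() above. The existing per-level names are slid up one slot to make room for our own entry at level 0, then everything from the first level without a name is blanked, since lower levels cannot be trusted past that point. Sizes and sample names are made up.]

#include <stdio.h>
#include <string.h>

#define LEVELS 4
#define NAME_LEN 16

int main(void)
{
    char names[LEVELS][NAME_LEN] = { "lpar", "kvm-l1", "", "stale" };
    int count = LEVELS, del;

    /* make room for our own name at index 0 */
    memmove(&names[1], &names[0], sizeof(names[0]) * (count - 1));
    strcpy(names[0], "qemu-guest");

    /* the first empty name delimits the usable stack */
    for (del = 1; del < count; del++) {
        if (!names[del][0]) {
            break;
        }
    }
    if (del < count) {
        memset(names[del], 0, sizeof(names[0]) * (count - del));
    }

    for (int i = 0; i < count; i++) {
        printf("level %d: \"%s\"\n", i, names[i]);
    }
    return 0;
}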
*/ ++ insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar); ++ return 0; ++ default: ++ return 0; ++ } ++} ++ ++static int kvm_arch_handle_debug_exit(S390CPU *cpu) ++{ ++ CPUState *cs = CPU(cpu); ++ struct kvm_run *run = cs->kvm_run; ++ ++ int ret = 0; ++ struct kvm_debug_exit_arch *arch_info = &run->debug.arch; ++ ++ switch (arch_info->type) { ++ case KVM_HW_WP_WRITE: ++ if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) { ++ cs->watchpoint_hit = &hw_watchpoint; ++ hw_watchpoint.vaddr = arch_info->addr; ++ hw_watchpoint.flags = BP_MEM_WRITE; ++ ret = EXCP_DEBUG; ++ } ++ break; ++ case KVM_HW_BP: ++ if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) { ++ ret = EXCP_DEBUG; ++ } ++ break; ++ case KVM_SINGLESTEP: ++ if (cs->singlestep_enabled) { ++ ret = EXCP_DEBUG; ++ } ++ break; ++ default: ++ ret = -ENOSYS; ++ } ++ ++ return ret; ++} ++ ++int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) ++{ ++ S390CPU *cpu = S390_CPU(cs); ++ int ret = 0; ++ ++ qemu_mutex_lock_iothread(); ++ ++ kvm_cpu_synchronize_state(cs); ++ ++ switch (run->exit_reason) { ++ case KVM_EXIT_S390_SIEIC: ++ ret = handle_intercept(cpu); ++ break; ++ case KVM_EXIT_S390_RESET: ++ s390_ipl_reset_request(cs, S390_RESET_REIPL); ++ break; ++ case KVM_EXIT_S390_TSCH: ++ ret = handle_tsch(cpu); ++ break; ++ case KVM_EXIT_S390_STSI: ++ ret = handle_stsi(cpu); ++ break; ++ case KVM_EXIT_DEBUG: ++ ret = kvm_arch_handle_debug_exit(cpu); ++ break; ++ default: ++ fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason); ++ break; ++ } ++ qemu_mutex_unlock_iothread(); ++ ++ if (ret == 0) { ++ ret = EXCP_INTERRUPT; ++ } ++ return ret; ++} ++ ++bool kvm_arch_stop_on_emulation_error(CPUState *cpu) ++{ ++ return true; ++} ++ ++void kvm_s390_enable_css_support(S390CPU *cpu) ++{ ++ int r; ++ ++ /* Activate host kernel channel subsystem support. */ ++ r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0); ++ assert(r == 0); ++} ++ ++void kvm_arch_init_irq_routing(KVMState *s) ++{ ++ /* ++ * Note that while irqchip capabilities generally imply that cpustates ++ * are handled in-kernel, it is not true for s390 (yet); therefore, we ++ * have to override the common code kvm_halt_in_kernel_allowed setting. 
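[Illustration, not part of the patch: kvm_arch_handle_debug_exit() above calls find_hw_breakpoint() with a length of -1, which acts as a wildcard so a debug exit can be matched by address and type alone. A self-contained sketch of that lookup over a plain array; the type values are placeholders for KVM_HW_BP/KVM_HW_WP_WRITE.]

#include <stddef.h>
#include <stdio.h>

struct hw_bp { unsigned long addr; int len; int type; };

/* len == -1 matches any registered length, as in find_hw_breakpoint() */
static struct hw_bp *find_bp(struct hw_bp *bps, int nb,
                             unsigned long addr, int len, int type)
{
    for (int n = 0; n < nb; n++) {
        if (bps[n].addr == addr && bps[n].type == type &&
            (bps[n].len == len || len == -1)) {
            return &bps[n];
        }
    }
    return NULL;
}

int main(void)
{
    struct hw_bp bps[] = {
        { 0x1000, 4, 1 }, /* e.g. a hardware breakpoint */
        { 0x2000, 8, 2 }, /* e.g. a write watchpoint */
    };

    printf("%p\n", (void *)find_bp(bps, 2, 0x2000, -1, 2)); /* found */
    printf("%p\n", (void *)find_bp(bps, 2, 0x2000, -1, 1)); /* NULL: wrong type */
    return 0;
}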
++ */ ++ if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) { ++ kvm_gsi_routing_allowed = true; ++ kvm_halt_in_kernel_allowed = false; ++ } ++} ++ ++int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch, ++ int vq, bool assign) ++{ ++ struct kvm_ioeventfd kick = { ++ .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY | ++ KVM_IOEVENTFD_FLAG_DATAMATCH, ++ .fd = event_notifier_get_fd(notifier), ++ .datamatch = vq, ++ .addr = sch, ++ .len = 8, ++ }; ++ trace_kvm_assign_subch_ioeventfd(kick.fd, kick.addr, assign, ++ kick.datamatch); ++ if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) { ++ return -ENOSYS; ++ } ++ if (!assign) { ++ kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN; ++ } ++ return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick); ++} ++ ++int kvm_s390_get_ri(void) ++{ ++ return cap_ri; ++} ++ ++int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state) ++{ ++ struct kvm_mp_state mp_state = {}; ++ int ret; ++ ++ /* the kvm part might not have been initialized yet */ ++ if (CPU(cpu)->kvm_state == NULL) { ++ return 0; ++ } ++ ++ switch (cpu_state) { ++ case S390_CPU_STATE_STOPPED: ++ mp_state.mp_state = KVM_MP_STATE_STOPPED; ++ break; ++ case S390_CPU_STATE_CHECK_STOP: ++ mp_state.mp_state = KVM_MP_STATE_CHECK_STOP; ++ break; ++ case S390_CPU_STATE_OPERATING: ++ mp_state.mp_state = KVM_MP_STATE_OPERATING; ++ break; ++ case S390_CPU_STATE_LOAD: ++ mp_state.mp_state = KVM_MP_STATE_LOAD; ++ break; ++ default: ++ error_report("Requested CPU state is not a valid S390 CPU state: %u", ++ cpu_state); ++ exit(1); ++ } ++ ++ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); ++ if (ret) { ++ trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state, ++ strerror(-ret)); ++ } ++ ++ return ret; ++} ++ ++void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu) ++{ ++ unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus; ++ struct kvm_s390_irq_state irq_state = { ++ .buf = (uint64_t) cpu->irqstate, ++ .len = VCPU_IRQ_BUF_SIZE(max_cpus), ++ }; ++ CPUState *cs = CPU(cpu); ++ int32_t bytes; ++ ++ if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) { ++ return; ++ } ++ ++ bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state); ++ if (bytes < 0) { ++ cpu->irqstate_saved_size = 0; ++ error_report("Migration of interrupt state failed"); ++ return; ++ } ++ ++ cpu->irqstate_saved_size = bytes; ++} ++ ++int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu) ++{ ++ CPUState *cs = CPU(cpu); ++ struct kvm_s390_irq_state irq_state = { ++ .buf = (uint64_t) cpu->irqstate, ++ .len = cpu->irqstate_saved_size, ++ }; ++ int r; ++ ++ if (cpu->irqstate_saved_size == 0) { ++ return 0; ++ } ++ ++ if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) { ++ return -ENOSYS; ++ } ++ ++ r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state); ++ if (r) { ++ error_report("Setting interrupt state failed %d", r); ++ } ++ return r; ++} ++ ++int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, ++ uint64_t address, uint32_t data, PCIDevice *dev) ++{ ++ S390PCIBusDevice *pbdev; ++ uint32_t vec = data & ZPCI_MSI_VEC_MASK; ++ ++ if (!dev) { ++ DPRINTF("add_msi_route no pci device\n"); ++ return -ENODEV; ++ } ++ ++ pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id); ++ if (!pbdev) { ++ DPRINTF("add_msi_route no zpci device\n"); ++ return -ENODEV; ++ } ++ ++ route->type = KVM_IRQ_ROUTING_S390_ADAPTER; ++ route->flags = 0; ++ route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr; ++ route->u.adapter.ind_addr = 
pbdev->routes.adapter.ind_addr; ++ route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset; ++ route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec; ++ route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id; ++ return 0; ++} ++ ++int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, ++ int vector, PCIDevice *dev) ++{ ++ return 0; ++} ++ ++int kvm_arch_release_virq_post(int virq) ++{ ++ return 0; ++} ++ ++int kvm_arch_msi_data_to_gsi(uint32_t data) ++{ ++ abort(); ++} ++ ++static int query_cpu_subfunc(S390FeatBitmap features) ++{ ++ struct kvm_s390_vm_cpu_subfunc prop = {}; ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_CPU_MODEL, ++ .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC, ++ .addr = (uint64_t) &prop, ++ }; ++ int rc; ++ ++ rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); ++ if (rc) { ++ return rc; ++ } ++ ++ /* ++ * We're going to add all subfunctions now, if the corresponding feature ++ * is available that unlocks the query functions. ++ */ ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo); ++ if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) { ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff); ++ } ++ if (test_bit(S390_FEAT_MSA, features)) { ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac); ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc); ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km); ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd); ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd); ++ } ++ if (test_bit(S390_FEAT_MSA_EXT_3, features)) { ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo); ++ } ++ if (test_bit(S390_FEAT_MSA_EXT_4, features)) { ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr); ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf); ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo); ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc); ++ } ++ if (test_bit(S390_FEAT_MSA_EXT_5, features)) { ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno); ++ } ++ if (test_bit(S390_FEAT_MSA_EXT_8, features)) { ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma); ++ } ++ if (test_bit(S390_FEAT_MSA_EXT_9, features)) { ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa); ++ } ++ if (test_bit(S390_FEAT_ESORT_BASE, features)) { ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl); ++ } ++ if (test_bit(S390_FEAT_DEFLATE_BASE, features)) { ++ s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc); ++ } ++ return 0; ++} ++ ++static int configure_cpu_subfunc(const S390FeatBitmap features) ++{ ++ struct kvm_s390_vm_cpu_subfunc prop = {}; ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_CPU_MODEL, ++ .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC, ++ .addr = (uint64_t) &prop, ++ }; ++ ++ if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, ++ KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) { ++ /* hardware support might be missing, IBC will handle most of this */ ++ return 0; ++ } ++ ++ s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo); ++ if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) { ++ s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff); ++ } ++ if (test_bit(S390_FEAT_MSA, features)) { ++ s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac); ++ 
s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc); ++ s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km); ++ s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd); ++ s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd); ++ } ++ if (test_bit(S390_FEAT_MSA_EXT_3, features)) { ++ s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo); ++ } ++ if (test_bit(S390_FEAT_MSA_EXT_4, features)) { ++ s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr); ++ s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf); ++ s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo); ++ s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc); ++ } ++ if (test_bit(S390_FEAT_MSA_EXT_5, features)) { ++ s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno); ++ } ++ if (test_bit(S390_FEAT_MSA_EXT_8, features)) { ++ s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma); ++ } ++ if (test_bit(S390_FEAT_MSA_EXT_9, features)) { ++ s390_fill_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa); ++ } ++ if (test_bit(S390_FEAT_ESORT_BASE, features)) { ++ s390_fill_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl); ++ } ++ if (test_bit(S390_FEAT_DEFLATE_BASE, features)) { ++ s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc); ++ } ++ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); ++} ++ ++static int kvm_to_feat[][2] = { ++ { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP }, ++ { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 }, ++ { KVM_S390_VM_CPU_FEAT_64BSCAO , S390_FEAT_SIE_64BSCAO }, ++ { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF }, ++ { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE }, ++ { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS }, ++ { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB }, ++ { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI }, ++ { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS }, ++ { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY }, ++ { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA }, ++ { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI}, ++ { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF}, ++ { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS}, ++}; ++ ++static int query_cpu_feat(S390FeatBitmap features) ++{ ++ struct kvm_s390_vm_cpu_feat prop = {}; ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_CPU_MODEL, ++ .attr = KVM_S390_VM_CPU_MACHINE_FEAT, ++ .addr = (uint64_t) &prop, ++ }; ++ int rc; ++ int i; ++ ++ rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); ++ if (rc) { ++ return rc; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) { ++ if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) { ++ set_bit(kvm_to_feat[i][1], features); ++ } ++ } ++ return 0; ++} ++ ++static int configure_cpu_feat(const S390FeatBitmap features) ++{ ++ struct kvm_s390_vm_cpu_feat prop = {}; ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_CPU_MODEL, ++ .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT, ++ .addr = (uint64_t) &prop, ++ }; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) { ++ if (test_bit(kvm_to_feat[i][1], features)) { ++ set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat); ++ } ++ } ++ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); ++} ++ ++bool kvm_s390_cpu_models_supported(void) ++{ ++ if (!cpu_model_allowed()) { ++ /* compatibility machines interfere with the cpu model */ ++ return false; ++ } ++ return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, ++ KVM_S390_VM_CPU_MACHINE) && ++ kvm_vm_check_attr(kvm_state, 
KVM_S390_VM_CPU_MODEL, ++ KVM_S390_VM_CPU_PROCESSOR) && ++ kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, ++ KVM_S390_VM_CPU_MACHINE_FEAT) && ++ kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, ++ KVM_S390_VM_CPU_PROCESSOR_FEAT) && ++ kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, ++ KVM_S390_VM_CPU_MACHINE_SUBFUNC); ++} ++ ++void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp) ++{ ++ struct kvm_s390_vm_cpu_machine prop = {}; ++ struct kvm_device_attr attr = { ++ .group = KVM_S390_VM_CPU_MODEL, ++ .attr = KVM_S390_VM_CPU_MACHINE, ++ .addr = (uint64_t) &prop, ++ }; ++ uint16_t unblocked_ibc = 0, cpu_type = 0; ++ int rc; ++ ++ memset(model, 0, sizeof(*model)); ++ ++ if (!kvm_s390_cpu_models_supported()) { ++ error_setg(errp, "KVM doesn't support CPU models"); ++ return; ++ } ++ ++ /* query the basic cpu model properties */ ++ rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); ++ if (rc) { ++ error_setg(errp, "KVM: Error querying host CPU model: %d", rc); ++ return; ++ } ++ ++ cpu_type = cpuid_type(prop.cpuid); ++ if (has_ibc(prop.ibc)) { ++ model->lowest_ibc = lowest_ibc(prop.ibc); ++ unblocked_ibc = unblocked_ibc(prop.ibc); ++ } ++ model->cpu_id = cpuid_id(prop.cpuid); ++ model->cpu_id_format = cpuid_format(prop.cpuid); ++ model->cpu_ver = 0xff; ++ ++ /* get supported cpu features indicated via STFL(E) */ ++ s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL, ++ (uint8_t *) prop.fac_mask); ++ /* dat-enhancement facility 2 has no bit but was introduced with stfle */ ++ if (test_bit(S390_FEAT_STFLE, model->features)) { ++ set_bit(S390_FEAT_DAT_ENH_2, model->features); ++ } ++ /* get supported cpu features indicated e.g. via SCLP */ ++ rc = query_cpu_feat(model->features); ++ if (rc) { ++ error_setg(errp, "KVM: Error querying CPU features: %d", rc); ++ return; ++ } ++ /* get supported cpu subfunctions indicated via query / test bit */ ++ rc = query_cpu_subfunc(model->features); ++ if (rc) { ++ error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc); ++ return; ++ } ++ ++ /* PTFF subfunctions might be indicated although kernel support missing */ ++ if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) { ++ clear_bit(S390_FEAT_PTFF_QSIE, model->features); ++ clear_bit(S390_FEAT_PTFF_QTOUE, model->features); ++ clear_bit(S390_FEAT_PTFF_STOE, model->features); ++ clear_bit(S390_FEAT_PTFF_STOUE, model->features); ++ } ++ ++ /* with cpu model support, CMM is only indicated if really available */ ++ if (kvm_s390_cmma_available()) { ++ set_bit(S390_FEAT_CMM, model->features); ++ } else { ++ /* no cmm -> no cmm nt */ ++ clear_bit(S390_FEAT_CMM_NT, model->features); ++ } ++ ++ /* bpb needs kernel support for migration, VSIE and reset */ ++ if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) { ++ clear_bit(S390_FEAT_BPB, model->features); ++ } ++ ++ /* ++ * If we have support for protected virtualization, indicate ++ * the protected virtualization IPL unpack facility. ++ */ ++ if (cap_protected) { ++ set_bit(S390_FEAT_UNPACK, model->features); ++ } ++ ++ /* We emulate a zPCI bus and AEN, therefore we don't need HW support */ ++ set_bit(S390_FEAT_ZPCI, model->features); ++ set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features); ++ ++ if (s390_known_cpu_type(cpu_type)) { ++ /* we want the exact model, even if some features are missing */ ++ model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc), ++ ibc_ec_ga(unblocked_ibc), NULL); ++ } else { ++ /* model unknown, e.g. 
++
++static void kvm_s390_configure_apie(bool interpret)
++{
++    uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE :
++                                KVM_S390_VM_CRYPTO_DISABLE_APIE;
++
++    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
++        kvm_s390_set_attr(attr);
++    }
++}
++
++void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
++{
++    struct kvm_s390_vm_cpu_processor prop = {
++        .fac_list = { 0 },
++    };
++    struct kvm_device_attr attr = {
++        .group = KVM_S390_VM_CPU_MODEL,
++        .attr = KVM_S390_VM_CPU_PROCESSOR,
++        .addr = (uint64_t) &prop,
++    };
++    int rc;
++
++    if (!model) {
++        /* compatibility handling if cpu models are disabled */
++        if (kvm_s390_cmma_available()) {
++            kvm_s390_enable_cmma();
++        }
++        return;
++    }
++    if (!kvm_s390_cpu_models_supported()) {
++        error_setg(errp, "KVM doesn't support CPU models");
++        return;
++    }
++    prop.cpuid = s390_cpuid_from_cpu_model(model);
++    prop.ibc = s390_ibc_from_cpu_model(model);
++    /* configure cpu features indicated via STFL(e) */
++    s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
++                         (uint8_t *) prop.fac_list);
++    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
++    if (rc) {
++        error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
++        return;
++    }
++    /* configure cpu features indicated e.g. via SCLP */
++    rc = configure_cpu_feat(model->features);
++    if (rc) {
++        error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
++        return;
++    }
++    /* configure cpu subfunctions indicated via query / test bit */
++    rc = configure_cpu_subfunc(model->features);
++    if (rc) {
++        error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
++        return;
++    }
++    /* enable CMM via CMMA */
++    if (test_bit(S390_FEAT_CMM, model->features)) {
++        kvm_s390_enable_cmma();
++    }
++
++    if (test_bit(S390_FEAT_AP, model->features)) {
++        kvm_s390_configure_apie(true);
++    }
++}
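Together the two entry points form a get/trim/apply pipeline: read the host model, optionally clear bits the configuration does not want, then push the result to KVM before the guest starts. A condensed sketch of that flow (run_with_host_model() is an invented wrapper and assumes a non-NULL errp):

/* Illustrative flow: host model minus one feature, applied to the VM. */
static void run_with_host_model(Error **errp)
{
    S390CPUModel model;

    kvm_s390_get_host_cpu_model(&model, errp);  /* query host facilities */
    if (*errp) {
        return;
    }
    clear_bit(S390_FEAT_AP, model.features);    /* example: drop a feature */
    kvm_s390_apply_cpu_model(&model, errp);     /* push to KVM before boot */
}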
++
++void kvm_s390_restart_interrupt(S390CPU *cpu)
++{
++    struct kvm_s390_irq irq = {
++        .type = KVM_S390_RESTART,
++    };
++
++    kvm_s390_vcpu_interrupt(cpu, &irq);
++}
++
++void kvm_s390_stop_interrupt(S390CPU *cpu)
++{
++    struct kvm_s390_irq irq = {
++        .type = KVM_S390_SIGP_STOP,
++    };
++
++    kvm_s390_vcpu_interrupt(cpu, &irq);
++}
++
++bool kvm_arch_cpu_check_are_resettable(void)
++{
++    return true;
++}
+diff --git a/target/s390x/kvm/kvm_s390x.h b/target/s390x/kvm/kvm_s390x.h
+new file mode 100644
+index 0000000000..05a5e1e6f4
+--- /dev/null
++++ b/target/s390x/kvm/kvm_s390x.h
+@@ -0,0 +1,49 @@
++/*
++ * QEMU KVM support -- s390x specific functions.
++ *
++ * Copyright (c) 2009 Ulrich Hecht
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2 or later.
++ * See the COPYING file in the top-level directory.
++ */
++
++#ifndef KVM_S390X_H
++#define KVM_S390X_H
++
++#include "cpu-qom.h"
++
++struct kvm_s390_irq;
++
++void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq);
++void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
++void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
++int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
++                    int len, bool is_write);
++int kvm_s390_mem_op_pv(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
++                       bool is_write);
++void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code);
++int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
++void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu);
++int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
++int kvm_s390_get_hpage_1m(void);
++int kvm_s390_get_ri(void);
++int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
++int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_clock);
++int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_clock);
++int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_clock);
++void kvm_s390_enable_css_support(S390CPU *cpu);
++int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
++                                    int vq, bool assign);
++int kvm_s390_cmma_active(void);
++void kvm_s390_cmma_reset(void);
++void kvm_s390_reset_vcpu_clear(S390CPU *cpu);
++void kvm_s390_reset_vcpu_normal(S390CPU *cpu);
++void kvm_s390_reset_vcpu_initial(S390CPU *cpu);
++int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit);
++void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp);
++void kvm_s390_crypto_reset(void);
++void kvm_s390_restart_interrupt(S390CPU *cpu);
++void kvm_s390_stop_interrupt(S390CPU *cpu);
++void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info);
++
++#endif /* KVM_S390X_H */
+diff --git a/target/s390x/kvm/meson.build b/target/s390x/kvm/meson.build
+new file mode 100644
+index 0000000000..d1356356b1
+--- /dev/null
++++ b/target/s390x/kvm/meson.build
+@@ -0,0 +1,17 @@
++
++s390x_ss.add(when: 'CONFIG_KVM', if_true: files(
++  'kvm.c'
++))
++
++# Newer kernels on s390 check for an S390_PGSTE program header and
++# enable the pgste page table extensions in that case. This makes
++# the vm.allocate_pgste sysctl unnecessary. We enable this program
++# header if
++#  - we build on s390x
++#  - we build the system emulation for s390x (qemu-system-s390x)
++#  - KVM is enabled
++#  - the linker supports --s390-pgste
++if host_machine.cpu_family() == 's390x' and cc.has_link_argument('-Wl,--s390-pgste')
++  s390x_softmmu_ss.add(when: 'CONFIG_KVM',
++                       if_true: declare_dependency(link_args: ['-Wl,--s390-pgste']))
++endif
+diff --git a/target/s390x/kvm/trace-events b/target/s390x/kvm/trace-events
+new file mode 100644
+index 0000000000..5289f5f675
+--- /dev/null
++++ b/target/s390x/kvm/trace-events
+@@ -0,0 +1,7 @@
++# See docs/devel/tracing.txt for syntax documentation.
++
++# kvm.c
++kvm_enable_cmma(int rc) "CMMA: enabling with result code %d"
++kvm_clear_cmma(int rc) "CMMA: clearing with result code %d"
++kvm_failed_cpu_state_set(int cpu_index, uint8_t state, const char *msg) "Warning: Unable to set cpu %d state %" PRIu8 " to KVM: %s"
++kvm_assign_subch_ioeventfd(int fd, uint32_t addr, bool assign, int datamatch) "fd: %d sch: @0x%x assign: %d vq: %d"
+diff --git a/target/s390x/kvm/trace.h b/target/s390x/kvm/trace.h
+new file mode 100644
+index 0000000000..ae195b1306
+--- /dev/null
++++ b/target/s390x/kvm/trace.h
+@@ -0,0 +1 @@
++#include "trace/trace-target_s390x_kvm.h"
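Each entry in the new trace-events file is expanded by tracetool into a trace_*() helper with matching arguments, which kvm.c reaches through the one-line trace.h shim above; kvm_enable_cmma(int rc) becomes trace_kvm_enable_cmma(), for example. A hedged call-side sketch (report_cmma_result() is an invented wrapper; the trace_ function itself is generated):

#include "trace.h"   /* the shim added above */

/* Illustrative wrapper: emit the kvm_enable_cmma event for a result code. */
static void report_cmma_result(int rc)
{
    trace_kvm_enable_cmma(rc);   /* matches: kvm_enable_cmma(int rc) */
}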
+diff --git a/target/s390x/kvm_s390x.h b/target/s390x/kvm_s390x.h
+deleted file mode 100644
+index 05a5e1e6f4..0000000000
+--- a/target/s390x/kvm_s390x.h
++++ /dev/null
+@@ -1,49 +0,0 @@
+-/*
+- * QEMU KVM support -- s390x specific functions.
+- *
+- * Copyright (c) 2009 Ulrich Hecht
+- *
+- * This work is licensed under the terms of the GNU GPL, version 2 or later.
+- * See the COPYING file in the top-level directory.
+- */
+-
+-#ifndef KVM_S390X_H
+-#define KVM_S390X_H
+-
+-#include "cpu-qom.h"
+-
+-struct kvm_s390_irq;
+-
+-void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq);
+-void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
+-void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
+-int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+-                    int len, bool is_write);
+-int kvm_s390_mem_op_pv(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
+-                       bool is_write);
+-void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code);
+-int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
+-void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu);
+-int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
+-int kvm_s390_get_hpage_1m(void);
+-int kvm_s390_get_ri(void);
+-int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
+-int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_clock);
+-int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_clock);
+-int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_clock);
+-void kvm_s390_enable_css_support(S390CPU *cpu);
+-int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
+-                                    int vq, bool assign);
+-int kvm_s390_cmma_active(void);
+-void kvm_s390_cmma_reset(void);
+-void kvm_s390_reset_vcpu_clear(S390CPU *cpu);
+-void kvm_s390_reset_vcpu_normal(S390CPU *cpu);
+-void kvm_s390_reset_vcpu_initial(S390CPU *cpu);
+-int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit);
+-void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp);
+-void kvm_s390_crypto_reset(void);
+-void kvm_s390_restart_interrupt(S390CPU *cpu);
+-void kvm_s390_stop_interrupt(S390CPU *cpu);
+-void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info);
+-
+-#endif /* KVM_S390X_H */
+diff --git a/target/s390x/machine.c b/target/s390x/machine.c
+index 5b4e82f1ab..37a076858c 100644
+--- a/target/s390x/machine.c
++++ b/target/s390x/machine.c
+@@ -16,10 +16,10 @@
+ 
+ #include "qemu/osdep.h"
+ #include "cpu.h"
+-#include "internal.h"
+-#include "kvm_s390x.h"
++#include "s390x-internal.h"
++#include "kvm/kvm_s390x.h"
+ #include "migration/vmstate.h"
+-#include "tcg_s390x.h"
++#include "tcg/tcg_s390x.h"
+ #include "sysemu/kvm.h"
+ #include "sysemu/tcg.h"
+ 
+diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
+deleted file mode 100644
+index f6a7d29273..0000000000
+--- a/target/s390x/mem_helper.c
++++ /dev/null
+@@ -1,3008 +0,0 @@
+-/*
+- * S/390 memory access helper routines
+- *
+- * Copyright (c) 2009 Ulrich Hecht
+- * Copyright (c) 2009 Alexander Graf
+- *
+- * This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU Lesser General Public
+- * License as published by the Free Software Foundation; either
+- * version 2.1 of the License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * Lesser General Public License for more details.
+- *
+- * You should have received a copy of the GNU Lesser General Public
+- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+- */ +- +-#include "qemu/osdep.h" +-#include "cpu.h" +-#include "internal.h" +-#include "tcg_s390x.h" +-#include "exec/helper-proto.h" +-#include "exec/exec-all.h" +-#include "exec/cpu_ldst.h" +-#include "qemu/int128.h" +-#include "qemu/atomic128.h" +-#include "tcg/tcg.h" +- +-#if !defined(CONFIG_USER_ONLY) +-#include "hw/s390x/storage-keys.h" +-#include "hw/boards.h" +-#endif +- +-/*****************************************************************************/ +-/* Softmmu support */ +- +-/* #define DEBUG_HELPER */ +-#ifdef DEBUG_HELPER +-#define HELPER_LOG(x...) qemu_log(x) +-#else +-#define HELPER_LOG(x...) +-#endif +- +-static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key) +-{ +- uint16_t pkm = env->cregs[3] >> 16; +- +- if (env->psw.mask & PSW_MASK_PSTATE) { +- /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */ +- return pkm & (0x80 >> psw_key); +- } +- return true; +-} +- +-static bool is_destructive_overlap(CPUS390XState *env, uint64_t dest, +- uint64_t src, uint32_t len) +-{ +- if (!len || src == dest) { +- return false; +- } +- /* Take care of wrapping at the end of address space. */ +- if (unlikely(wrap_address(env, src + len - 1) < src)) { +- return dest > src || dest <= wrap_address(env, src + len - 1); +- } +- return dest > src && dest <= src + len - 1; +-} +- +-/* Trigger a SPECIFICATION exception if an address or a length is not +- naturally aligned. */ +-static inline void check_alignment(CPUS390XState *env, uint64_t v, +- int wordsize, uintptr_t ra) +-{ +- if (v % wordsize) { +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); +- } +-} +- +-/* Load a value from memory according to its size. */ +-static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr, +- int wordsize, uintptr_t ra) +-{ +- switch (wordsize) { +- case 1: +- return cpu_ldub_data_ra(env, addr, ra); +- case 2: +- return cpu_lduw_data_ra(env, addr, ra); +- default: +- abort(); +- } +-} +- +-/* Store a to memory according to its size. */ +-static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr, +- uint64_t value, int wordsize, +- uintptr_t ra) +-{ +- switch (wordsize) { +- case 1: +- cpu_stb_data_ra(env, addr, value, ra); +- break; +- case 2: +- cpu_stw_data_ra(env, addr, value, ra); +- break; +- default: +- abort(); +- } +-} +- +-/* An access covers at most 4096 bytes and therefore at most two pages. */ +-typedef struct S390Access { +- target_ulong vaddr1; +- target_ulong vaddr2; +- char *haddr1; +- char *haddr2; +- uint16_t size1; +- uint16_t size2; +- /* +- * If we can't access the host page directly, we'll have to do I/O access +- * via ld/st helpers. These are internal details, so we store the +- * mmu idx to do the access here instead of passing it around in the +- * helpers. Maybe, one day we can get rid of ld/st access - once we can +- * handle TLB_NOTDIRTY differently. We don't expect these special accesses +- * to trigger exceptions - only if we would have TLB_NOTDIRTY on LAP +- * pages, we might trigger a new MMU translation - very unlikely that +- * the mapping changes in between and we would trigger a fault. +- */ +- int mmu_idx; +-} S390Access; +- +-/* +- * With nonfault=1, return the PGM_ exception that would have been injected +- * into the guest; return 0 if no exception was detected. +- * +- * For !CONFIG_USER_ONLY, the TEC is stored stored to env->tlb_fill_tec. +- * For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr. 
+- */ +-static int s390_probe_access(CPUArchState *env, target_ulong addr, int size, +- MMUAccessType access_type, int mmu_idx, +- bool nonfault, void **phost, uintptr_t ra) +-{ +- int flags; +- +-#if defined(CONFIG_USER_ONLY) +- flags = page_get_flags(addr); +- if (!(flags & (access_type == MMU_DATA_LOAD ? PAGE_READ : PAGE_WRITE_ORG))) { +- env->__excp_addr = addr; +- flags = (flags & PAGE_VALID) ? PGM_PROTECTION : PGM_ADDRESSING; +- if (nonfault) { +- return flags; +- } +- tcg_s390_program_interrupt(env, flags, ra); +- } +- *phost = g2h(env_cpu(env), addr); +-#else +- /* +- * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL +- * to detect if there was an exception during tlb_fill(). +- */ +- env->tlb_fill_exc = 0; +- flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost, +- ra); +- if (env->tlb_fill_exc) { +- return env->tlb_fill_exc; +- } +- +- if (unlikely(flags & TLB_WATCHPOINT)) { +- /* S390 does not presently use transaction attributes. */ +- cpu_check_watchpoint(env_cpu(env), addr, size, +- MEMTXATTRS_UNSPECIFIED, +- (access_type == MMU_DATA_STORE +- ? BP_MEM_WRITE : BP_MEM_READ), ra); +- } +-#endif +- return 0; +-} +- +-static int access_prepare_nf(S390Access *access, CPUS390XState *env, +- bool nonfault, vaddr vaddr1, int size, +- MMUAccessType access_type, +- int mmu_idx, uintptr_t ra) +-{ +- void *haddr1, *haddr2 = NULL; +- int size1, size2, exc; +- vaddr vaddr2 = 0; +- +- assert(size > 0 && size <= 4096); +- +- size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK)), +- size2 = size - size1; +- +- exc = s390_probe_access(env, vaddr1, size1, access_type, mmu_idx, nonfault, +- &haddr1, ra); +- if (exc) { +- return exc; +- } +- if (unlikely(size2)) { +- /* The access crosses page boundaries. */ +- vaddr2 = wrap_address(env, vaddr1 + size1); +- exc = s390_probe_access(env, vaddr2, size2, access_type, mmu_idx, +- nonfault, &haddr2, ra); +- if (exc) { +- return exc; +- } +- } +- +- *access = (S390Access) { +- .vaddr1 = vaddr1, +- .vaddr2 = vaddr2, +- .haddr1 = haddr1, +- .haddr2 = haddr2, +- .size1 = size1, +- .size2 = size2, +- .mmu_idx = mmu_idx +- }; +- return 0; +-} +- +-static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size, +- MMUAccessType access_type, int mmu_idx, +- uintptr_t ra) +-{ +- S390Access ret; +- int exc = access_prepare_nf(&ret, env, false, vaddr, size, +- access_type, mmu_idx, ra); +- assert(!exc); +- return ret; +-} +- +-/* Helper to handle memset on a single page. */ +-static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr, +- uint8_t byte, uint16_t size, int mmu_idx, +- uintptr_t ra) +-{ +-#ifdef CONFIG_USER_ONLY +- g_assert(haddr); +- memset(haddr, byte, size); +-#else +- TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); +- int i; +- +- if (likely(haddr)) { +- memset(haddr, byte, size); +- } else { +- /* +- * Do a single access and test if we can then get access to the +- * page. This is especially relevant to speed up TLB_NOTDIRTY. 
+- */ +- g_assert(size > 0); +- helper_ret_stb_mmu(env, vaddr, byte, oi, ra); +- haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); +- if (likely(haddr)) { +- memset(haddr + 1, byte, size - 1); +- } else { +- for (i = 1; i < size; i++) { +- helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra); +- } +- } +- } +-#endif +-} +- +-static void access_memset(CPUS390XState *env, S390Access *desta, +- uint8_t byte, uintptr_t ra) +-{ +- +- do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1, +- desta->mmu_idx, ra); +- if (likely(!desta->size2)) { +- return; +- } +- do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2, +- desta->mmu_idx, ra); +-} +- +-static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr, +- int offset, int mmu_idx, uintptr_t ra) +-{ +-#ifdef CONFIG_USER_ONLY +- return ldub_p(*haddr + offset); +-#else +- TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); +- uint8_t byte; +- +- if (likely(*haddr)) { +- return ldub_p(*haddr + offset); +- } +- /* +- * Do a single access and test if we can then get access to the +- * page. This is especially relevant to speed up TLB_NOTDIRTY. +- */ +- byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra); +- *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx); +- return byte; +-#endif +-} +- +-static uint8_t access_get_byte(CPUS390XState *env, S390Access *access, +- int offset, uintptr_t ra) +-{ +- if (offset < access->size1) { +- return do_access_get_byte(env, access->vaddr1, &access->haddr1, +- offset, access->mmu_idx, ra); +- } +- return do_access_get_byte(env, access->vaddr2, &access->haddr2, +- offset - access->size1, access->mmu_idx, ra); +-} +- +-static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr, +- int offset, uint8_t byte, int mmu_idx, +- uintptr_t ra) +-{ +-#ifdef CONFIG_USER_ONLY +- stb_p(*haddr + offset, byte); +-#else +- TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); +- +- if (likely(*haddr)) { +- stb_p(*haddr + offset, byte); +- return; +- } +- /* +- * Do a single access and test if we can then get access to the +- * page. This is especially relevant to speed up TLB_NOTDIRTY. +- */ +- helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra); +- *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); +-#endif +-} +- +-static void access_set_byte(CPUS390XState *env, S390Access *access, +- int offset, uint8_t byte, uintptr_t ra) +-{ +- if (offset < access->size1) { +- do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte, +- access->mmu_idx, ra); +- } else { +- do_access_set_byte(env, access->vaddr2, &access->haddr2, +- offset - access->size1, byte, access->mmu_idx, ra); +- } +-} +- +-/* +- * Move data with the same semantics as memmove() in case ranges don't overlap +- * or src > dest. Undefined behavior on destructive overlaps. 
+- */ +-static void access_memmove(CPUS390XState *env, S390Access *desta, +- S390Access *srca, uintptr_t ra) +-{ +- int diff; +- +- g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2); +- +- /* Fallback to slow access in case we don't have access to all host pages */ +- if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) || +- !srca->haddr1 || (srca->size2 && !srca->haddr2))) { +- int i; +- +- for (i = 0; i < desta->size1 + desta->size2; i++) { +- uint8_t byte = access_get_byte(env, srca, i, ra); +- +- access_set_byte(env, desta, i, byte, ra); +- } +- return; +- } +- +- if (srca->size1 == desta->size1) { +- memmove(desta->haddr1, srca->haddr1, srca->size1); +- if (unlikely(srca->size2)) { +- memmove(desta->haddr2, srca->haddr2, srca->size2); +- } +- } else if (srca->size1 < desta->size1) { +- diff = desta->size1 - srca->size1; +- memmove(desta->haddr1, srca->haddr1, srca->size1); +- memmove(desta->haddr1 + srca->size1, srca->haddr2, diff); +- if (likely(desta->size2)) { +- memmove(desta->haddr2, srca->haddr2 + diff, desta->size2); +- } +- } else { +- diff = srca->size1 - desta->size1; +- memmove(desta->haddr1, srca->haddr1, desta->size1); +- memmove(desta->haddr2, srca->haddr1 + desta->size1, diff); +- if (likely(srca->size2)) { +- memmove(desta->haddr2 + diff, srca->haddr2, srca->size2); +- } +- } +-} +- +-static int mmu_idx_from_as(uint8_t as) +-{ +- switch (as) { +- case AS_PRIMARY: +- return MMU_PRIMARY_IDX; +- case AS_SECONDARY: +- return MMU_SECONDARY_IDX; +- case AS_HOME: +- return MMU_HOME_IDX; +- default: +- /* FIXME AS_ACCREG */ +- g_assert_not_reached(); +- } +-} +- +-/* and on array */ +-static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest, +- uint64_t src, uintptr_t ra) +-{ +- const int mmu_idx = cpu_mmu_index(env, false); +- S390Access srca1, srca2, desta; +- uint32_t i; +- uint8_t c = 0; +- +- HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", +- __func__, l, dest, src); +- +- /* NC always processes one more byte than specified - maximum is 256 */ +- l++; +- +- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); +- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); +- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); +- for (i = 0; i < l; i++) { +- const uint8_t x = access_get_byte(env, &srca1, i, ra) & +- access_get_byte(env, &srca2, i, ra); +- +- c |= x; +- access_set_byte(env, &desta, i, x, ra); +- } +- return c != 0; +-} +- +-uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest, +- uint64_t src) +-{ +- return do_helper_nc(env, l, dest, src, GETPC()); +-} +- +-/* xor on array */ +-static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest, +- uint64_t src, uintptr_t ra) +-{ +- const int mmu_idx = cpu_mmu_index(env, false); +- S390Access srca1, srca2, desta; +- uint32_t i; +- uint8_t c = 0; +- +- HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", +- __func__, l, dest, src); +- +- /* XC always processes one more byte than specified - maximum is 256 */ +- l++; +- +- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); +- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); +- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); +- +- /* xor with itself is the same as memset(0) */ +- if (src == dest) { +- access_memset(env, &desta, 0, ra); +- return 0; +- } +- +- for (i = 0; i < l; i++) { +- const uint8_t x = access_get_byte(env, &srca1, i, ra) ^ +- access_get_byte(env, &srca2, i, ra); +- +- c |= x; +- 
access_set_byte(env, &desta, i, x, ra); +- } +- return c != 0; +-} +- +-uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest, +- uint64_t src) +-{ +- return do_helper_xc(env, l, dest, src, GETPC()); +-} +- +-/* or on array */ +-static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest, +- uint64_t src, uintptr_t ra) +-{ +- const int mmu_idx = cpu_mmu_index(env, false); +- S390Access srca1, srca2, desta; +- uint32_t i; +- uint8_t c = 0; +- +- HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", +- __func__, l, dest, src); +- +- /* OC always processes one more byte than specified - maximum is 256 */ +- l++; +- +- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); +- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); +- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); +- for (i = 0; i < l; i++) { +- const uint8_t x = access_get_byte(env, &srca1, i, ra) | +- access_get_byte(env, &srca2, i, ra); +- +- c |= x; +- access_set_byte(env, &desta, i, x, ra); +- } +- return c != 0; +-} +- +-uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest, +- uint64_t src) +-{ +- return do_helper_oc(env, l, dest, src, GETPC()); +-} +- +-/* memmove */ +-static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest, +- uint64_t src, uintptr_t ra) +-{ +- const int mmu_idx = cpu_mmu_index(env, false); +- S390Access srca, desta; +- uint32_t i; +- +- HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", +- __func__, l, dest, src); +- +- /* MVC always copies one more byte than specified - maximum is 256 */ +- l++; +- +- srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); +- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); +- +- /* +- * "When the operands overlap, the result is obtained as if the operands +- * were processed one byte at a time". Only non-destructive overlaps +- * behave like memmove(). 
+- */ +- if (dest == src + 1) { +- access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra); +- } else if (!is_destructive_overlap(env, dest, src, l)) { +- access_memmove(env, &desta, &srca, ra); +- } else { +- for (i = 0; i < l; i++) { +- uint8_t byte = access_get_byte(env, &srca, i, ra); +- +- access_set_byte(env, &desta, i, byte, ra); +- } +- } +- +- return env->cc_op; +-} +- +-void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) +-{ +- do_helper_mvc(env, l, dest, src, GETPC()); +-} +- +-/* move inverse */ +-void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) +-{ +- const int mmu_idx = cpu_mmu_index(env, false); +- S390Access srca, desta; +- uintptr_t ra = GETPC(); +- int i; +- +- /* MVCIN always copies one more byte than specified - maximum is 256 */ +- l++; +- +- src = wrap_address(env, src - l + 1); +- srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); +- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); +- for (i = 0; i < l; i++) { +- const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra); +- +- access_set_byte(env, &desta, i, x, ra); +- } +-} +- +-/* move numerics */ +-void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) +-{ +- const int mmu_idx = cpu_mmu_index(env, false); +- S390Access srca1, srca2, desta; +- uintptr_t ra = GETPC(); +- int i; +- +- /* MVN always copies one more byte than specified - maximum is 256 */ +- l++; +- +- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); +- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); +- desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); +- for (i = 0; i < l; i++) { +- const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) | +- (access_get_byte(env, &srca2, i, ra) & 0xf0); +- +- access_set_byte(env, &desta, i, x, ra); +- } +-} +- +-/* move with offset */ +-void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) +-{ +- const int mmu_idx = cpu_mmu_index(env, false); +- /* MVO always processes one more byte than specified - maximum is 16 */ +- const int len_dest = (l >> 4) + 1; +- const int len_src = (l & 0xf) + 1; +- uintptr_t ra = GETPC(); +- uint8_t byte_dest, byte_src; +- S390Access srca, desta; +- int i, j; +- +- srca = access_prepare(env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra); +- desta = access_prepare(env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra); +- +- /* Handle rightmost byte */ +- byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra); +- byte_src = access_get_byte(env, &srca, len_src - 1, ra); +- byte_dest = (byte_dest & 0x0f) | (byte_src << 4); +- access_set_byte(env, &desta, len_dest - 1, byte_dest, ra); +- +- /* Process remaining bytes from right to left */ +- for (i = len_dest - 2, j = len_src - 2; i >= 0; i--, j--) { +- byte_dest = byte_src >> 4; +- if (j >= 0) { +- byte_src = access_get_byte(env, &srca, j, ra); +- } else { +- byte_src = 0; +- } +- byte_dest |= byte_src << 4; +- access_set_byte(env, &desta, i, byte_dest, ra); +- } +-} +- +-/* move zones */ +-void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) +-{ +- const int mmu_idx = cpu_mmu_index(env, false); +- S390Access srca1, srca2, desta; +- uintptr_t ra = GETPC(); +- int i; +- +- /* MVZ always copies one more byte than specified - maximum is 256 */ +- l++; +- +- srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); +- srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); +- desta = access_prepare(env, 
dest, l, MMU_DATA_STORE, mmu_idx, ra); +- for (i = 0; i < l; i++) { +- const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) | +- (access_get_byte(env, &srca2, i, ra) & 0x0f); +- +- access_set_byte(env, &desta, i, x, ra); +- } +-} +- +-/* compare unsigned byte arrays */ +-static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1, +- uint64_t s2, uintptr_t ra) +-{ +- uint32_t i; +- uint32_t cc = 0; +- +- HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n", +- __func__, l, s1, s2); +- +- for (i = 0; i <= l; i++) { +- uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra); +- uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra); +- HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y); +- if (x < y) { +- cc = 1; +- break; +- } else if (x > y) { +- cc = 2; +- break; +- } +- } +- +- HELPER_LOG("\n"); +- return cc; +-} +- +-uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2) +-{ +- return do_helper_clc(env, l, s1, s2, GETPC()); +-} +- +-/* compare logical under mask */ +-uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask, +- uint64_t addr) +-{ +- uintptr_t ra = GETPC(); +- uint32_t cc = 0; +- +- HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1, +- mask, addr); +- +- while (mask) { +- if (mask & 8) { +- uint8_t d = cpu_ldub_data_ra(env, addr, ra); +- uint8_t r = extract32(r1, 24, 8); +- HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d, +- addr); +- if (r < d) { +- cc = 1; +- break; +- } else if (r > d) { +- cc = 2; +- break; +- } +- addr++; +- } +- mask = (mask << 1) & 0xf; +- r1 <<= 8; +- } +- +- HELPER_LOG("\n"); +- return cc; +-} +- +-static inline uint64_t get_address(CPUS390XState *env, int reg) +-{ +- return wrap_address(env, env->regs[reg]); +-} +- +-/* +- * Store the address to the given register, zeroing out unused leftmost +- * bits in bit positions 32-63 (24-bit and 31-bit mode only). +- */ +-static inline void set_address_zero(CPUS390XState *env, int reg, +- uint64_t address) +-{ +- if (env->psw.mask & PSW_MASK_64) { +- env->regs[reg] = address; +- } else { +- if (!(env->psw.mask & PSW_MASK_32)) { +- address &= 0x00ffffff; +- } else { +- address &= 0x7fffffff; +- } +- env->regs[reg] = deposit64(env->regs[reg], 0, 32, address); +- } +-} +- +-static inline void set_address(CPUS390XState *env, int reg, uint64_t address) +-{ +- if (env->psw.mask & PSW_MASK_64) { +- /* 64-Bit mode */ +- env->regs[reg] = address; +- } else { +- if (!(env->psw.mask & PSW_MASK_32)) { +- /* 24-Bit mode. According to the PoO it is implementation +- dependent if bits 32-39 remain unchanged or are set to +- zeros. Choose the former so that the function can also be +- used for TRT. */ +- env->regs[reg] = deposit64(env->regs[reg], 0, 24, address); +- } else { +- /* 31-Bit mode. According to the PoO it is implementation +- dependent if bit 32 remains unchanged or is set to zero. +- Choose the latter so that the function can also be used for +- TRT. 
*/ +- address &= 0x7fffffff; +- env->regs[reg] = deposit64(env->regs[reg], 0, 32, address); +- } +- } +-} +- +-static inline uint64_t wrap_length32(CPUS390XState *env, uint64_t length) +-{ +- if (!(env->psw.mask & PSW_MASK_64)) { +- return (uint32_t)length; +- } +- return length; +-} +- +-static inline uint64_t wrap_length31(CPUS390XState *env, uint64_t length) +-{ +- if (!(env->psw.mask & PSW_MASK_64)) { +- /* 24-Bit and 31-Bit mode */ +- length &= 0x7fffffff; +- } +- return length; +-} +- +-static inline uint64_t get_length(CPUS390XState *env, int reg) +-{ +- return wrap_length31(env, env->regs[reg]); +-} +- +-static inline void set_length(CPUS390XState *env, int reg, uint64_t length) +-{ +- if (env->psw.mask & PSW_MASK_64) { +- /* 64-Bit mode */ +- env->regs[reg] = length; +- } else { +- /* 24-Bit and 31-Bit mode */ +- env->regs[reg] = deposit64(env->regs[reg], 0, 32, length); +- } +-} +- +-/* search string (c is byte to search, r2 is string, r1 end of string) */ +-void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2) +-{ +- uintptr_t ra = GETPC(); +- uint64_t end, str; +- uint32_t len; +- uint8_t v, c = env->regs[0]; +- +- /* Bits 32-55 must contain all 0. */ +- if (env->regs[0] & 0xffffff00u) { +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); +- } +- +- str = get_address(env, r2); +- end = get_address(env, r1); +- +- /* Lest we fail to service interrupts in a timely manner, limit the +- amount of work we're willing to do. For now, let's cap at 8k. */ +- for (len = 0; len < 0x2000; ++len) { +- if (str + len == end) { +- /* Character not found. R1 & R2 are unmodified. */ +- env->cc_op = 2; +- return; +- } +- v = cpu_ldub_data_ra(env, str + len, ra); +- if (v == c) { +- /* Character found. Set R1 to the location; R2 is unmodified. */ +- env->cc_op = 1; +- set_address(env, r1, str + len); +- return; +- } +- } +- +- /* CPU-determined bytes processed. Advance R2 to next byte to process. */ +- env->cc_op = 3; +- set_address(env, r2, str + len); +-} +- +-void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2) +-{ +- uintptr_t ra = GETPC(); +- uint32_t len; +- uint16_t v, c = env->regs[0]; +- uint64_t end, str, adj_end; +- +- /* Bits 32-47 of R0 must be zero. */ +- if (env->regs[0] & 0xffff0000u) { +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); +- } +- +- str = get_address(env, r2); +- end = get_address(env, r1); +- +- /* If the LSB of the two addresses differ, use one extra byte. */ +- adj_end = end + ((str ^ end) & 1); +- +- /* Lest we fail to service interrupts in a timely manner, limit the +- amount of work we're willing to do. For now, let's cap at 8k. */ +- for (len = 0; len < 0x2000; len += 2) { +- if (str + len == adj_end) { +- /* End of input found. */ +- env->cc_op = 2; +- return; +- } +- v = cpu_lduw_data_ra(env, str + len, ra); +- if (v == c) { +- /* Character found. Set R1 to the location; R2 is unmodified. */ +- env->cc_op = 1; +- set_address(env, r1, str + len); +- return; +- } +- } +- +- /* CPU-determined bytes processed. Advance R2 to next byte to process. */ +- env->cc_op = 3; +- set_address(env, r2, str + len); +-} +- +-/* unsigned string compare (c is string terminator) */ +-uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2) +-{ +- uintptr_t ra = GETPC(); +- uint32_t len; +- +- c = c & 0xff; +- s1 = wrap_address(env, s1); +- s2 = wrap_address(env, s2); +- +- /* Lest we fail to service interrupts in a timely manner, limit the +- amount of work we're willing to do. For now, let's cap at 8k. 
*/ +- for (len = 0; len < 0x2000; ++len) { +- uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra); +- uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra); +- if (v1 == v2) { +- if (v1 == c) { +- /* Equal. CC=0, and don't advance the registers. */ +- env->cc_op = 0; +- env->retxl = s2; +- return s1; +- } +- } else { +- /* Unequal. CC={1,2}, and advance the registers. Note that +- the terminator need not be zero, but the string that contains +- the terminator is by definition "low". */ +- env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2); +- env->retxl = s2 + len; +- return s1 + len; +- } +- } +- +- /* CPU-determined bytes equal; advance the registers. */ +- env->cc_op = 3; +- env->retxl = s2 + len; +- return s1 + len; +-} +- +-/* move page */ +-uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint32_t r1, uint32_t r2) +-{ +- const uint64_t src = get_address(env, r2) & TARGET_PAGE_MASK; +- const uint64_t dst = get_address(env, r1) & TARGET_PAGE_MASK; +- const int mmu_idx = cpu_mmu_index(env, false); +- const bool f = extract64(r0, 11, 1); +- const bool s = extract64(r0, 10, 1); +- const bool cco = extract64(r0, 8, 1); +- uintptr_t ra = GETPC(); +- S390Access srca, desta; +- int exc; +- +- if ((f && s) || extract64(r0, 12, 4)) { +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); +- } +- +- /* +- * We always manually handle exceptions such that we can properly store +- * r1/r2 to the lowcore on page-translation exceptions. +- * +- * TODO: Access key handling +- */ +- exc = access_prepare_nf(&srca, env, true, src, TARGET_PAGE_SIZE, +- MMU_DATA_LOAD, mmu_idx, ra); +- if (exc) { +- if (cco) { +- return 2; +- } +- goto inject_exc; +- } +- exc = access_prepare_nf(&desta, env, true, dst, TARGET_PAGE_SIZE, +- MMU_DATA_STORE, mmu_idx, ra); +- if (exc) { +- if (cco && exc != PGM_PROTECTION) { +- return 1; +- } +- goto inject_exc; +- } +- access_memmove(env, &desta, &srca, ra); +- return 0; /* data moved */ +-inject_exc: +-#if !defined(CONFIG_USER_ONLY) +- if (exc != PGM_ADDRESSING) { +- stq_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, trans_exc_code), +- env->tlb_fill_tec); +- } +- if (exc == PGM_PAGE_TRANS) { +- stb_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, op_access_id), +- r1 << 4 | r2); +- } +-#endif +- tcg_s390_program_interrupt(env, exc, ra); +-} +- +-/* string copy */ +-uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2) +-{ +- const int mmu_idx = cpu_mmu_index(env, false); +- const uint64_t d = get_address(env, r1); +- const uint64_t s = get_address(env, r2); +- const uint8_t c = env->regs[0]; +- const int len = MIN(-(d | TARGET_PAGE_MASK), -(s | TARGET_PAGE_MASK)); +- S390Access srca, desta; +- uintptr_t ra = GETPC(); +- int i; +- +- if (env->regs[0] & 0xffffff00ull) { +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); +- } +- +- /* +- * Our access should not exceed single pages, as we must not report access +- * exceptions exceeding the actually copied range (which we don't know at +- * this point). We might over-indicate watchpoints within the pages +- * (if we ever care, we have to limit processing to a single byte). 
+- */ +- srca = access_prepare(env, s, len, MMU_DATA_LOAD, mmu_idx, ra); +- desta = access_prepare(env, d, len, MMU_DATA_STORE, mmu_idx, ra); +- for (i = 0; i < len; i++) { +- const uint8_t v = access_get_byte(env, &srca, i, ra); +- +- access_set_byte(env, &desta, i, v, ra); +- if (v == c) { +- set_address_zero(env, r1, d + i); +- return 1; +- } +- } +- set_address_zero(env, r1, d + len); +- set_address_zero(env, r2, s + len); +- return 3; +-} +- +-/* load access registers r1 to r3 from memory at a2 */ +-void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) +-{ +- uintptr_t ra = GETPC(); +- int i; +- +- if (a2 & 0x3) { +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); +- } +- +- for (i = r1;; i = (i + 1) % 16) { +- env->aregs[i] = cpu_ldl_data_ra(env, a2, ra); +- a2 += 4; +- +- if (i == r3) { +- break; +- } +- } +-} +- +-/* store access registers r1 to r3 in memory at a2 */ +-void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) +-{ +- uintptr_t ra = GETPC(); +- int i; +- +- if (a2 & 0x3) { +- tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); +- } +- +- for (i = r1;; i = (i + 1) % 16) { +- cpu_stl_data_ra(env, a2, env->aregs[i], ra); +- a2 += 4; +- +- if (i == r3) { +- break; +- } +- } +-} +- +-/* move long helper */ +-static inline uint32_t do_mvcl(CPUS390XState *env, +- uint64_t *dest, uint64_t *destlen, +- uint64_t *src, uint64_t *srclen, +- uint16_t pad, int wordsize, uintptr_t ra) +-{ +- const int mmu_idx = cpu_mmu_index(env, false); +- int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK)); +- S390Access srca, desta; +- int i, cc; +- +- if (*destlen == *srclen) { +- cc = 0; +- } else if (*destlen < *srclen) { +- cc = 1; +- } else { +- cc = 2; +- } +- +- if (!*destlen) { +- return cc; +- } +- +- /* +- * Only perform one type of type of operation (move/pad) at a time. +- * Stay within single pages. +- */ +- if (*srclen) { +- /* Copy the src array */ +- len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len); +- *destlen -= len; +- *srclen -= len; +- srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra); +- desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra); +- access_memmove(env, &desta, &srca, ra); +- *src = wrap_address(env, *src + len); +- *dest = wrap_address(env, *dest + len); +- } else if (wordsize == 1) { +- /* Pad the remaining area */ +- *destlen -= len; +- desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra); +- access_memset(env, &desta, pad, ra); +- *dest = wrap_address(env, *dest + len); +- } else { +- desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra); +- +- /* The remaining length selects the padding byte. */ +- for (i = 0; i < len; (*destlen)--, i++) { +- if (*destlen & 1) { +- access_set_byte(env, &desta, i, pad, ra); +- } else { +- access_set_byte(env, &desta, i, pad >> 8, ra); +- } +- } +- *dest = wrap_address(env, *dest + len); +- } +- +- return *destlen ? 
3 : cc; +-} +- +-/* move long */ +-uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2) +-{ +- const int mmu_idx = cpu_mmu_index(env, false); +- uintptr_t ra = GETPC(); +- uint64_t destlen = env->regs[r1 + 1] & 0xffffff; +- uint64_t dest = get_address(env, r1); +- uint64_t srclen = env->regs[r2 + 1] & 0xffffff; +- uint64_t src = get_address(env, r2); +- uint8_t pad = env->regs[r2 + 1] >> 24; +- CPUState *cs = env_cpu(env); +- S390Access srca, desta; +- uint32_t cc, cur_len; +- +- if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) { +- cc = 3; +- } else if (srclen == destlen) { +- cc = 0; +- } else if (destlen < srclen) { +- cc = 1; +- } else { +- cc = 2; +- } +- +- /* We might have to zero-out some bits even if there was no action. */ +- if (unlikely(!destlen || cc == 3)) { +- set_address_zero(env, r2, src); +- set_address_zero(env, r1, dest); +- return cc; +- } else if (!srclen) { +- set_address_zero(env, r2, src); +- } +- +- /* +- * Only perform one type of type of operation (move/pad) in one step. +- * Stay within single pages. +- */ +- while (destlen) { +- cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK)); +- if (!srclen) { +- desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx, +- ra); +- access_memset(env, &desta, pad, ra); +- } else { +- cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len); +- +- srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx, +- ra); +- desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx, +- ra); +- access_memmove(env, &desta, &srca, ra); +- src = wrap_address(env, src + cur_len); +- srclen -= cur_len; +- env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen); +- set_address_zero(env, r2, src); +- } +- dest = wrap_address(env, dest + cur_len); +- destlen -= cur_len; +- env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen); +- set_address_zero(env, r1, dest); +- +- /* +- * MVCL is interruptible. Return to the main loop if requested after +- * writing back all state to registers. If no interrupt will get +- * injected, we'll end up back in this handler and continue processing +- * the remaining parts. 
+- */ +- if (destlen && unlikely(cpu_loop_exit_requested(cs))) { +- cpu_loop_exit_restore(cs, ra); +- } +- } +- return cc; +-} +- +-/* move long extended */ +-uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2, +- uint32_t r3) +-{ +- uintptr_t ra = GETPC(); +- uint64_t destlen = get_length(env, r1 + 1); +- uint64_t dest = get_address(env, r1); +- uint64_t srclen = get_length(env, r3 + 1); +- uint64_t src = get_address(env, r3); +- uint8_t pad = a2; +- uint32_t cc; +- +- cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra); +- +- set_length(env, r1 + 1, destlen); +- set_length(env, r3 + 1, srclen); +- set_address(env, r1, dest); +- set_address(env, r3, src); +- +- return cc; +-} +- +-/* move long unicode */ +-uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2, +- uint32_t r3) +-{ +- uintptr_t ra = GETPC(); +- uint64_t destlen = get_length(env, r1 + 1); +- uint64_t dest = get_address(env, r1); +- uint64_t srclen = get_length(env, r3 + 1); +- uint64_t src = get_address(env, r3); +- uint16_t pad = a2; +- uint32_t cc; +- +- cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra); +- +- set_length(env, r1 + 1, destlen); +- set_length(env, r3 + 1, srclen); +- set_address(env, r1, dest); +- set_address(env, r3, src); +- +- return cc; +-} +- +-/* compare logical long helper */ +-static inline uint32_t do_clcl(CPUS390XState *env, +- uint64_t *src1, uint64_t *src1len, +- uint64_t *src3, uint64_t *src3len, +- uint16_t pad, uint64_t limit, +- int wordsize, uintptr_t ra) +-{ +- uint64_t len = MAX(*src1len, *src3len); +- uint32_t cc = 0; +- +- check_alignment(env, *src1len | *src3len, wordsize, ra); +- +- if (!len) { +- return cc; +- } +- +- /* Lest we fail to service interrupts in a timely manner, limit the +- amount of work we're willing to do. */ +- if (len > limit) { +- len = limit; +- cc = 3; +- } +- +- for (; len; len -= wordsize) { +- uint16_t v1 = pad; +- uint16_t v3 = pad; +- +- if (*src1len) { +- v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra); +- } +- if (*src3len) { +- v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra); +- } +- +- if (v1 != v3) { +- cc = (v1 < v3) ? 
1 : 2; +- break; +- } +- +- if (*src1len) { +- *src1 += wordsize; +- *src1len -= wordsize; +- } +- if (*src3len) { +- *src3 += wordsize; +- *src3len -= wordsize; +- } +- } +- +- return cc; +-} +- +- +-/* compare logical long */ +-uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2) +-{ +- uintptr_t ra = GETPC(); +- uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24); +- uint64_t src1 = get_address(env, r1); +- uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24); +- uint64_t src3 = get_address(env, r2); +- uint8_t pad = env->regs[r2 + 1] >> 24; +- uint32_t cc; +- +- cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra); +- +- env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len); +- env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len); +- set_address(env, r1, src1); +- set_address(env, r2, src3); +- +- return cc; +-} +- +-/* compare logical long extended memcompare insn with padding */ +-uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2, +- uint32_t r3) +-{ +- uintptr_t ra = GETPC(); +- uint64_t src1len = get_length(env, r1 + 1); +- uint64_t src1 = get_address(env, r1); +- uint64_t src3len = get_length(env, r3 + 1); +- uint64_t src3 = get_address(env, r3); +- uint8_t pad = a2; +- uint32_t cc; +- +- cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra); +- +- set_length(env, r1 + 1, src1len); +- set_length(env, r3 + 1, src3len); +- set_address(env, r1, src1); +- set_address(env, r3, src3); +- +- return cc; +-} +- +-/* compare logical long unicode memcompare insn with padding */ +-uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2, +- uint32_t r3) +-{ +- uintptr_t ra = GETPC(); +- uint64_t src1len = get_length(env, r1 + 1); +- uint64_t src1 = get_address(env, r1); +- uint64_t src3len = get_length(env, r3 + 1); +- uint64_t src3 = get_address(env, r3); +- uint16_t pad = a2; +- uint32_t cc = 0; +- +- cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra); +- +- set_length(env, r1 + 1, src1len); +- set_length(env, r3 + 1, src3len); +- set_address(env, r1, src1); +- set_address(env, r3, src3); +- +- return cc; +-} +- +-/* checksum */ +-uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1, +- uint64_t src, uint64_t src_len) +-{ +- uintptr_t ra = GETPC(); +- uint64_t max_len, len; +- uint64_t cksm = (uint32_t)r1; +- +- /* Lest we fail to service interrupts in a timely manner, limit the +- amount of work we're willing to do. For now, let's cap at 8k. */ +- max_len = (src_len > 0x2000 ? 0x2000 : src_len); +- +- /* Process full words as available. */ +- for (len = 0; len + 4 <= max_len; len += 4, src += 4) { +- cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra); +- } +- +- switch (max_len - len) { +- case 1: +- cksm += cpu_ldub_data_ra(env, src, ra) << 24; +- len += 1; +- break; +- case 2: +- cksm += cpu_lduw_data_ra(env, src, ra) << 16; +- len += 2; +- break; +- case 3: +- cksm += cpu_lduw_data_ra(env, src, ra) << 16; +- cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8; +- len += 3; +- break; +- } +- +- /* Fold the carry from the checksum. Note that we can see carry-out +- during folding more than once (but probably not more than twice). */ +- while (cksm > 0xffffffffull) { +- cksm = (uint32_t)cksm + (cksm >> 32); +- } +- +- /* Indicate whether or not we've processed everything. */ +- env->cc_op = (len == src_len ? 0 : 3); +- +- /* Return both cksm and processed length. 
*/ +- env->retxl = cksm; +- return len; +-} +- +-void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src) +-{ +- uintptr_t ra = GETPC(); +- int len_dest = len >> 4; +- int len_src = len & 0xf; +- uint8_t b; +- +- dest += len_dest; +- src += len_src; +- +- /* last byte is special, it only flips the nibbles */ +- b = cpu_ldub_data_ra(env, src, ra); +- cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra); +- src--; +- len_src--; +- +- /* now pack every value */ +- while (len_dest > 0) { +- b = 0; +- +- if (len_src >= 0) { +- b = cpu_ldub_data_ra(env, src, ra) & 0x0f; +- src--; +- len_src--; +- } +- if (len_src >= 0) { +- b |= cpu_ldub_data_ra(env, src, ra) << 4; +- src--; +- len_src--; +- } +- +- len_dest--; +- dest--; +- cpu_stb_data_ra(env, dest, b, ra); +- } +-} +- +-static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src, +- uint32_t srclen, int ssize, uintptr_t ra) +-{ +- int i; +- /* The destination operand is always 16 bytes long. */ +- const int destlen = 16; +- +- /* The operands are processed from right to left. */ +- src += srclen - 1; +- dest += destlen - 1; +- +- for (i = 0; i < destlen; i++) { +- uint8_t b = 0; +- +- /* Start with a positive sign */ +- if (i == 0) { +- b = 0xc; +- } else if (srclen > ssize) { +- b = cpu_ldub_data_ra(env, src, ra) & 0x0f; +- src -= ssize; +- srclen -= ssize; +- } +- +- if (srclen > ssize) { +- b |= cpu_ldub_data_ra(env, src, ra) << 4; +- src -= ssize; +- srclen -= ssize; +- } +- +- cpu_stb_data_ra(env, dest, b, ra); +- dest--; +- } +-} +- +- +-void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src, +- uint32_t srclen) +-{ +- do_pkau(env, dest, src, srclen, 1, GETPC()); +-} +- +-void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src, +- uint32_t srclen) +-{ +- do_pkau(env, dest, src, srclen, 2, GETPC()); +-} +- +-void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest, +- uint64_t src) +-{ +- uintptr_t ra = GETPC(); +- int len_dest = len >> 4; +- int len_src = len & 0xf; +- uint8_t b; +- int second_nibble = 0; +- +- dest += len_dest; +- src += len_src; +- +- /* last byte is special, it only flips the nibbles */ +- b = cpu_ldub_data_ra(env, src, ra); +- cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra); +- src--; +- len_src--; +- +- /* now pad every nibble with 0xf0 */ +- +- while (len_dest > 0) { +- uint8_t cur_byte = 0; +- +- if (len_src > 0) { +- cur_byte = cpu_ldub_data_ra(env, src, ra); +- } +- +- len_dest--; +- dest--; +- +- /* only advance one nibble at a time */ +- if (second_nibble) { +- cur_byte >>= 4; +- len_src--; +- src--; +- } +- second_nibble = !second_nibble; +- +- /* digit */ +- cur_byte = (cur_byte & 0xf); +- /* zone bits */ +- cur_byte |= 0xf0; +- +- cpu_stb_data_ra(env, dest, cur_byte, ra); +- } +-} +- +-static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest, +- uint32_t destlen, int dsize, uint64_t src, +- uintptr_t ra) +-{ +- int i; +- uint32_t cc; +- uint8_t b; +- /* The source operand is always 16 bytes long. */ +- const int srclen = 16; +- +- /* The operands are processed from right to left. */ +- src += srclen - 1; +- dest += destlen - dsize; +- +- /* Check for the sign. */ +- b = cpu_ldub_data_ra(env, src, ra); +- src--; +- switch (b & 0xf) { +- case 0xa: +- case 0xc: +- case 0xe ... 0xf: +- cc = 0; /* plus */ +- break; +- case 0xb: +- case 0xd: +- cc = 1; /* minus */ +- break; +- default: +- case 0x0 ... 0x9: +- cc = 3; /* invalid */ +- break; +- } +- +- /* Now pad every nibble with 0x30, advancing one nibble at a time. 
 */
+-    for (i = 0; i < destlen; i += dsize) {
+-        if (i == (31 * dsize)) {
+-            /* If length is 32/64 bytes, the leftmost byte is 0. */
+-            b = 0;
+-        } else if (i % (2 * dsize)) {
+-            b = cpu_ldub_data_ra(env, src, ra);
+-            src--;
+-        } else {
+-            b >>= 4;
+-        }
+-        cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
+-        dest -= dsize;
+-    }
+-
+-    return cc;
+-}
+-
+-uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
+-                       uint64_t src)
+-{
+-    return do_unpkau(env, dest, destlen, 1, src, GETPC());
+-}
+-
+-uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
+-                       uint64_t src)
+-{
+-    return do_unpkau(env, dest, destlen, 2, src, GETPC());
+-}
+-
+-uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
+-{
+-    uintptr_t ra = GETPC();
+-    uint32_t cc = 0;
+-    int i;
+-
+-    for (i = 0; i < destlen; i++) {
+-        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
+-        /* digit */
+-        cc |= (b & 0xf0) > 0x90 ? 2 : 0;
+-
+-        if (i == (destlen - 1)) {
+-            /* sign */
+-            cc |= (b & 0xf) < 0xa ? 1 : 0;
+-        } else {
+-            /* digit */
+-            cc |= (b & 0xf) > 0x9 ? 2 : 0;
+-        }
+-    }
+-
+-    return cc;
+-}
+-
+-static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
+-                             uint64_t trans, uintptr_t ra)
+-{
+-    uint32_t i;
+-
+-    for (i = 0; i <= len; i++) {
+-        uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
+-        uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
+-        cpu_stb_data_ra(env, array + i, new_byte, ra);
+-    }
+-
+-    return env->cc_op;
+-}
+-
+-void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
+-                uint64_t trans)
+-{
+-    do_helper_tr(env, len, array, trans, GETPC());
+-}
+-
+-uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
+-                     uint64_t len, uint64_t trans)
+-{
+-    uintptr_t ra = GETPC();
+-    uint8_t end = env->regs[0] & 0xff;
+-    uint64_t l = len;
+-    uint64_t i;
+-    uint32_t cc = 0;
+-
+-    if (!(env->psw.mask & PSW_MASK_64)) {
+-        array &= 0x7fffffff;
+-        l = (uint32_t)l;
+-    }
+-
+-    /* Lest we fail to service interrupts in a timely manner, limit the
+-       amount of work we're willing to do. For now, let's cap at 8k. */
+-    if (l > 0x2000) {
+-        l = 0x2000;
+-        cc = 3;
+-    }
+-
+-    for (i = 0; i < l; i++) {
+-        uint8_t byte, new_byte;
+-
+-        byte = cpu_ldub_data_ra(env, array + i, ra);
+-
+-        if (byte == end) {
+-            cc = 1;
+-            break;
+-        }
+-
+-        new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
+-        cpu_stb_data_ra(env, array + i, new_byte, ra);
+-    }
+-
+-    env->cc_op = cc;
+-    env->retxl = len - i;
+-    return array + i;
+-}
+-
+-static inline uint32_t do_helper_trt(CPUS390XState *env, int len,
+-                                     uint64_t array, uint64_t trans,
+-                                     int inc, uintptr_t ra)
+-{
+-    int i;
+-
+-    for (i = 0; i <= len; i++) {
+-        uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra);
+-        uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);
+-
+-        if (sbyte != 0) {
+-            set_address(env, 1, array + i * inc);
+-            env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
+-            return (i == len) ? 2 : 1;
+-        }
+-    }
+-
+-    return 0;
+-}
+-
+-static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len,
+-                                  uint64_t array, uint64_t trans,
+-                                  uintptr_t ra)
+-{
+-    return do_helper_trt(env, len, array, trans, 1, ra);
+-}
+-
+-uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
+-                     uint64_t trans)
+-{
+-    return do_helper_trt(env, len, array, trans, 1, GETPC());
+-}
+-
+-static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len,
+-                                   uint64_t array, uint64_t trans,
+-                                   uintptr_t ra)
+-{
+-    return do_helper_trt(env, len, array, trans, -1, ra);
+-}
+-
+-uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array,
+-                      uint64_t trans)
+-{
+-    return do_helper_trt(env, len, array, trans, -1, GETPC());
+-}
+-
+-/* Translate one/two to one/two */
+-uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
+-                      uint32_t tst, uint32_t sizes)
+-{
+-    uintptr_t ra = GETPC();
+-    int dsize = (sizes & 1) ? 1 : 2;
+-    int ssize = (sizes & 2) ? 1 : 2;
+-    uint64_t tbl = get_address(env, 1);
+-    uint64_t dst = get_address(env, r1);
+-    uint64_t len = get_length(env, r1 + 1);
+-    uint64_t src = get_address(env, r2);
+-    uint32_t cc = 3;
+-    int i;
+-
+-    /* The lower address bits of TBL are ignored. For TROO, TROT, it's
+-       the low 3 bits (double-word aligned). For TRTO, TRTT, it's either
+-       the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */
+-    if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) {
+-        tbl &= -4096;
+-    } else {
+-        tbl &= -8;
+-    }
+-
+-    check_alignment(env, len, ssize, ra);
+-
+-    /* Lest we fail to service interrupts in a timely manner, */
+-    /* limit the amount of work we're willing to do. */
+-    for (i = 0; i < 0x2000; i++) {
+-        uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
+-        uint64_t tble = tbl + (sval * dsize);
+-        uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);
+-        if (dval == tst) {
+-            cc = 1;
+-            break;
+-        }
+-        cpu_stsize_data_ra(env, dst, dval, dsize, ra);
+-
+-        len -= ssize;
+-        src += ssize;
+-        dst += dsize;
+-
+-        if (len == 0) {
+-            cc = 0;
+-            break;
+-        }
+-    }
+-
+-    set_address(env, r1, dst);
+-    set_length(env, r1 + 1, len);
+-    set_address(env, r2, src);
+-
+-    return cc;
+-}
+-
+-void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
+-                  uint32_t r1, uint32_t r3)
+-{
+-    uintptr_t ra = GETPC();
+-    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
+-    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
+-    Int128 oldv;
+-    uint64_t oldh, oldl;
+-    bool fail;
+-
+-    check_alignment(env, addr, 16, ra);
+-
+-    oldh = cpu_ldq_data_ra(env, addr + 0, ra);
+-    oldl = cpu_ldq_data_ra(env, addr + 8, ra);
+-
+-    oldv = int128_make128(oldl, oldh);
+-    fail = !int128_eq(oldv, cmpv);
+-    if (fail) {
+-        newv = oldv;
+-    }
+-
+-    cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
+-    cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);
+-
+-    env->cc_op = fail;
+-    env->regs[r1] = int128_gethi(oldv);
+-    env->regs[r1 + 1] = int128_getlo(oldv);
+-}
+-
+-void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
+-                           uint32_t r1, uint32_t r3)
+-{
+-    uintptr_t ra = GETPC();
+-    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
+-    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
+-    int mem_idx;
+-    TCGMemOpIdx oi;
+-    Int128 oldv;
+-    bool fail;
+-
+-    assert(HAVE_CMPXCHG128);
+-
+-    mem_idx = cpu_mmu_index(env, false);
+-    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+-    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+-    fail = !int128_eq(oldv, cmpv);
+-
+-    env->cc_op = fail;
+-    env->regs[r1] = int128_gethi(oldv);
+-    env->regs[r1 + 1] = int128_getlo(oldv);
+-}
+-
+-static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
+-                        uint64_t a2, bool parallel)
+-{
+-    uint32_t mem_idx = cpu_mmu_index(env, false);
+-    uintptr_t ra = GETPC();
+-    uint32_t fc = extract32(env->regs[0], 0, 8);
+-    uint32_t sc = extract32(env->regs[0], 8, 8);
+-    uint64_t pl = get_address(env, 1) & -16;
+-    uint64_t svh, svl;
+-    uint32_t cc;
+-
+-    /* Sanity check the function code and storage characteristic. */
+-    if (fc > 1 || sc > 3) {
+-        if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) {
+-            goto spec_exception;
+-        }
+-        if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) {
+-            goto spec_exception;
+-        }
+-    }
+-
+-    /* Sanity check the alignments. */
+-    if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) {
+-        goto spec_exception;
+-    }
+-
+-    /* Sanity check writability of the store address. */
+-    probe_write(env, a2, 1 << sc, mem_idx, ra);
+-
+-    /*
+-     * Note that the compare-and-swap is atomic, and the store is atomic,
+-     * but the complete operation is not. Therefore we do not need to
+-     * assert serial context in order to implement this. That said,
+-     * restart early if we can't support either operation that is supposed
+-     * to be atomic.
+-     */
+-    if (parallel) {
+-        uint32_t max = 2;
+-#ifdef CONFIG_ATOMIC64
+-        max = 3;
+-#endif
+-        if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) ||
+-            (HAVE_ATOMIC128 ? 0 : sc > max)) {
+-            cpu_loop_exit_atomic(env_cpu(env), ra);
+-        }
+-    }
+-
+-    /* All loads happen before all stores. For simplicity, load the entire
+-       store value area from the parameter list. */
+-    svh = cpu_ldq_data_ra(env, pl + 16, ra);
+-    svl = cpu_ldq_data_ra(env, pl + 24, ra);
+-
+-    switch (fc) {
+-    case 0:
+-        {
+-            uint32_t nv = cpu_ldl_data_ra(env, pl, ra);
+-            uint32_t cv = env->regs[r3];
+-            uint32_t ov;
+-
+-            if (parallel) {
+-#ifdef CONFIG_USER_ONLY
+-                uint32_t *haddr = g2h(env_cpu(env), a1);
+-                ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
+-#else
+-                TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
+-                ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
+-#endif
+-            } else {
+-                ov = cpu_ldl_data_ra(env, a1, ra);
+-                cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra);
+-            }
+-            cc = (ov != cv);
+-            env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov);
+-        }
+-        break;
+-
+-    case 1:
+-        {
+-            uint64_t nv = cpu_ldq_data_ra(env, pl, ra);
+-            uint64_t cv = env->regs[r3];
+-            uint64_t ov;
+-
+-            if (parallel) {
+-#ifdef CONFIG_ATOMIC64
+-# ifdef CONFIG_USER_ONLY
+-                uint64_t *haddr = g2h(env_cpu(env), a1);
+-                ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
+-# else
+-                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
+-                ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
+-# endif
+-#else
+-                /* Note that we asserted !parallel above. */
+-                g_assert_not_reached();
+-#endif
+-            } else {
+-                ov = cpu_ldq_data_ra(env, a1, ra);
+-                cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra);
+-            }
+-            cc = (ov != cv);
+-            env->regs[r3] = ov;
+-        }
+-        break;
+-
+-    case 2:
+-        {
+-            uint64_t nvh = cpu_ldq_data_ra(env, pl, ra);
+-            uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra);
+-            Int128 nv = int128_make128(nvl, nvh);
+-            Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
+-            Int128 ov;
+-
+-            if (!parallel) {
+-                uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
+-                uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);
+-
+-                ov = int128_make128(ol, oh);
+-                cc = !int128_eq(ov, cv);
+-                if (cc) {
+-                    nv = ov;
+-                }
+-
+-                cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
+-                cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
+-            } else if (HAVE_CMPXCHG128) {
+-                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+-                ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
+-                cc = !int128_eq(ov, cv);
+-            } else {
+-                /* Note that we asserted !parallel above. */
+-                g_assert_not_reached();
+-            }
+-
+-            env->regs[r3 + 0] = int128_gethi(ov);
+-            env->regs[r3 + 1] = int128_getlo(ov);
+-        }
+-        break;
+-
+-    default:
+-        g_assert_not_reached();
+-    }
+-
+-    /* Store only if the comparison succeeded. Note that above we use a pair
+-       of 64-bit big-endian loads, so for sc < 3 we must extract the value
+-       from the most-significant bits of svh. */
+-    if (cc == 0) {
+-        switch (sc) {
+-        case 0:
+-            cpu_stb_data_ra(env, a2, svh >> 56, ra);
+-            break;
+-        case 1:
+-            cpu_stw_data_ra(env, a2, svh >> 48, ra);
+-            break;
+-        case 2:
+-            cpu_stl_data_ra(env, a2, svh >> 32, ra);
+-            break;
+-        case 3:
+-            cpu_stq_data_ra(env, a2, svh, ra);
+-            break;
+-        case 4:
+-            if (!parallel) {
+-                cpu_stq_data_ra(env, a2 + 0, svh, ra);
+-                cpu_stq_data_ra(env, a2 + 8, svl, ra);
+-            } else if (HAVE_ATOMIC128) {
+-                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+-                Int128 sv = int128_make128(svl, svh);
+-                helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
+-            } else {
+-                /* Note that we asserted !parallel above. */
+-                g_assert_not_reached();
+-            }
+-            break;
+-        default:
+-            g_assert_not_reached();
+-        }
+-    }
+-
+-    return cc;
+-
+- spec_exception:
+-    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+-}
+-
+-uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
+-{
+-    return do_csst(env, r3, a1, a2, false);
+-}
+-
+-uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
+-                               uint64_t a2)
+-{
+-    return do_csst(env, r3, a1, a2, true);
+-}
+-
+-#if !defined(CONFIG_USER_ONLY)
+-void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+-{
+-    uintptr_t ra = GETPC();
+-    bool PERchanged = false;
+-    uint64_t src = a2;
+-    uint32_t i;
+-
+-    if (src & 0x7) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+-    }
+-
+-    for (i = r1;; i = (i + 1) % 16) {
+-        uint64_t val = cpu_ldq_data_ra(env, src, ra);
+-        if (env->cregs[i] != val && i >= 9 && i <= 11) {
+-            PERchanged = true;
+-        }
+-        env->cregs[i] = val;
+-        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
+-                   i, src, val);
+-        src += sizeof(uint64_t);
+-
+-        if (i == r3) {
+-            break;
+-        }
+-    }
+-
+-    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
+-        s390_cpu_recompute_watchpoints(env_cpu(env));
+-    }
+-
+-    tlb_flush(env_cpu(env));
+-}
+-
+-void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+-{
+-    uintptr_t ra = GETPC();
+-    bool PERchanged = false;
+-    uint64_t src = a2;
+-    uint32_t i;
+-
+-    if (src & 0x3) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+-    }
+-
+-    for (i = r1;; i = (i + 1) % 16) {
+-        uint32_t val = cpu_ldl_data_ra(env, src, ra);
+-        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
+-            PERchanged = true;
+-        }
+-        env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
+-        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
+-        src += sizeof(uint32_t);
+-
+-        if (i == r3) {
+-            break;
+-        }
+-    }
+-
+-    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
+-        s390_cpu_recompute_watchpoints(env_cpu(env));
+-    }
+-
+-    tlb_flush(env_cpu(env));
+-}
+-
+-void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+-{
+-    uintptr_t ra = GETPC();
+-    uint64_t dest = a2;
+-    uint32_t i;
+-
+-    if (dest & 0x7) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+-    }
+-
+-    for (i = r1;; i = (i + 1) % 16) {
+-        cpu_stq_data_ra(env, dest, env->cregs[i], ra);
+-        dest += sizeof(uint64_t);
+-
+-        if (i == r3) {
+-            break;
+-        }
+-    }
+-}
+-
+-void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+-{
+-    uintptr_t ra = GETPC();
+-    uint64_t dest = a2;
+-    uint32_t i;
+-
+-    if (dest & 0x3) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+-    }
+-
+-    for (i = r1;; i = (i + 1) % 16) {
+-        cpu_stl_data_ra(env, dest, env->cregs[i], ra);
+-        dest += sizeof(uint32_t);
+-
+-        if (i == r3) {
+-            break;
+-        }
+-    }
+-}
+-
+-uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
+-{
+-    uintptr_t ra = GETPC();
+-    int i;
+-
+-    real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;
+-
+-    for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
+-        cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra);
+-    }
+-
+-    return 0;
+-}
+-
+-uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-    CPUState *cs = env_cpu(env);
+-
+-    /*
+-     * TODO: we currently don't handle all access protection types
+-     * (including access-list and key-controlled) as well as AR mode.
+-     */
+-    if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) {
+-        /* Fetching permitted; storing permitted */
+-        return 0;
+-    }
+-
+-    if (env->int_pgm_code == PGM_PROTECTION) {
+-        /* retry if reading is possible */
+-        cs->exception_index = -1;
+-        if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) {
+-            /* Fetching permitted; storing not permitted */
+-            return 1;
+-        }
+-    }
+-
+-    switch (env->int_pgm_code) {
+-    case PGM_PROTECTION:
+-        /* Fetching not permitted; storing not permitted */
+-        cs->exception_index = -1;
+-        return 2;
+-    case PGM_ADDRESSING:
+-    case PGM_TRANS_SPEC:
+-        /* exceptions forwarded to the guest */
+-        s390_cpu_virt_mem_handle_exc(cpu, GETPC());
+-        return 0;
+-    }
+-
+-    /* Translation not available */
+-    cs->exception_index = -1;
+-    return 3;
+-}
+-
+-/* insert storage key extended */
+-uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
+-{
+-    MachineState *ms = MACHINE(qdev_get_machine());
+-    static S390SKeysState *ss;
+-    static S390SKeysClass *skeyclass;
+-    uint64_t addr = wrap_address(env, r2);
+-    uint8_t key;
+-
+-    if (addr > ms->ram_size) {
+-        return 0;
+-    }
+-
+-    if (unlikely(!ss)) {
+-        ss = s390_get_skeys_device();
+-        skeyclass = S390_SKEYS_GET_CLASS(ss);
+-    }
+-
+-    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
+-        return 0;
+-    }
+-    return key;
+-}
+-
+-/* set storage key extended */
+-void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
+-{
+-    MachineState *ms = MACHINE(qdev_get_machine());
+-    static S390SKeysState *ss;
+-    static S390SKeysClass *skeyclass;
+-    uint64_t addr = wrap_address(env, r2);
+-    uint8_t key;
+-
+-    if (addr > ms->ram_size) {
+-        return;
+-    }
+-
+-    if (unlikely(!ss)) {
+-        ss = s390_get_skeys_device();
+-        skeyclass = S390_SKEYS_GET_CLASS(ss);
+-    }
+-
+-    key = (uint8_t) r1;
+-    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
+-    /*
+-     * As we can only flush by virtual address and not all the entries
+-     * that point to a physical address we have to flush the whole TLB.
+-     */
+-    tlb_flush_all_cpus_synced(env_cpu(env));
+-}
+-
+-/* reset reference bit extended */
+-uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
+-{
+-    MachineState *ms = MACHINE(qdev_get_machine());
+-    static S390SKeysState *ss;
+-    static S390SKeysClass *skeyclass;
+-    uint8_t re, key;
+-
+-    if (r2 > ms->ram_size) {
+-        return 0;
+-    }
+-
+-    if (unlikely(!ss)) {
+-        ss = s390_get_skeys_device();
+-        skeyclass = S390_SKEYS_GET_CLASS(ss);
+-    }
+-
+-    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
+-        return 0;
+-    }
+-
+-    re = key & (SK_R | SK_C);
+-    key &= ~SK_R;
+-
+-    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
+-        return 0;
+-    }
+-    /*
+-     * As we can only flush by virtual address and not all the entries
+-     * that point to a physical address we have to flush the whole TLB.
+-     */
+-    tlb_flush_all_cpus_synced(env_cpu(env));
+-
+-    /*
+-     * cc
+-     *
+-     * 0 Reference bit zero; change bit zero
+-     * 1 Reference bit zero; change bit one
+-     * 2 Reference bit one; change bit zero
+-     * 3 Reference bit one; change bit one
+-     */
+-
+-    return re >> 1;
+-}
+-
+-uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
+-{
+-    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
+-    S390Access srca, desta;
+-    uintptr_t ra = GETPC();
+-    int cc = 0;
+-
+-    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+-               __func__, l, a1, a2);
+-
+-    if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
+-        psw_as == AS_HOME || psw_as == AS_ACCREG) {
+-        s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
+-    }
+-
+-    l = wrap_length32(env, l);
+-    if (l > 256) {
+-        /* max 256 */
+-        l = 256;
+-        cc = 3;
+-    } else if (!l) {
+-        return cc;
+-    }
+-
+-    /* TODO: Access key handling */
+-    srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra);
+-    desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra);
+-    access_memmove(env, &desta, &srca, ra);
+-    return cc;
+-}
+-
+-uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
+-{
+-    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
+-    S390Access srca, desta;
+-    uintptr_t ra = GETPC();
+-    int cc = 0;
+-
+-    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+-               __func__, l, a1, a2);
+-
+-    if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
+-        psw_as == AS_HOME || psw_as == AS_ACCREG) {
+-        s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
+-    }
+-
+-    l = wrap_length32(env, l);
+-    if (l > 256) {
+-        /* max 256 */
+-        l = 256;
+-        cc = 3;
+-    } else if (!l) {
+-        return cc;
+-    }
+-
+-    /* TODO: Access key handling */
+-    srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra);
+-    desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra);
+-    access_memmove(env, &desta, &srca, ra);
+-    return cc;
+-}
+-
+-void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
+-{
+-    CPUState *cs = env_cpu(env);
+-    const uintptr_t ra = GETPC();
+-    uint64_t table, entry, raddr;
+-    uint16_t entries, i, index = 0;
+-
+-    if (r2 & 0xff000) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+-    }
+-
+-    if (!(r2 & 0x800)) {
+-        /* invalidation-and-clearing operation */
+-        table = r1 & ASCE_ORIGIN;
+-        entries = (r2 & 0x7ff) + 1;
+-
+-        switch (r1 & ASCE_TYPE_MASK) {
+-        case ASCE_TYPE_REGION1:
+-            index = (r2 >> 53) & 0x7ff;
+-            break;
+-        case ASCE_TYPE_REGION2:
+-            index = (r2 >> 42) & 0x7ff;
+-            break;
+-        case ASCE_TYPE_REGION3:
+-            index = (r2 >> 31) & 0x7ff;
+-            break;
+-        case ASCE_TYPE_SEGMENT:
+-            index = (r2 >> 20) & 0x7ff;
+-            break;
+-        }
+-        for (i = 0; i < entries; i++) {
+-            /* addresses are not wrapped in 24/31bit mode but table index is */
+-            raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
+-            entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra);
+-            if (!(entry & REGION_ENTRY_I)) {
+-                /* we are allowed to not store if already invalid */
+-                entry |= REGION_ENTRY_I;
+-                cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra);
+-            }
+-        }
+-    }
+-
+-    /* We simply flush the complete tlb, therefore we can ignore r3. */
+-    if (m4 & 1) {
+-        tlb_flush(cs);
+-    } else {
+-        tlb_flush_all_cpus_synced(cs);
+-    }
+-}
+-
+-/* invalidate pte */
+-void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
+-                  uint32_t m4)
+-{
+-    CPUState *cs = env_cpu(env);
+-    const uintptr_t ra = GETPC();
+-    uint64_t page = vaddr & TARGET_PAGE_MASK;
+-    uint64_t pte_addr, pte;
+-
+-    /* Compute the page table entry address */
+-    pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
+-    pte_addr += VADDR_PAGE_TX(vaddr) * 8;
+-
+-    /* Mark the page table entry as invalid */
+-    pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra);
+-    pte |= PAGE_ENTRY_I;
+-    cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra);
+-
+-    /* XXX we exploit the fact that Linux passes the exact virtual
+-       address here - it's not obliged to! */
+-    if (m4 & 1) {
+-        if (vaddr & ~VADDR_PAGE_TX_MASK) {
+-            tlb_flush_page(cs, page);
+-            /* XXX 31-bit hack */
+-            tlb_flush_page(cs, page ^ 0x80000000);
+-        } else {
+-            /* looks like we don't have a valid virtual address */
+-            tlb_flush(cs);
+-        }
+-    } else {
+-        if (vaddr & ~VADDR_PAGE_TX_MASK) {
+-            tlb_flush_page_all_cpus_synced(cs, page);
+-            /* XXX 31-bit hack */
+-            tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
+-        } else {
+-            /* looks like we don't have a valid virtual address */
+-            tlb_flush_all_cpus_synced(cs);
+-        }
+-    }
+-}
+-
+-/* flush local tlb */
+-void HELPER(ptlb)(CPUS390XState *env)
+-{
+-    tlb_flush(env_cpu(env));
+-}
+-
+-/* flush global tlb */
+-void HELPER(purge)(CPUS390XState *env)
+-{
+-    tlb_flush_all_cpus_synced(env_cpu(env));
+-}
+-
+-/* load real address */
+-uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
+-{
+-    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+-    uint64_t ret, tec;
+-    int flags, exc, cc;
+-
+-    /* XXX incomplete - has more corner cases */
+-    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC());
+-    }
+-
+-    exc = mmu_translate(env, addr, 0, asc, &ret, &flags, &tec);
+-    if (exc) {
+-        cc = 3;
+-        ret = exc | 0x80000000;
+-    } else {
+-        cc = 0;
+-        ret |= addr & ~TARGET_PAGE_MASK;
+-    }
+-
+-    env->cc_op = cc;
+-    return ret;
+-}
+-#endif
+-
+-/* load pair from quadword */
+-uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
+-{
+-    uintptr_t ra = GETPC();
+-    uint64_t hi, lo;
+-
+-    check_alignment(env, addr, 16, ra);
+-    hi = cpu_ldq_data_ra(env, addr + 0, ra);
+-    lo = cpu_ldq_data_ra(env, addr + 8, ra);
+-
+-    env->retxl = lo;
+-    return hi;
+-}
+-
+-uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
+-{
+-    uintptr_t ra = GETPC();
+-    uint64_t hi, lo;
+-    int mem_idx;
+-    TCGMemOpIdx oi;
+-    Int128 v;
+-
+-    assert(HAVE_ATOMIC128);
+-
+-    mem_idx = cpu_mmu_index(env, false);
+-    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+-    v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
+-    hi = int128_gethi(v);
+-    lo = int128_getlo(v);
+-
+-    env->retxl = lo;
+-    return hi;
+-}
+-
+-/* store pair to quadword */
+-void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
+-                  uint64_t low, uint64_t high)
+-{
+-    uintptr_t ra = GETPC();
+-
+-    check_alignment(env, addr, 16, ra);
+-    cpu_stq_data_ra(env, addr + 0, high, ra);
+-    cpu_stq_data_ra(env, addr + 8, low, ra);
+-}
+-
+-void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
+-                           uint64_t low, uint64_t high)
+-{
+-    uintptr_t ra = GETPC();
+-    int mem_idx;
+-    TCGMemOpIdx oi;
+-    Int128 v;
+-
+-    assert(HAVE_ATOMIC128);
+-
+-    mem_idx = cpu_mmu_index(env, false);
+-    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+-    v = int128_make128(low, high);
+-    helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
+-}
+-
+-/* Execute instruction. This instruction executes an insn modified with
+-   the contents of r1. It does not change the executed instruction in memory;
+-   it does not change the program counter.
+-
+-   Perform this by recording the modified instruction in env->ex_value.
+-   This will be noticed by cpu_get_tb_cpu_state and thus tb translation.
+-*/
+-void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
+-{
+-    uint64_t insn = cpu_lduw_code(env, addr);
+-    uint8_t opc = insn >> 8;
+-
+-    /* Or in the contents of R1[56:63]. */
+-    insn |= r1 & 0xff;
+-
+-    /* Load the rest of the instruction. */
+-    insn <<= 48;
+-    switch (get_ilen(opc)) {
+-    case 2:
+-        break;
+-    case 4:
+-        insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
+-        break;
+-    case 6:
+-        insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
+-        break;
+-    default:
+-        g_assert_not_reached();
+-    }
+-
+-    /* The very most common cases can be sped up by avoiding a new TB. */
+-    if ((opc & 0xf0) == 0xd0) {
+-        typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
+-                                      uint64_t, uintptr_t);
+-        static const dx_helper dx[16] = {
+-            [0x0] = do_helper_trt_bkwd,
+-            [0x2] = do_helper_mvc,
+-            [0x4] = do_helper_nc,
+-            [0x5] = do_helper_clc,
+-            [0x6] = do_helper_oc,
+-            [0x7] = do_helper_xc,
+-            [0xc] = do_helper_tr,
+-            [0xd] = do_helper_trt_fwd,
+-        };
+-        dx_helper helper = dx[opc & 0xf];
+-
+-        if (helper) {
+-            uint32_t l = extract64(insn, 48, 8);
+-            uint32_t b1 = extract64(insn, 44, 4);
+-            uint32_t d1 = extract64(insn, 32, 12);
+-            uint32_t b2 = extract64(insn, 28, 4);
+-            uint32_t d2 = extract64(insn, 16, 12);
+-            uint64_t a1 = wrap_address(env, (b1 ? env->regs[b1] : 0) + d1);
+-            uint64_t a2 = wrap_address(env, (b2 ? env->regs[b2] : 0) + d2);
+-
+-            env->cc_op = helper(env, l, a1, a2, 0);
+-            env->psw.addr += ilen;
+-            return;
+-        }
+-    } else if (opc == 0x0a) {
+-        env->int_svc_code = extract64(insn, 48, 8);
+-        env->int_svc_ilen = ilen;
+-        helper_exception(env, EXCP_SVC);
+-        g_assert_not_reached();
+-    }
+-
+-    /* Record the insn we want to execute as well as the ilen to use
+-       during the execution of the target insn. This will also ensure
+-       that ex_value is non-zero, which flags that we are in a state
+-       that requires such execution. */
+-    env->ex_value = insn | ilen;
+-}
+-
+-uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
+-                       uint64_t len)
+-{
+-    const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY;
+-    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
+-    const uint64_t r0 = env->regs[0];
+-    const uintptr_t ra = GETPC();
+-    uint8_t dest_key, dest_as, dest_k, dest_a;
+-    uint8_t src_key, src_as, src_k, src_a;
+-    uint64_t val;
+-    int cc = 0;
+-
+-    HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n",
+-               __func__, dest, src, len);
+-
+-    if (!(env->psw.mask & PSW_MASK_DAT)) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
+-    }
+-
+-    /* OAC (operand access control) for the first operand -> dest */
+-    val = (r0 & 0xffff0000ULL) >> 16;
+-    dest_key = (val >> 12) & 0xf;
+-    dest_as = (val >> 6) & 0x3;
+-    dest_k = (val >> 1) & 0x1;
+-    dest_a = val & 0x1;
+-
+-    /* OAC (operand access control) for the second operand -> src */
+-    val = (r0 & 0x0000ffffULL);
+-    src_key = (val >> 12) & 0xf;
+-    src_as = (val >> 6) & 0x3;
+-    src_k = (val >> 1) & 0x1;
+-    src_a = val & 0x1;
+-
+-    if (!dest_k) {
+-        dest_key = psw_key;
+-    }
+-    if (!src_k) {
+-        src_key = psw_key;
+-    }
+-    if (!dest_a) {
+-        dest_as = psw_as;
+-    }
+-    if (!src_a) {
+-        src_as = psw_as;
+-    }
+-
+-    if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
+-    }
+-    if (!(env->cregs[0] & CR0_SECONDARY) &&
+-        (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
+-    }
+-    if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
+-        tcg_s390_program_interrupt(env, PGM_PRIVILEGED, ra);
+-    }
+-
+-    len = wrap_length32(env, len);
+-    if (len > 4096) {
+-        cc = 3;
+-        len = 4096;
+-    }
+-
+-    /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
+-    if (src_as == AS_ACCREG || dest_as == AS_ACCREG ||
+-        (env->psw.mask & PSW_MASK_PSTATE)) {
+-        qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
+-                      __func__);
+-        tcg_s390_program_interrupt(env, PGM_ADDRESSING, ra);
+-    }
+-
+-    /* FIXME: Access using correct keys and AR-mode */
+-    if (len) {
+-        S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD,
+-                                         mmu_idx_from_as(src_as), ra);
+-        S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE,
+-                                          mmu_idx_from_as(dest_as), ra);
+-
+-        access_memmove(env, &desta, &srca, ra);
+-    }
+-
+-    return cc;
+-}
+-
+-/* Decode a Unicode character. A return value < 0 indicates success, storing
+-   the UTF-32 result into OCHAR and the input length into OLEN. A return
+-   value >= 0 indicates failure, and the CC value to be returned. */
+-typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr,
+-                                 uint64_t ilen, bool enh_check, uintptr_t ra,
+-                                 uint32_t *ochar, uint32_t *olen);
+-
+-/* Encode a Unicode character. A return value < 0 indicates success, storing
+-   the bytes into ADDR and the output length into OLEN. A return value >= 0
+-   indicates failure, and the CC value to be returned. */
+-typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr,
+-                                 uint64_t ilen, uintptr_t ra, uint32_t c,
+-                                 uint32_t *olen);
+-
+-static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+-                       bool enh_check, uintptr_t ra,
+-                       uint32_t *ochar, uint32_t *olen)
+-{
+-    uint8_t s0, s1, s2, s3;
+-    uint32_t c, l;
+-
+-    if (ilen < 1) {
+-        return 0;
+-    }
+-    s0 = cpu_ldub_data_ra(env, addr, ra);
+-    if (s0 <= 0x7f) {
+-        /* one byte character */
+-        l = 1;
+-        c = s0;
+-    } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) {
+-        /* invalid character */
+-        return 2;
+-    } else if (s0 <= 0xdf) {
+-        /* two byte character */
+-        l = 2;
+-        if (ilen < 2) {
+-            return 0;
+-        }
+-        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
+-        c = s0 & 0x1f;
+-        c = (c << 6) | (s1 & 0x3f);
+-        if (enh_check && (s1 & 0xc0) != 0x80) {
+-            return 2;
+-        }
+-    } else if (s0 <= 0xef) {
+-        /* three byte character */
+-        l = 3;
+-        if (ilen < 3) {
+-            return 0;
+-        }
+-        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
+-        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
+-        c = s0 & 0x0f;
+-        c = (c << 6) | (s1 & 0x3f);
+-        c = (c << 6) | (s2 & 0x3f);
+-        /* Fold the byte-by-byte range descriptions in the PoO into
+-           tests against the complete value. It disallows encodings
+-           that could be smaller, and the UTF-16 surrogates. */
+-        if (enh_check
+-            && ((s1 & 0xc0) != 0x80
+-                || (s2 & 0xc0) != 0x80
+-                || c < 0x1000
+-                || (c >= 0xd800 && c <= 0xdfff))) {
+-            return 2;
+-        }
+-    } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) {
+-        /* four byte character */
+-        l = 4;
+-        if (ilen < 4) {
+-            return 0;
+-        }
+-        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
+-        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
+-        s3 = cpu_ldub_data_ra(env, addr + 3, ra);
+-        c = s0 & 0x07;
+-        c = (c << 6) | (s1 & 0x3f);
+-        c = (c << 6) | (s2 & 0x3f);
+-        c = (c << 6) | (s3 & 0x3f);
+-        /* See above. */
+-        if (enh_check
+-            && ((s1 & 0xc0) != 0x80
+-                || (s2 & 0xc0) != 0x80
+-                || (s3 & 0xc0) != 0x80
+-                || c < 0x010000
+-                || c > 0x10ffff)) {
+-            return 2;
+-        }
+-    } else {
+-        /* invalid character */
+-        return 2;
+-    }
+-
+-    *ochar = c;
+-    *olen = l;
+-    return -1;
+-}
+-
+-static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+-                        bool enh_check, uintptr_t ra,
+-                        uint32_t *ochar, uint32_t *olen)
+-{
+-    uint16_t s0, s1;
+-    uint32_t c, l;
+-
+-    if (ilen < 2) {
+-        return 0;
+-    }
+-    s0 = cpu_lduw_data_ra(env, addr, ra);
+-    if ((s0 & 0xfc00) != 0xd800) {
+-        /* one word character */
+-        l = 2;
+-        c = s0;
+-    } else {
+-        /* two word character */
+-        l = 4;
+-        if (ilen < 4) {
+-            return 0;
+-        }
+-        s1 = cpu_lduw_data_ra(env, addr + 2, ra);
+-        c = extract32(s0, 6, 4) + 1;
+-        c = (c << 6) | (s0 & 0x3f);
+-        c = (c << 10) | (s1 & 0x3ff);
+-        if (enh_check && (s1 & 0xfc00) != 0xdc00) {
+-            /* invalid surrogate character */
+-            return 2;
+-        }
+-    }
+-
+-    *ochar = c;
+-    *olen = l;
+-    return -1;
+-}
+-
+-static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+-                        bool enh_check, uintptr_t ra,
+-                        uint32_t *ochar, uint32_t *olen)
+-{
+-    uint32_t c;
+-
+-    if (ilen < 4) {
+-        return 0;
+-    }
+-    c = cpu_ldl_data_ra(env, addr, ra);
+-    if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) {
+-        /* invalid unicode character */
+-        return 2;
+-    }
+-
+-    *ochar = c;
+-    *olen = 4;
+-    return -1;
+-}
+-
+-static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+-                       uintptr_t ra, uint32_t c, uint32_t *olen)
+-{
+-    uint8_t d[4];
+-    uint32_t l, i;
+-
+-    if (c <= 0x7f) {
+-        /* one byte character */
+-        l = 1;
+-        d[0] = c;
+-    } else if (c <= 0x7ff) {
+-        /* two byte character */
+-        l = 2;
+-        d[1] = 0x80 | extract32(c, 0, 6);
+-        d[0] = 0xc0 | extract32(c, 6, 5);
+-    } else if (c <= 0xffff) {
+-        /* three byte character */
+-        l = 3;
+-        d[2] = 0x80 | extract32(c, 0, 6);
+-        d[1] = 0x80 | extract32(c, 6, 6);
+-        d[0] = 0xe0 | extract32(c, 12, 4);
+-    } else {
+-        /* four byte character */
+-        l = 4;
+-        d[3] = 0x80 | extract32(c, 0, 6);
+-        d[2] = 0x80 | extract32(c, 6, 6);
+-        d[1] = 0x80 | extract32(c, 12, 6);
+-        d[0] = 0xf0 | extract32(c, 18, 3);
+-    }
+-
+-    if (ilen < l) {
+-        return 1;
+-    }
+-    for (i = 0; i < l; ++i) {
+-        cpu_stb_data_ra(env, addr + i, d[i], ra);
+-    }
+-
+-    *olen = l;
+-    return -1;
+-}
+-
+-static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+-                        uintptr_t ra, uint32_t c, uint32_t *olen)
+-{
+-    uint16_t d0, d1;
+-
+-    if (c <= 0xffff) {
+-        /* one word character */
+-        if (ilen < 2) {
+-            return 1;
+-        }
+-        cpu_stw_data_ra(env, addr, c, ra);
+-        *olen = 2;
+-    } else {
+-        /* two word character */
+-        if (ilen < 4) {
+-            return 1;
+-        }
+-        d1 = 0xdc00 | extract32(c, 0, 10);
+-        d0 = 0xd800 | extract32(c, 10, 6);
+-        d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1);
+-        cpu_stw_data_ra(env, addr + 0, d0, ra);
+-        cpu_stw_data_ra(env, addr + 2, d1, ra);
+-        *olen = 4;
+-    }
+-
+-    return -1;
+-}
+-
+-static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
+-                        uintptr_t ra, uint32_t c, uint32_t *olen)
+-{
+-    if (ilen < 4) {
+-        return 1;
+-    }
+-    cpu_stl_data_ra(env, addr, c, ra);
+-    *olen = 4;
+-    return -1;
+-}
+-
+-static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1,
+-                                       uint32_t r2, uint32_t m3, uintptr_t ra,
+-                                       decode_unicode_fn decode,
+-                                       encode_unicode_fn encode)
+-{
+-    uint64_t dst = get_address(env, r1);
+-    uint64_t dlen = get_length(env, r1 + 1);
+-    uint64_t src = get_address(env, r2);
+-    uint64_t slen = get_length(env, r2 + 1);
+-    bool enh_check = m3 & 1;
+-    int cc, i;
+-
+-    /* Lest we fail to service interrupts in a timely manner, limit the
+-       amount of work we're willing to do. For now, let's cap at 256. */
+-    for (i = 0; i < 256; ++i) {
+-        uint32_t c, ilen, olen;
+-
+-        cc = decode(env, src, slen, enh_check, ra, &c, &ilen);
+-        if (unlikely(cc >= 0)) {
+-            break;
+-        }
+-        cc = encode(env, dst, dlen, ra, c, &olen);
+-        if (unlikely(cc >= 0)) {
+-            break;
+-        }
+-
+-        src += ilen;
+-        slen -= ilen;
+-        dst += olen;
+-        dlen -= olen;
+-        cc = 3;
+-    }
+-
+-    set_address(env, r1, dst);
+-    set_length(env, r1 + 1, dlen);
+-    set_address(env, r2, src);
+-    set_length(env, r2 + 1, slen);
+-
+-    return cc;
+-}
+-
+-uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+-{
+-    return convert_unicode(env, r1, r2, m3, GETPC(),
+-                           decode_utf8, encode_utf16);
+-}
+-
+-uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+-{
+-    return convert_unicode(env, r1, r2, m3, GETPC(),
+-                           decode_utf8, encode_utf32);
+-}
+-
+-uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+-{
+-    return convert_unicode(env, r1, r2, m3, GETPC(),
+-                           decode_utf16, encode_utf8);
+-}
+-
+-uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+-{
+-    return convert_unicode(env, r1, r2, m3, GETPC(),
+-                           decode_utf16, encode_utf32);
+-}
+-
+-uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+-{
+-    return convert_unicode(env, r1, r2, m3, GETPC(),
+-                           decode_utf32, encode_utf8);
+-}
+-
+-uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
+-{
+-    return convert_unicode(env, r1, r2, m3, GETPC(),
+-                           decode_utf32, encode_utf16);
+-}
+-
+-void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
+-                        uintptr_t ra)
+-{
+-    /* test the actual access, not just any access to the page due to LAP */
+-    while (len) {
+-        const uint64_t pagelen = -(addr | TARGET_PAGE_MASK);
+-        const uint64_t curlen = MIN(pagelen, len);
+-
+-        probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra);
+-        addr = wrap_address(env, addr + curlen);
+-        len -= curlen;
+-    }
+-}
+-
+-void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len)
+-{
+-    probe_write_access(env, addr, len, GETPC());
+-}
+diff --git a/target/s390x/meson.build b/target/s390x/meson.build
+index c42eadb7d2..84c1402a6a 100644
+--- a/target/s390x/meson.build
++++ b/target/s390x/meson.build
+@@ -4,27 +4,10 @@ s390x_ss.add(files(
+   'cpu_features.c',
+   'cpu_models.c',
+   'gdbstub.c',
+-  'helper.c',
+   'interrupt.c',
++  'cpu-dump.c',
+ ))
+
+-s390x_ss.add(when: 'CONFIG_TCG', if_true: files(
+-  'cc_helper.c',
+-  'crypto_helper.c',
+-  'excp_helper.c',
+-  'fpu_helper.c',
+-  'int_helper.c',
+-  'mem_helper.c',
+-  'misc_helper.c',
+-  'translate.c',
+-  'vec_fpu_helper.c',
+-  'vec_helper.c',
+-  'vec_int_helper.c',
+-  'vec_string_helper.c',
+-), if_false: files('tcg-stub.c'))
+-
+-s390x_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'), if_false: files('kvm-stub.c'))
+-
+ gen_features = executable('gen-features', 'gen-features.c', native: true,
+                           build_by_default: false)
+
+@@ -37,26 +20,25 @@ s390x_ss.add(gen_features_h)
+
+ s390x_softmmu_ss = ss.source_set()
+ s390x_softmmu_ss.add(files(
++  'helper.c',
+   'arch_dump.c',
+   'diag.c',
+   'ioinst.c',
+   'machine.c',
+   'mmu_helper.c',
+   'sigp.c',
++  'cpu-sysemu.c',
++  'cpu_models_sysemu.c',
++))
++
++s390x_user_ss = ss.source_set()
++s390x_user_ss.add(files(
++  'cpu_models_user.c',
+ ))
+
+-# Newer kernels on s390 check for an S390_PGSTE program header and
+-# enable the pgste page table extensions in that case. This makes
+-# the vm.allocate_pgste sysctl unnecessary. We enable this program
+-# header if
+-# - we build on s390x
+-# - we build the system emulation for s390x (qemu-system-s390x)
+-# - KVM is enabled
+-# - the linker supports --s390-pgste
+-if host_machine.cpu_family() == 's390x' and cc.has_link_argument('-Wl,--s390-pgste')
+-  s390x_softmmu_ss.add(when: 'CONFIG_KVM',
+-                       if_true: declare_dependency(link_args: ['-Wl,--s390-pgste']))
+-endif
++subdir('tcg')
++subdir('kvm')
+
+ target_arch += {'s390x': s390x_ss}
+ target_softmmu_arch += {'s390x': s390x_softmmu_ss}
++target_user_arch += {'s390x': s390x_user_ss}
+diff --git a/target/s390x/misc_helper.c b/target/s390x/misc_helper.c
+deleted file mode 100644
+index 7ea90d414a..0000000000
+--- a/target/s390x/misc_helper.c
++++ /dev/null
+@@ -1,785 +0,0 @@
+-/*
+- * S/390 misc helper routines
+- *
+- * Copyright (c) 2009 Ulrich Hecht
+- * Copyright (c) 2009 Alexander Graf
+- *
+- * This library is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU Lesser General Public
+- * License as published by the Free Software Foundation; either
+- * version 2.1 of the License, or (at your option) any later version.
+- *
+- * This library is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * Lesser General Public License for more details.
+- *
+- * You should have received a copy of the GNU Lesser General Public
+- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+- */
+-
+-#include "qemu/osdep.h"
+-#include "qemu/cutils.h"
+-#include "qemu/main-loop.h"
+-#include "cpu.h"
+-#include "internal.h"
+-#include "exec/memory.h"
+-#include "qemu/host-utils.h"
+-#include "exec/helper-proto.h"
+-#include "qemu/timer.h"
+-#include "exec/exec-all.h"
+-#include "exec/cpu_ldst.h"
+-#include "qapi/error.h"
+-#include "tcg_s390x.h"
+-#include "s390-tod.h"
+-
+-#if !defined(CONFIG_USER_ONLY)
+-#include "sysemu/cpus.h"
+-#include "sysemu/sysemu.h"
+-#include "hw/s390x/ebcdic.h"
+-#include "hw/s390x/s390-virtio-hcall.h"
+-#include "hw/s390x/sclp.h"
+-#include "hw/s390x/s390_flic.h"
+-#include "hw/s390x/ioinst.h"
+-#include "hw/s390x/s390-pci-inst.h"
+-#include "hw/boards.h"
+-#include "hw/s390x/tod.h"
+-#endif
+-
+-/* #define DEBUG_HELPER */
+-#ifdef DEBUG_HELPER
+-#define HELPER_LOG(x...) qemu_log(x)
+-#else
+-#define HELPER_LOG(x...)
+-#endif
+-
+-/* Raise an exception statically from a TB. */
+-void HELPER(exception)(CPUS390XState *env, uint32_t excp)
+-{
+-    CPUState *cs = env_cpu(env);
+-
+-    HELPER_LOG("%s: exception %d\n", __func__, excp);
+-    cs->exception_index = excp;
+-    cpu_loop_exit(cs);
+-}
+-
+-/* Store CPU Timer (also used for EXTRACT CPU TIME) */
+-uint64_t HELPER(stpt)(CPUS390XState *env)
+-{
+-#if defined(CONFIG_USER_ONLY)
+-    /*
+-     * Fake a descending CPU timer. We could get negative values here,
+-     * but we don't care as it is up to the OS when to process that
+-     * interrupt and reset to > 0.
+-     */
+-    return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
+-#else
+-    return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+-#endif
+-}
+-
+-/* Store Clock */
+-uint64_t HELPER(stck)(CPUS390XState *env)
+-{
+-#ifdef CONFIG_USER_ONLY
+-    struct timespec ts;
+-    uint64_t ns;
+-
+-    clock_gettime(CLOCK_REALTIME, &ts);
+-    ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec;
+-
+-    return TOD_UNIX_EPOCH + time2tod(ns);
+-#else
+-    S390TODState *td = s390_get_todstate();
+-    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
+-    S390TOD tod;
+-
+-    tdc->get(td, &tod, &error_abort);
+-    return tod.low;
+-#endif
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-/* SCLP service call */
+-uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
+-{
+-    qemu_mutex_lock_iothread();
+-    int r = sclp_service_call(env, r1, r2);
+-    qemu_mutex_unlock_iothread();
+-    if (r < 0) {
+-        tcg_s390_program_interrupt(env, -r, GETPC());
+-    }
+-    return r;
+-}
+-
+-void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
+-{
+-    uint64_t r;
+-
+-    switch (num) {
+-    case 0x500:
+-        /* KVM hypercall */
+-        qemu_mutex_lock_iothread();
+-        r = s390_virtio_hypercall(env);
+-        qemu_mutex_unlock_iothread();
+-        break;
+-    case 0x44:
+-        /* yield */
+-        r = 0;
+-        break;
+-    case 0x308:
+-        /* ipl */
+-        qemu_mutex_lock_iothread();
+-        handle_diag_308(env, r1, r3, GETPC());
+-        qemu_mutex_unlock_iothread();
+-        r = 0;
+-        break;
+-    case 0x288:
+-        /* time bomb (watchdog) */
+-        r = handle_diag_288(env, r1, r3);
+-        break;
+-    default:
+-        r = -1;
+-        break;
+-    }
+-
+-    if (r) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+-    }
+-}
+-
+-/* Set Prefix */
+-void HELPER(spx)(CPUS390XState *env, uint64_t a1)
+-{
+-    CPUState *cs = env_cpu(env);
+-    uint32_t prefix = a1 & 0x7fffe000;
+-
+-    env->psa = prefix;
+-    HELPER_LOG("prefix: %#x\n", prefix);
+-    tlb_flush_page(cs, 0);
+-    tlb_flush_page(cs, TARGET_PAGE_SIZE);
+-}
+-
+-static void update_ckc_timer(CPUS390XState *env)
+-{
+-    S390TODState *td = s390_get_todstate();
+-    uint64_t time;
+-
+-    /* stop the timer and remove pending CKC IRQs */
+-    timer_del(env->tod_timer);
+-    g_assert(qemu_mutex_iothread_locked());
+-    env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
+-
+-    /* the tod has to exceed the ckc, this can never happen if ckc is all 1's */
+-    if (env->ckc == -1ULL) {
+-        return;
+-    }
+-
+-    /* difference between origins */
+-    time = env->ckc - td->base.low;
+-
+-    /* nanoseconds */
+-    time = tod2time(time);
+-
+-    timer_mod(env->tod_timer, time);
+-}
+-
+-/* Set Clock Comparator */
+-void HELPER(sckc)(CPUS390XState *env, uint64_t ckc)
+-{
+-    env->ckc = ckc;
+-
+-    qemu_mutex_lock_iothread();
+-    update_ckc_timer(env);
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque)
+-{
+-    S390CPU *cpu = S390_CPU(cs);
+-
+-    update_ckc_timer(&cpu->env);
+-}
+-
+-/* Set Clock */
+-uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low)
+-{
+-    S390TODState *td = s390_get_todstate();
+-    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
+-    S390TOD tod = {
+-        .high = 0,
+-        .low = tod_low,
+-    };
+-
+-    qemu_mutex_lock_iothread();
+-    tdc->set(td, &tod, &error_abort);
+-    qemu_mutex_unlock_iothread();
+-    return 0;
+-}
+-
+-/* Set Tod Programmable Field */
+-void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
+-{
+-    uint32_t val = r0;
+-
+-    if (val & 0xffff0000) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
+-    }
+-    env->todpr = val;
+-}
+-
+-/* Store Clock Comparator */
+-uint64_t HELPER(stckc)(CPUS390XState *env)
+-{
+-    return env->ckc;
+-}
+-
+-/* Set CPU Timer */
+-void HELPER(spt)(CPUS390XState *env, uint64_t time)
+-{
+-    if (time == -1ULL) {
+-        return;
+-    }
+-
+-    /* nanoseconds */
+-    time = tod2time(time);
+-
+-    env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;
+-
+-    timer_mod(env->cpu_timer, env->cputm);
+-}
+-
+-/* Store System Information */
+-uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1)
+-{
+-    const uintptr_t ra = GETPC();
+-    const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK;
+-    const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK;
+-    const MachineState *ms = MACHINE(qdev_get_machine());
+-    uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
+-    S390CPU *cpu = env_archcpu(env);
+-    SysIB sysib = { };
+-    int i, cc = 0;
+-
+-    if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {
+-        /* invalid function code: no other checks are performed */
+-        return 3;
+-    }
+-
+-    if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+-    }
+-
+-    if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) {
+-        /* query the current level: no further checks are performed */
+-        env->regs[0] = STSI_R0_FC_LEVEL_3;
+-        return 0;
+-    }
+-
+-    if (a0 & ~TARGET_PAGE_MASK) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+-    }
+-
+-    /* count the cpus and split them into configured and reserved ones */
+-    for (i = 0; i < ms->possible_cpus->len; i++) {
+-        total_cpus++;
+-        if (ms->possible_cpus->cpus[i].cpu) {
+-            conf_cpus++;
+-        } else {
+-            reserved_cpus++;
+-        }
+-    }
+-
+-    /*
+-     * In theory, we could report Level 1 / Level 2 as current. However,
+-     * the Linux kernel will detect this as running under LPAR and assume
+-     * that we have a sclp linemode console (which is always present on
+-     * LPAR, but not the default for QEMU), therefore not displaying boot
+-     * messages and making booting a Linux kernel under TCG harder.
+-     *
+-     * For now we fake the same SMP configuration on all levels.
+-     *
+-     * TODO: We could later make the level configurable via the machine
+-     *       and change defaults (linemode console) based on machine type
+-     *       and accelerator.
+-     */
+-    switch (r0 & STSI_R0_FC_MASK) {
+-    case STSI_R0_FC_LEVEL_1:
+-        if ((sel1 == 1) && (sel2 == 1)) {
+-            /* Basic Machine Configuration */
+-            char type[5] = {};
+-
+-            ebcdic_put(sysib.sysib_111.manuf, "QEMU            ", 16);
+-            /* same as machine type number in STORE CPU ID, but in EBCDIC */
+-            snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type);
+-            ebcdic_put(sysib.sysib_111.type, type, 4);
+-            /* model number (not stored in STORE CPU ID for z/Architecure) */
+-            ebcdic_put(sysib.sysib_111.model, "QEMU            ", 16);
+-            ebcdic_put(sysib.sysib_111.sequence, "QEMU            ", 16);
+-            ebcdic_put(sysib.sysib_111.plant, "QEMU", 4);
+-        } else if ((sel1 == 2) && (sel2 == 1)) {
+-            /* Basic Machine CPU */
+-            ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16);
+-            ebcdic_put(sysib.sysib_121.plant, "QEMU", 4);
+-            sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id);
+-        } else if ((sel1 == 2) && (sel2 == 2)) {
+-            /* Basic Machine CPUs */
+-            sysib.sysib_122.capability = cpu_to_be32(0x443afc29);
+-            sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus);
+-            sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus);
+-            sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus);
+-        } else {
+-            cc = 3;
+-        }
+-        break;
+-    case STSI_R0_FC_LEVEL_2:
+-        if ((sel1 == 2) && (sel2 == 1)) {
+-            /* LPAR CPU */
+-            ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16);
+-            ebcdic_put(sysib.sysib_221.plant, "QEMU", 4);
+-            sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id);
+-        } else if ((sel1 == 2) && (sel2 == 2)) {
+-            /* LPAR CPUs */
+-            sysib.sysib_222.lcpuc = 0x80; /* dedicated */
+-            sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus);
+-            sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus);
+-            sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus);
+-            ebcdic_put(sysib.sysib_222.name, "QEMU    ", 8);
+-            sysib.sysib_222.caf = cpu_to_be32(1000);
+-            sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus);
+-        } else {
+-            cc = 3;
+-        }
+-        break;
+-    case STSI_R0_FC_LEVEL_3:
+-        if ((sel1 == 2) && (sel2 == 2)) {
+-            /* VM CPUs */
+-            sysib.sysib_322.count = 1;
+-            sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus);
+-            sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus);
+-            sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus);
+-            sysib.sysib_322.vm[0].caf = cpu_to_be32(1000);
+-            /* Linux kernel uses this to distinguish us from z/VM */
+-            ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux       ", 16);
+-            sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */
+-
+-            /* If our VM has a name, use the real name */
+-            if (qemu_name) {
+-                memset(sysib.sysib_322.vm[0].name, 0x40,
+-                       sizeof(sysib.sysib_322.vm[0].name));
+-                ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name,
+-                           MIN(sizeof(sysib.sysib_322.vm[0].name),
+-                               strlen(qemu_name)));
+-                strpadcpy((char *)sysib.sysib_322.ext_names[0],
+-                          sizeof(sysib.sysib_322.ext_names[0]),
+-                          qemu_name, '\0');
+-
+-            } else {
+-                ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8);
+-                strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest");
+-            }
+-
+-            /* add the uuid */
+-            memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid,
+-                   sizeof(sysib.sysib_322.vm[0].uuid));
+-        } else {
+-            cc = 3;
+-        }
+-        break;
+-    }
+-
+-    if (cc == 0) {
+-        if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) {
+-            s390_cpu_virt_mem_handle_exc(cpu, ra);
+-        }
+-    }
+-
+-    return cc;
+-}
+-
+-uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
+-                      uint32_t r3)
+-{
+-    int cc;
+-
+-    /* TODO: needed to inject interrupts - push further down */
+-    qemu_mutex_lock_iothread();
+-    cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3);
+-    qemu_mutex_unlock_iothread();
+-
+-    return cc;
+-}
+-#endif
+-
+-#ifndef CONFIG_USER_ONLY
+-void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_xsch(cpu, r1, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(csch)(CPUS390XState *env, uint64_t r1)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_csch(cpu, r1, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_hsch(cpu, r1, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_rchp(cpu, r1, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_rsch(cpu, r1, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(sal)(CPUS390XState *env, uint64_t r1)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_sal(cpu, r1, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr)
+-{
+-    const uintptr_t ra = GETPC();
+-    S390CPU *cpu = env_archcpu(env);
+-    QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic());
+-    QEMUS390FlicIO *io = NULL;
+-    LowCore *lowcore;
+-
+-    if (addr & 0x3) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+-    }
+-
+-    qemu_mutex_lock_iothread();
+-    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
+-    if (!io) {
+-        qemu_mutex_unlock_iothread();
+-        return 0;
+-    }
+-
+-    if (addr) {
+-        struct {
+-            uint16_t id;
+-            uint16_t nr;
+-            uint32_t parm;
+-        } intc = {
+-            .id = cpu_to_be16(io->id),
+-            .nr = cpu_to_be16(io->nr),
+-            .parm = cpu_to_be32(io->parm),
+-        };
+-
+-        if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) {
+-            /* writing failed, reinject and properly clean up */
+-            s390_io_interrupt(io->id, io->nr, io->parm, io->word);
+-            qemu_mutex_unlock_iothread();
+-            g_free(io);
+-            s390_cpu_virt_mem_handle_exc(cpu, ra);
+-            return 0;
+-        }
+-    } else {
+-        /* no protection applies */
+-        lowcore = cpu_map_lowcore(env);
+-        lowcore->subchannel_id = cpu_to_be16(io->id);
+-        lowcore->subchannel_nr = cpu_to_be16(io->nr);
+-        lowcore->io_int_parm = cpu_to_be32(io->parm);
+-        lowcore->io_int_word = cpu_to_be32(io->word);
+-        cpu_unmap_lowcore(lowcore);
+-    }
+-
+-    g_free(io);
+-    qemu_mutex_unlock_iothread();
+-    return 1;
+-}
+-
+-void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-    qemu_mutex_lock_iothread();
+-    ioinst_handle_chsc(cpu, inst >> 16, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-#endif
+-
+-#ifndef CONFIG_USER_ONLY
+-void HELPER(per_check_exception)(CPUS390XState *env)
+-{
+-    if (env->per_perc_atmid) {
+-        tcg_s390_program_interrupt(env, PGM_PER, GETPC());
+-    }
+-}
+-
+-/* Check if an address is within the PER starting address and the PER
+-   ending address. The address range might loop. */
+-static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
+-{
+-    if (env->cregs[10] <= env->cregs[11]) {
+-        return env->cregs[10] <= addr && addr <= env->cregs[11];
+-    } else {
+-        return env->cregs[10] <= addr || addr <= env->cregs[11];
+-    }
+-}
+-
+-void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
+-{
+-    if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
+-        if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
+-            || get_per_in_range(env, to)) {
+-            env->per_address = from;
+-            env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
+-        }
+-    }
+-}
+-
+-void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
+-{
+-    if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
+-        env->per_address = addr;
+-        env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);
+-
+-        /* If the instruction has to be nullified, trigger the
+-           exception immediately. */
+-        if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
+-            CPUState *cs = env_cpu(env);
+-
+-            env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION;
+-            env->int_pgm_code = PGM_PER;
+-            env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));
+-
+-            cs->exception_index = EXCP_PGM;
+-            cpu_loop_exit(cs);
+-        }
+-    }
+-}
+-
+-void HELPER(per_store_real)(CPUS390XState *env)
+-{
+-    if ((env->cregs[9] & PER_CR9_EVENT_STORE) &&
+-        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
+-        /* PSW is saved just before calling the helper. */
+-        env->per_address = env->psw.addr;
+-        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
+-    }
+-}
+-#endif
+-
+-static uint8_t stfl_bytes[2048];
+-static unsigned int used_stfl_bytes;
+-
+-static void prepare_stfl(void)
+-{
+-    static bool initialized;
+-    int i;
+-
+-    /* racy, but we don't care, the same values are always written */
+-    if (initialized) {
+-        return;
+-    }
+-
+-    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
+-    for (i = 0; i < sizeof(stfl_bytes); i++) {
+-        if (stfl_bytes[i]) {
+-            used_stfl_bytes = i + 1;
+-        }
+-    }
+-    initialized = true;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-void HELPER(stfl)(CPUS390XState *env)
+-{
+-    LowCore *lowcore;
+-
+-    lowcore = cpu_map_lowcore(env);
+-    prepare_stfl();
+-    memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list));
+-    cpu_unmap_lowcore(lowcore);
+-}
+-#endif
+-
+-uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
+-{
+-    const uintptr_t ra = GETPC();
+-    const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8;
+-    int max_bytes;
+-    int i;
+-
+-    if (addr & 0x7) {
+-        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
+-    }
+-
+-    prepare_stfl();
+-    max_bytes = ROUND_UP(used_stfl_bytes, 8);
+-
+-    /*
+-     * The PoP says that doublewords beyond the highest-numbered facility
+-     * bit may or may not be stored. However, existing hardware appears to
+-     * not store the words, and existing software depend on that.
+-     */
+-    for (i = 0; i < MIN(count_bytes, max_bytes); ++i) {
+-        cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra);
+-    }
+-
+-    env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1);
+-    return count_bytes >= max_bytes ? 0 : 3;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-/*
+- * Note: we ignore any return code of the functions called for the pci
+- * instructions, as the only time they return !0 is when the stub is
+- * called, and in that case we didn't even offer the zpci facility.
+- * The only exception is SIC, where program checks need to be handled
+- * by the caller.
+- */
+-void HELPER(clp)(CPUS390XState *env, uint32_t r2)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-
+-    qemu_mutex_lock_iothread();
+-    clp_service_call(cpu, r2, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-
+-    qemu_mutex_lock_iothread();
+-    pcilg_service_call(cpu, r1, r2, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-
+-    qemu_mutex_lock_iothread();
+-    pcistg_service_call(cpu, r1, r2, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
+-                     uint32_t ar)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-
+-    qemu_mutex_lock_iothread();
+-    stpcifc_service_call(cpu, r1, fiba, ar, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3)
+-{
+-    int r;
+-
+-    qemu_mutex_lock_iothread();
+-    r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff);
+-    qemu_mutex_unlock_iothread();
+-    /* css_do_sic() may actually return a PGM_xxx value to inject */
+-    if (r) {
+-        tcg_s390_program_interrupt(env, -r, GETPC());
+-    }
+-}
+-
+-void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-
+-    qemu_mutex_lock_iothread();
+-    rpcit_service_call(cpu, r1, r2, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3,
+-                    uint64_t gaddr, uint32_t ar)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-
+-    qemu_mutex_lock_iothread();
+-    pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-
+-void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba,
+-                    uint32_t ar)
+-{
+-    S390CPU *cpu = env_archcpu(env);
+-
+-    qemu_mutex_lock_iothread();
+-    mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
+-    qemu_mutex_unlock_iothread();
+-}
+-#endif
+diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
+index d492b23a17..d779a9fc51 100644
+--- a/target/s390x/mmu_helper.c
++++ b/target/s390x/mmu_helper.c
+@@ -19,8 +19,8 @@
+ #include "qemu/error-report.h"
+ #include "exec/address-spaces.h"
+ #include "cpu.h"
+-#include "internal.h"
+-#include "kvm_s390x.h"
++#include "s390x-internal.h"
++#include "kvm/kvm_s390x.h"
+ #include "sysemu/kvm.h"
+ #include "sysemu/tcg.h"
+ #include "exec/exec-all.h"
+diff --git a/target/s390x/s390-tod.h b/target/s390x/s390-tod.h
+deleted file mode 100644
+index 8b74d6a6d8..0000000000
+--- a/target/s390x/s390-tod.h
++++ /dev/null
+@@ -1,29 +0,0 @@
+-/*
+- * TOD (Time Of Day) clock
+- *
+- * Copyright 2018 Red Hat, Inc.
+- * Author(s): David Hildenbrand <david@redhat.com>
+- *
+- * This work is licensed under the terms of the GNU GPL, version 2 or later.
+- * See the COPYING file in the top-level directory.
+- */
+-
+-#ifndef TARGET_S390_TOD_H
+-#define TARGET_S390_TOD_H
+-
+-/* The value of the TOD clock for 1.1.1970. */
+-#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
+-
+-/* Converts ns to s390's clock format */
+-static inline uint64_t time2tod(uint64_t ns)
+-{
+-    return (ns << 9) / 125 + (((ns & 0xff80000000000000ull) / 125) << 9);
+-}
+-
+-/* Converts s390's clock format to ns */
+-static inline uint64_t tod2time(uint64_t t)
+-{
+-    return ((t >> 9) * 125) + (((t & 0x1ff) * 125) >> 9);
+-}
+-
+-#endif
+diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
+new file mode 100644
+index 0000000000..5506f185e8
+--- /dev/null
++++ b/target/s390x/s390x-internal.h
+@@ -0,0 +1,396 @@
++/*
++ * s390x internal definitions and helpers
++ *
++ * Copyright (c) 2009 Ulrich Hecht
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2 or later.
++ * See the COPYING file in the top-level directory.
++ */
++
++#ifndef S390X_INTERNAL_H
++#define S390X_INTERNAL_H
++
++#include "cpu.h"
++
++#ifndef CONFIG_USER_ONLY
++typedef struct LowCore {
++    /* prefix area: defined by architecture */
++    uint32_t ccw1[2]; /* 0x000 */
++    uint32_t ccw2[4]; /* 0x008 */
++    uint8_t pad1[0x80 - 0x18]; /* 0x018 */
++    uint32_t ext_params; /* 0x080 */
++    uint16_t cpu_addr; /* 0x084 */
++    uint16_t ext_int_code; /* 0x086 */
++    uint16_t svc_ilen; /* 0x088 */
++    uint16_t svc_code; /* 0x08a */
++    uint16_t pgm_ilen; /* 0x08c */
++    uint16_t pgm_code; /* 0x08e */
++    uint32_t data_exc_code; /* 0x090 */
++    uint16_t mon_class_num; /* 0x094 */
++    uint16_t per_perc_atmid; /* 0x096 */
++    uint64_t per_address; /* 0x098 */
++    uint8_t exc_access_id; /* 0x0a0 */
++    uint8_t per_access_id; /* 0x0a1 */
++    uint8_t op_access_id; /* 0x0a2 */
++    uint8_t ar_access_id; /* 0x0a3 */
++    uint8_t pad2[0xA8 - 0xA4]; /* 0x0a4 */
++    uint64_t trans_exc_code; /* 0x0a8 */
++    uint64_t monitor_code; /* 0x0b0 */
++    uint16_t subchannel_id; /* 0x0b8 */
++    uint16_t subchannel_nr; /* 0x0ba */
++    uint32_t io_int_parm; /* 0x0bc */
++    uint32_t io_int_word; /* 0x0c0 */
++    uint8_t pad3[0xc8 - 0xc4]; /* 0x0c4 */
++    uint32_t stfl_fac_list; /* 0x0c8 */
++    uint8_t pad4[0xe8 - 0xcc]; /* 0x0cc */
++    uint64_t mcic; /* 0x0e8 */
++    uint8_t pad5[0xf4 - 0xf0]; /* 0x0f0 */
++    uint32_t external_damage_code; /* 0x0f4 */
++    uint64_t failing_storage_address; /* 0x0f8 */
++    uint8_t pad6[0x110 - 0x100]; /* 0x100 */
++    uint64_t per_breaking_event_addr; /* 0x110 */
++    uint8_t pad7[0x120 - 0x118]; /* 0x118 */
++    PSW restart_old_psw; /* 0x120 */
++    PSW external_old_psw; /* 0x130 */
++    PSW svc_old_psw; /* 0x140 */
++    PSW program_old_psw; /* 0x150 */
++    PSW mcck_old_psw; /* 0x160 */
++    PSW io_old_psw; /* 0x170 */
++    uint8_t pad8[0x1a0 - 0x180]; /* 0x180 */
++    PSW restart_new_psw; /* 0x1a0 */
++    PSW external_new_psw; /* 0x1b0 */
++    PSW svc_new_psw; /* 0x1c0 */
++    PSW program_new_psw; /* 0x1d0 */
++    PSW mcck_new_psw; /* 0x1e0 */
++    PSW io_new_psw; /* 0x1f0 */
++    uint8_t pad13[0x11b0 - 0x200]; /* 0x200 */
++
++    uint64_t mcesad; /* 0x11B0 */
++
++    /* 64 bit extparam used for pfault, diag 250 etc */
++    uint64_t ext_params2; /* 0x11B8 */
++
++    uint8_t pad14[0x1200 - 0x11C0]; /* 0x11C0 */
++
++    /* System info area */
++
++    uint64_t floating_pt_save_area[16]; /* 0x1200 */
++    uint64_t gpregs_save_area[16]; /* 0x1280 */
++    uint32_t st_status_fixed_logout[4]; /* 0x1300 */
++    uint8_t pad15[0x1318 - 0x1310]; /* 0x1310 */
++    uint32_t prefixreg_save_area; /* 0x1318 */
++    uint32_t fpt_creg_save_area; /* 0x131c */
++    uint8_t pad16[0x1324 - 0x1320]; /* 0x1320 */
++    uint32_t tod_progreg_save_area; /* 0x1324 */
++    uint64_t cpu_timer_save_area; /* 0x1328 */
++    uint64_t clock_comp_save_area; /* 0x1330 */
0x1330 */
++    uint8_t pad17[0x1340 - 0x1338];       /* 0x1338 */
++    uint32_t access_regs_save_area[16];   /* 0x1340 */
++    uint64_t cregs_save_area[16];         /* 0x1380 */
++
++    /* align to the top of the prefix area */
++
++    uint8_t pad18[0x2000 - 0x1400];       /* 0x1400 */
++} QEMU_PACKED LowCore;
++QEMU_BUILD_BUG_ON(sizeof(LowCore) != 8192);
++#endif /* CONFIG_USER_ONLY */
++
++#define MAX_ILEN 6
++
++/* While the PoO talks about ILC (a number between 1-3) what is actually
++   stored in LowCore is shifted left one bit (an even number between 2-6). As
++   this is the actual length of the insn and therefore more useful, that
++   is what we want to pass around and manipulate. To make sure that we
++   have applied this distinction universally, rename the "ILC" to "ILEN". */
++static inline int get_ilen(uint8_t opc)
++{
++    switch (opc >> 6) {
++    case 0:
++        return 2;
++    case 1:
++    case 2:
++        return 4;
++    default:
++        return 6;
++    }
++}
++
++/* Compute the ATMID field that is stored in the per_perc_atmid lowcore
++   entry when a PER exception is triggered. */
++static inline uint8_t get_per_atmid(CPUS390XState *env)
++{
++    return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) |
++           (1 << 6) |
++           ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) |
++           ((env->psw.mask & PSW_MASK_DAT) ? (1 << 4) : 0) |
++           ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) |
++           ((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0);
++}
++
++static inline uint64_t wrap_address(CPUS390XState *env, uint64_t a)
++{
++    if (!(env->psw.mask & PSW_MASK_64)) {
++        if (!(env->psw.mask & PSW_MASK_32)) {
++            /* 24-Bit mode */
++            a &= 0x00ffffff;
++        } else {
++            /* 31-Bit mode */
++            a &= 0x7fffffff;
++        }
++    }
++    return a;
++}
++
++/* CC optimization */
++
++/* Instead of computing the condition codes after each s390x instruction,
++ * QEMU just stores the result (called CC_DST), the type of operation
++ * (called CC_OP) and whatever operands are needed (CC_SRC and possibly
++ * CC_VR). When the condition codes are needed, they can be calculated
++ * using this information. Condition codes are not generated
++ * if they are only needed for conditional branches.
++ */
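
A quick illustration of get_ilen() above: the two top bits of the first opcode byte alone determine whether an instruction is 2, 4 or 6 bytes long. A standalone check with three well-known opcodes (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* same dispatch as get_ilen(): opcode bits 0-1 encode the length */
    static int ilen_of(uint8_t opc)
    {
        switch (opc >> 6) {
        case 0:
            return 2;
        case 1:
        case 2:
            return 4;
        default:
            return 6;
        }
    }

    int main(void)
    {
        assert(ilen_of(0x07) == 2);   /* BCR, RR format */
        assert(ilen_of(0x47) == 4);   /* BC,  RX format */
        assert(ilen_of(0xe3) == 6);   /* LG,  RXY format */
        return 0;
    }
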
++enum cc_op {
++    CC_OP_CONST0 = 0,           /* CC is 0 */
++    CC_OP_CONST1,               /* CC is 1 */
++    CC_OP_CONST2,               /* CC is 2 */
++    CC_OP_CONST3,               /* CC is 3 */
++
++    CC_OP_DYNAMIC,              /* CC calculation defined by env->cc_op */
++    CC_OP_STATIC,               /* CC value is env->cc_op */
++
++    CC_OP_NZ,                   /* env->cc_dst != 0 */
++    CC_OP_ADDU,                 /* dst != 0, src = carry out (0,1) */
++    CC_OP_SUBU,                 /* dst != 0, src = borrow out (0,-1) */
++
++    CC_OP_LTGT_32,              /* signed less/greater than (32bit) */
++    CC_OP_LTGT_64,              /* signed less/greater than (64bit) */
++    CC_OP_LTUGTU_32,            /* unsigned less/greater than (32bit) */
++    CC_OP_LTUGTU_64,            /* unsigned less/greater than (64bit) */
++    CC_OP_LTGT0_32,             /* signed less/greater than 0 (32bit) */
++    CC_OP_LTGT0_64,             /* signed less/greater than 0 (64bit) */
++
++    CC_OP_ADD_64,               /* overflow on add (64bit) */
++    CC_OP_SUB_64,               /* overflow on subtraction (64bit) */
++    CC_OP_ABS_64,               /* sign eval on abs (64bit) */
++    CC_OP_NABS_64,              /* sign eval on nabs (64bit) */
++    CC_OP_MULS_64,              /* overflow on signed multiply (64bit) */
++
++    CC_OP_ADD_32,               /* overflow on add (32bit) */
++    CC_OP_SUB_32,               /* overflow on subtraction (32bit) */
++    CC_OP_ABS_32,               /* sign eval on abs (32bit) */
++    CC_OP_NABS_32,              /* sign eval on nabs (32bit) */
++    CC_OP_MULS_32,              /* overflow on signed multiply (32bit) */
++
++    CC_OP_COMP_32,              /* complement */
++    CC_OP_COMP_64,              /* complement */
++
++    CC_OP_TM_32,                /* test under mask (32bit) */
++    CC_OP_TM_64,                /* test under mask (64bit) */
++
++    CC_OP_NZ_F32,               /* FP dst != 0 (32bit) */
++    CC_OP_NZ_F64,               /* FP dst != 0 (64bit) */
++    CC_OP_NZ_F128,              /* FP dst != 0 (128bit) */
++
++    CC_OP_ICM,                  /* insert characters under mask */
++    CC_OP_SLA_32,               /* Calculate shift left signed (32bit) */
++    CC_OP_SLA_64,               /* Calculate shift left signed (64bit) */
++    CC_OP_FLOGR,                /* find leftmost one */
++    CC_OP_LCBB,                 /* load count to block boundary */
++    CC_OP_VC,                   /* vector compare result */
++    CC_OP_MAX
++};
++
++#ifndef CONFIG_USER_ONLY
++
++static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
++                                       uint8_t *ar)
++{
++    hwaddr addr = 0;
++    uint8_t reg;
++
++    reg = ipb >> 28;
++    if (reg > 0) {
++        addr = env->regs[reg];
++    }
++    addr += (ipb >> 16) & 0xfff;
++    if (ar) {
++        *ar = reg;
++    }
++
++    return addr;
++}
++
++/* Base/displacement are at the same locations.
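
decode_basedisp_s() above slices the base register number and the 12-bit displacement straight out of the instruction parameter block: bits 28-31 of the IPB name the base register, bits 16-27 the displacement. A worked example with a made-up IPB value (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t ipb = 0xb2340000;            /* hypothetical IPB */
        uint8_t reg = ipb >> 28;              /* base register 0xb */
        uint16_t disp = (ipb >> 16) & 0xfff;  /* displacement 0x234 */

        assert(reg == 0xb && disp == 0x234);
        /* effective address = regs[0xb] + 0x234; base register 0 adds 0 */
        return 0;
    }
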
*/ ++#define decode_basedisp_rs decode_basedisp_s ++ ++#endif /* CONFIG_USER_ONLY */ ++ ++/* arch_dump.c */ ++int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, ++ int cpuid, void *opaque); ++ ++ ++/* cc_helper.c */ ++const char *cc_name(enum cc_op cc_op); ++uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, ++ uint64_t vr); ++ ++/* cpu.c */ ++#ifndef CONFIG_USER_ONLY ++unsigned int s390_cpu_halt(S390CPU *cpu); ++void s390_cpu_unhalt(S390CPU *cpu); ++void s390_cpu_init_sysemu(Object *obj); ++bool s390_cpu_realize_sysemu(DeviceState *dev, Error **errp); ++void s390_cpu_finalize(Object *obj); ++void s390_cpu_class_init_sysemu(CPUClass *cc); ++void s390_cpu_machine_reset_cb(void *opaque); ++ ++#else ++static inline unsigned int s390_cpu_halt(S390CPU *cpu) ++{ ++ return 0; ++} ++ ++static inline void s390_cpu_unhalt(S390CPU *cpu) ++{ ++} ++#endif /* CONFIG_USER_ONLY */ ++ ++ ++/* cpu_models.c */ ++void s390_cpu_model_class_register_props(ObjectClass *oc); ++void s390_realize_cpu_model(CPUState *cs, Error **errp); ++S390CPUModel *get_max_cpu_model(Error **errp); ++void apply_cpu_model(const S390CPUModel *model, Error **errp); ++ObjectClass *s390_cpu_class_by_name(const char *name); ++ ++ ++/* excp_helper.c */ ++void s390x_cpu_debug_excp_handler(CPUState *cs); ++void s390_cpu_do_interrupt(CPUState *cpu); ++bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req); ++bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, ++ MMUAccessType access_type, int mmu_idx, ++ bool probe, uintptr_t retaddr); ++void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, ++ MMUAccessType access_type, ++ int mmu_idx, uintptr_t retaddr); ++ ++ ++/* fpu_helper.c */ ++uint32_t set_cc_nz_f32(float32 v); ++uint32_t set_cc_nz_f64(float64 v); ++uint32_t set_cc_nz_f128(float128 v); ++#define S390_IEEE_MASK_INVALID 0x80 ++#define S390_IEEE_MASK_DIVBYZERO 0x40 ++#define S390_IEEE_MASK_OVERFLOW 0x20 ++#define S390_IEEE_MASK_UNDERFLOW 0x10 ++#define S390_IEEE_MASK_INEXACT 0x08 ++#define S390_IEEE_MASK_QUANTUM 0x04 ++uint8_t s390_softfloat_exc_to_ieee(unsigned int exc); ++int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3); ++void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode); ++int float_comp_to_cc(CPUS390XState *env, int float_compare); ++ ++#define DCMASK_ZERO 0x0c00 ++#define DCMASK_NORMAL 0x0300 ++#define DCMASK_SUBNORMAL 0x00c0 ++#define DCMASK_INFINITY 0x0030 ++#define DCMASK_QUIET_NAN 0x000c ++#define DCMASK_SIGNALING_NAN 0x0003 ++#define DCMASK_NAN 0x000f ++#define DCMASK_NEGATIVE 0x0555 ++uint16_t float32_dcmask(CPUS390XState *env, float32 f1); ++uint16_t float64_dcmask(CPUS390XState *env, float64 f1); ++uint16_t float128_dcmask(CPUS390XState *env, float128 f1); ++ ++ ++/* gdbstub.c */ ++int s390_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); ++int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); ++void s390_cpu_gdb_init(CPUState *cs); ++ ++ ++/* helper.c */ ++void s390_cpu_dump_state(CPUState *cpu, FILE *f, int flags); ++void do_restart_interrupt(CPUS390XState *env); ++#ifndef CONFIG_USER_ONLY ++void s390_cpu_recompute_watchpoints(CPUState *cs); ++void s390x_tod_timer(void *opaque); ++void s390x_cpu_timer(void *opaque); ++void s390_handle_wait(S390CPU *cpu); ++hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); ++hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr); ++#define S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area) ++int s390_store_status(S390CPU 
*cpu, hwaddr addr, bool store_arch); ++int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len); ++LowCore *cpu_map_lowcore(CPUS390XState *env); ++void cpu_unmap_lowcore(LowCore *lowcore); ++#endif /* CONFIG_USER_ONLY */ ++ ++ ++/* interrupt.c */ ++void trigger_pgm_exception(CPUS390XState *env, uint32_t code); ++void cpu_inject_clock_comparator(S390CPU *cpu); ++void cpu_inject_cpu_timer(S390CPU *cpu); ++void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr); ++int cpu_inject_external_call(S390CPU *cpu, uint16_t src_cpu_addr); ++bool s390_cpu_has_io_int(S390CPU *cpu); ++bool s390_cpu_has_ext_int(S390CPU *cpu); ++bool s390_cpu_has_mcck_int(S390CPU *cpu); ++bool s390_cpu_has_int(S390CPU *cpu); ++bool s390_cpu_has_restart_int(S390CPU *cpu); ++bool s390_cpu_has_stop_int(S390CPU *cpu); ++void cpu_inject_restart(S390CPU *cpu); ++void cpu_inject_stop(S390CPU *cpu); ++ ++ ++/* ioinst.c */ ++void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); ++void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); ++void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); ++void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, ++ uintptr_t ra); ++void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, ++ uintptr_t ra); ++void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra); ++void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, ++ uintptr_t ra); ++int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra); ++void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra); ++void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2, ++ uint32_t ipb, uintptr_t ra); ++void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra); ++void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra); ++void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra); ++ ++ ++/* mem_helper.c */ ++target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr); ++void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, ++ uintptr_t ra); ++ ++ ++/* mmu_helper.c */ ++int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc, ++ target_ulong *raddr, int *flags, uint64_t *tec); ++int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw, ++ target_ulong *addr, int *flags, uint64_t *tec); ++ ++ ++/* misc_helper.c */ ++int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3); ++void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, ++ uintptr_t ra); ++ ++ ++/* translate.c */ ++void s390x_translate_init(void); ++ ++ ++/* sigp.c */ ++int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3); ++void do_stop_interrupt(CPUS390XState *env); ++ ++#endif /* S390X_INTERNAL_H */ +diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c +index c2d5cdf061..d57427ced8 100644 +--- a/target/s390x/sigp.c ++++ b/target/s390x/sigp.c +@@ -10,7 +10,7 @@ + + #include "qemu/osdep.h" + #include "cpu.h" +-#include "internal.h" ++#include "s390x-internal.h" + #include "sysemu/hw_accel.h" + #include "sysemu/runstate.h" + #include "exec/address-spaces.h" +diff --git a/target/s390x/tcg-stub.c b/target/s390x/tcg-stub.c +deleted file mode 100644 +index d22c898802..0000000000 +--- a/target/s390x/tcg-stub.c ++++ /dev/null +@@ -1,30 +0,0 @@ +-/* +- * QEMU TCG support -- s390x specific function stubs. 
+- * +- * Copyright (C) 2018 Red Hat Inc +- * +- * Authors: +- * David Hildenbrand +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. +- */ +- +-#include "qemu/osdep.h" +-#include "qemu-common.h" +-#include "cpu.h" +-#include "tcg_s390x.h" +- +-void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque) +-{ +-} +-void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, +- uint32_t code, uintptr_t ra) +-{ +- g_assert_not_reached(); +-} +-void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, +- uintptr_t ra) +-{ +- g_assert_not_reached(); +-} +diff --git a/target/s390x/tcg/cc_helper.c b/target/s390x/tcg/cc_helper.c +new file mode 100644 +index 0000000000..c2c96c3a3c +--- /dev/null ++++ b/target/s390x/tcg/cc_helper.c +@@ -0,0 +1,538 @@ ++/* ++ * S/390 condition code helper routines ++ * ++ * Copyright (c) 2009 Ulrich Hecht ++ * Copyright (c) 2009 Alexander Graf ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, see . ++ */ ++ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "tcg_s390x.h" ++#include "exec/exec-all.h" ++#include "exec/helper-proto.h" ++#include "qemu/host-utils.h" ++ ++/* #define DEBUG_HELPER */ ++#ifdef DEBUG_HELPER ++#define HELPER_LOG(x...) qemu_log(x) ++#else ++#define HELPER_LOG(x...) 
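
For reference while reading the cc_calc_* helpers that follow: s390 comparisons fold a three-way result into the condition code, 0 for operands equal, 1 for first operand low, 2 for first operand high (plain compares never produce CC 3). A standalone restatement (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* mirrors cc_calc_ltgt_32() below: three-way compare as a CC value */
    static uint32_t cc_of_compare(int32_t a, int32_t b)
    {
        return (a == b) ? 0 : (a < b) ? 1 : 2;
    }

    int main(void)
    {
        assert(cc_of_compare(5, 5) == 0);
        assert(cc_of_compare(-1, 0) == 1);
        assert(cc_of_compare(7, 3) == 2);
        return 0;
    }
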
++#endif ++ ++static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst) ++{ ++ if (src == dst) { ++ return 0; ++ } else if (src < dst) { ++ return 1; ++ } else { ++ return 2; ++ } ++} ++ ++static uint32_t cc_calc_ltgt0_32(int32_t dst) ++{ ++ return cc_calc_ltgt_32(dst, 0); ++} ++ ++static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst) ++{ ++ if (src == dst) { ++ return 0; ++ } else if (src < dst) { ++ return 1; ++ } else { ++ return 2; ++ } ++} ++ ++static uint32_t cc_calc_ltgt0_64(int64_t dst) ++{ ++ return cc_calc_ltgt_64(dst, 0); ++} ++ ++static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst) ++{ ++ if (src == dst) { ++ return 0; ++ } else if (src < dst) { ++ return 1; ++ } else { ++ return 2; ++ } ++} ++ ++static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst) ++{ ++ if (src == dst) { ++ return 0; ++ } else if (src < dst) { ++ return 1; ++ } else { ++ return 2; ++ } ++} ++ ++static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask) ++{ ++ uint32_t r = val & mask; ++ ++ if (r == 0) { ++ return 0; ++ } else if (r == mask) { ++ return 3; ++ } else { ++ return 1; ++ } ++} ++ ++static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask) ++{ ++ uint64_t r = val & mask; ++ ++ if (r == 0) { ++ return 0; ++ } else if (r == mask) { ++ return 3; ++ } else { ++ int top = clz64(mask); ++ if ((int64_t)(val << top) < 0) { ++ return 2; ++ } else { ++ return 1; ++ } ++ } ++} ++ ++static uint32_t cc_calc_nz(uint64_t dst) ++{ ++ return !!dst; ++} ++ ++static uint32_t cc_calc_addu(uint64_t carry_out, uint64_t result) ++{ ++ g_assert(carry_out <= 1); ++ return (result != 0) + 2 * carry_out; ++} ++ ++static uint32_t cc_calc_subu(uint64_t borrow_out, uint64_t result) ++{ ++ return cc_calc_addu(borrow_out + 1, result); ++} ++ ++static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar) ++{ ++ if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { ++ return 3; /* overflow */ ++ } else { ++ if (ar < 0) { ++ return 1; ++ } else if (ar > 0) { ++ return 2; ++ } else { ++ return 0; ++ } ++ } ++} ++ ++static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar) ++{ ++ if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { ++ return 3; /* overflow */ ++ } else { ++ if (ar < 0) { ++ return 1; ++ } else if (ar > 0) { ++ return 2; ++ } else { ++ return 0; ++ } ++ } ++} ++ ++static uint32_t cc_calc_abs_64(int64_t dst) ++{ ++ if ((uint64_t)dst == 0x8000000000000000ULL) { ++ return 3; ++ } else if (dst) { ++ return 2; ++ } else { ++ return 0; ++ } ++} ++ ++static uint32_t cc_calc_nabs_64(int64_t dst) ++{ ++ return !!dst; ++} ++ ++static uint32_t cc_calc_comp_64(int64_t dst) ++{ ++ if ((uint64_t)dst == 0x8000000000000000ULL) { ++ return 3; ++ } else if (dst < 0) { ++ return 1; ++ } else if (dst > 0) { ++ return 2; ++ } else { ++ return 0; ++ } ++} ++ ++ ++static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar) ++{ ++ if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) { ++ return 3; /* overflow */ ++ } else { ++ if (ar < 0) { ++ return 1; ++ } else if (ar > 0) { ++ return 2; ++ } else { ++ return 0; ++ } ++ } ++} ++ ++static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar) ++{ ++ if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) { ++ return 3; /* overflow */ ++ } else { ++ if (ar < 0) { ++ return 1; ++ } else if (ar > 0) { ++ return 2; ++ } else { ++ return 0; ++ } ++ } ++} ++ ++static uint32_t cc_calc_abs_32(int32_t dst) ++{ ++ if ((uint32_t)dst == 0x80000000UL) { ++ return 3; ++ } else if (dst) { ++ return 2; ++ } 
else { ++ return 0; ++ } ++} ++ ++static uint32_t cc_calc_nabs_32(int32_t dst) ++{ ++ return !!dst; ++} ++ ++static uint32_t cc_calc_comp_32(int32_t dst) ++{ ++ if ((uint32_t)dst == 0x80000000UL) { ++ return 3; ++ } else if (dst < 0) { ++ return 1; ++ } else if (dst > 0) { ++ return 2; ++ } else { ++ return 0; ++ } ++} ++ ++/* calculate condition code for insert character under mask insn */ ++static uint32_t cc_calc_icm(uint64_t mask, uint64_t val) ++{ ++ if ((val & mask) == 0) { ++ return 0; ++ } else { ++ int top = clz64(mask); ++ if ((int64_t)(val << top) < 0) { ++ return 1; ++ } else { ++ return 2; ++ } ++ } ++} ++ ++static uint32_t cc_calc_sla_32(uint32_t src, int shift) ++{ ++ uint32_t mask = ((1U << shift) - 1U) << (32 - shift); ++ uint32_t sign = 1U << 31; ++ uint32_t match; ++ int32_t r; ++ ++ /* Check if the sign bit stays the same. */ ++ if (src & sign) { ++ match = mask; ++ } else { ++ match = 0; ++ } ++ if ((src & mask) != match) { ++ /* Overflow. */ ++ return 3; ++ } ++ ++ r = ((src << shift) & ~sign) | (src & sign); ++ if (r == 0) { ++ return 0; ++ } else if (r < 0) { ++ return 1; ++ } ++ return 2; ++} ++ ++static uint32_t cc_calc_sla_64(uint64_t src, int shift) ++{ ++ uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift); ++ uint64_t sign = 1ULL << 63; ++ uint64_t match; ++ int64_t r; ++ ++ /* Check if the sign bit stays the same. */ ++ if (src & sign) { ++ match = mask; ++ } else { ++ match = 0; ++ } ++ if ((src & mask) != match) { ++ /* Overflow. */ ++ return 3; ++ } ++ ++ r = ((src << shift) & ~sign) | (src & sign); ++ if (r == 0) { ++ return 0; ++ } else if (r < 0) { ++ return 1; ++ } ++ return 2; ++} ++ ++static uint32_t cc_calc_flogr(uint64_t dst) ++{ ++ return dst ? 2 : 0; ++} ++ ++static uint32_t cc_calc_lcbb(uint64_t dst) ++{ ++ return dst == 16 ? 
0 : 3; ++} ++ ++static uint32_t cc_calc_vc(uint64_t low, uint64_t high) ++{ ++ if (high == -1ull && low == -1ull) { ++ /* all elements match */ ++ return 0; ++ } else if (high == 0 && low == 0) { ++ /* no elements match */ ++ return 3; ++ } else { ++ /* some elements but not all match */ ++ return 1; ++ } ++} ++ ++static uint32_t cc_calc_muls_32(int64_t res) ++{ ++ const int64_t tmp = res >> 31; ++ ++ if (!res) { ++ return 0; ++ } else if (tmp && tmp != -1) { ++ return 3; ++ } else if (res < 0) { ++ return 1; ++ } ++ return 2; ++} ++ ++static uint64_t cc_calc_muls_64(int64_t res_high, uint64_t res_low) ++{ ++ if (!res_high && !res_low) { ++ return 0; ++ } else if (res_high + (res_low >> 63) != 0) { ++ return 3; ++ } else if (res_high < 0) { ++ return 1; ++ } ++ return 2; ++} ++ ++static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op, ++ uint64_t src, uint64_t dst, uint64_t vr) ++{ ++ uint32_t r = 0; ++ ++ switch (cc_op) { ++ case CC_OP_CONST0: ++ case CC_OP_CONST1: ++ case CC_OP_CONST2: ++ case CC_OP_CONST3: ++ /* cc_op value _is_ cc */ ++ r = cc_op; ++ break; ++ case CC_OP_LTGT0_32: ++ r = cc_calc_ltgt0_32(dst); ++ break; ++ case CC_OP_LTGT0_64: ++ r = cc_calc_ltgt0_64(dst); ++ break; ++ case CC_OP_LTGT_32: ++ r = cc_calc_ltgt_32(src, dst); ++ break; ++ case CC_OP_LTGT_64: ++ r = cc_calc_ltgt_64(src, dst); ++ break; ++ case CC_OP_LTUGTU_32: ++ r = cc_calc_ltugtu_32(src, dst); ++ break; ++ case CC_OP_LTUGTU_64: ++ r = cc_calc_ltugtu_64(src, dst); ++ break; ++ case CC_OP_TM_32: ++ r = cc_calc_tm_32(src, dst); ++ break; ++ case CC_OP_TM_64: ++ r = cc_calc_tm_64(src, dst); ++ break; ++ case CC_OP_NZ: ++ r = cc_calc_nz(dst); ++ break; ++ case CC_OP_ADDU: ++ r = cc_calc_addu(src, dst); ++ break; ++ case CC_OP_SUBU: ++ r = cc_calc_subu(src, dst); ++ break; ++ case CC_OP_ADD_64: ++ r = cc_calc_add_64(src, dst, vr); ++ break; ++ case CC_OP_SUB_64: ++ r = cc_calc_sub_64(src, dst, vr); ++ break; ++ case CC_OP_ABS_64: ++ r = cc_calc_abs_64(dst); ++ break; ++ case CC_OP_NABS_64: ++ r = cc_calc_nabs_64(dst); ++ break; ++ case CC_OP_COMP_64: ++ r = cc_calc_comp_64(dst); ++ break; ++ case CC_OP_MULS_64: ++ r = cc_calc_muls_64(src, dst); ++ break; ++ ++ case CC_OP_ADD_32: ++ r = cc_calc_add_32(src, dst, vr); ++ break; ++ case CC_OP_SUB_32: ++ r = cc_calc_sub_32(src, dst, vr); ++ break; ++ case CC_OP_ABS_32: ++ r = cc_calc_abs_32(dst); ++ break; ++ case CC_OP_NABS_32: ++ r = cc_calc_nabs_32(dst); ++ break; ++ case CC_OP_COMP_32: ++ r = cc_calc_comp_32(dst); ++ break; ++ case CC_OP_MULS_32: ++ r = cc_calc_muls_32(dst); ++ break; ++ ++ case CC_OP_ICM: ++ r = cc_calc_icm(src, dst); ++ break; ++ case CC_OP_SLA_32: ++ r = cc_calc_sla_32(src, dst); ++ break; ++ case CC_OP_SLA_64: ++ r = cc_calc_sla_64(src, dst); ++ break; ++ case CC_OP_FLOGR: ++ r = cc_calc_flogr(dst); ++ break; ++ case CC_OP_LCBB: ++ r = cc_calc_lcbb(dst); ++ break; ++ case CC_OP_VC: ++ r = cc_calc_vc(src, dst); ++ break; ++ ++ case CC_OP_NZ_F32: ++ r = set_cc_nz_f32(dst); ++ break; ++ case CC_OP_NZ_F64: ++ r = set_cc_nz_f64(dst); ++ break; ++ case CC_OP_NZ_F128: ++ r = set_cc_nz_f128(make_float128(src, dst)); ++ break; ++ ++ default: ++ cpu_abort(env_cpu(env), "Unknown CC operation: %s\n", cc_name(cc_op)); ++ } ++ ++ HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__, ++ cc_name(cc_op), src, dst, vr, r); ++ return r; ++} ++ ++uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst, ++ uint64_t vr) ++{ ++ return do_calc_cc(env, cc_op, src, dst, vr); ++} ++ ++uint32_t HELPER(calc_cc)(CPUS390XState 
*env, uint32_t cc_op, uint64_t src, ++ uint64_t dst, uint64_t vr) ++{ ++ return do_calc_cc(env, cc_op, src, dst, vr); ++} ++ ++#ifndef CONFIG_USER_ONLY ++void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr) ++{ ++ s390_cpu_set_psw(env, mask, addr); ++ cpu_loop_exit(env_cpu(env)); ++} ++ ++void HELPER(sacf)(CPUS390XState *env, uint64_t a1) ++{ ++ HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1); ++ ++ switch (a1 & 0xf00) { ++ case 0x000: ++ env->psw.mask &= ~PSW_MASK_ASC; ++ env->psw.mask |= PSW_ASC_PRIMARY; ++ break; ++ case 0x100: ++ env->psw.mask &= ~PSW_MASK_ASC; ++ env->psw.mask |= PSW_ASC_SECONDARY; ++ break; ++ case 0x300: ++ env->psw.mask &= ~PSW_MASK_ASC; ++ env->psw.mask |= PSW_ASC_HOME; ++ break; ++ default: ++ HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1); ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); ++ } ++} ++#endif +diff --git a/target/s390x/tcg/crypto_helper.c b/target/s390x/tcg/crypto_helper.c +new file mode 100644 +index 0000000000..138d9e7ad9 +--- /dev/null ++++ b/target/s390x/tcg/crypto_helper.c +@@ -0,0 +1,61 @@ ++/* ++ * s390x crypto helpers ++ * ++ * Copyright (c) 2017 Red Hat Inc ++ * ++ * Authors: ++ * David Hildenbrand ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. ++ */ ++ ++#include "qemu/osdep.h" ++#include "qemu/main-loop.h" ++#include "s390x-internal.h" ++#include "tcg_s390x.h" ++#include "exec/helper-proto.h" ++#include "exec/exec-all.h" ++#include "exec/cpu_ldst.h" ++ ++uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3, ++ uint32_t type) ++{ ++ const uintptr_t ra = GETPC(); ++ const uint8_t mod = env->regs[0] & 0x80ULL; ++ const uint8_t fc = env->regs[0] & 0x7fULL; ++ uint8_t subfunc[16] = { 0 }; ++ uint64_t param_addr; ++ int i; ++ ++ switch (type) { ++ case S390_FEAT_TYPE_KMAC: ++ case S390_FEAT_TYPE_KIMD: ++ case S390_FEAT_TYPE_KLMD: ++ case S390_FEAT_TYPE_PCKMO: ++ case S390_FEAT_TYPE_PCC: ++ if (mod) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ break; ++ } ++ ++ s390_get_feat_block(type, subfunc); ++ if (!test_be_bit(fc, subfunc)) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ switch (fc) { ++ case 0: /* query subfunction */ ++ for (i = 0; i < 16; i++) { ++ param_addr = wrap_address(env, env->regs[1] + i); ++ cpu_stb_data_ra(env, param_addr, subfunc[i], ra); ++ } ++ break; ++ default: ++ /* we don't implement any other subfunction yet */ ++ g_assert_not_reached(); ++ } ++ ++ return 0; ++} +diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c +new file mode 100644 +index 0000000000..a61917d04f +--- /dev/null ++++ b/target/s390x/tcg/excp_helper.c +@@ -0,0 +1,641 @@ ++/* ++ * s390x exception / interrupt helpers ++ * ++ * Copyright (c) 2009 Ulrich Hecht ++ * Copyright (c) 2011 Alexander Graf ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. 
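
A note on HELPER(msa) above: the query subfunction (fc 0) stores a 16-byte bitmap of supported function codes at the address in r1, and test_be_bit() checks the requested code against that bitmap using big-endian bit numbering, where bit 0 is the most significant bit of byte 0. A standalone restatement of that bit test (illustrative, not the QEMU helper itself):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* big-endian bit numbering: bit 0 is the MSB of byte 0 */
    static bool be_bit(unsigned int nr, const uint8_t *field)
    {
        return field[nr / 8] & (0x80 >> (nr % 8));
    }

    int main(void)
    {
        uint8_t subfunc[16] = { 0x80 };   /* only function code 0 set */
        assert(be_bit(0, subfunc));
        assert(!be_bit(1, subfunc));
        return 0;
    }
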
++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, see . ++ */ ++ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "exec/helper-proto.h" ++#include "qemu/timer.h" ++#include "exec/exec-all.h" ++#include "exec/cpu_ldst.h" ++#include "hw/s390x/ioinst.h" ++#include "exec/address-spaces.h" ++#include "tcg_s390x.h" ++#ifndef CONFIG_USER_ONLY ++#include "hw/s390x/s390_flic.h" ++#include "hw/boards.h" ++#endif ++ ++void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, ++ uint32_t code, uintptr_t ra) ++{ ++ CPUState *cs = env_cpu(env); ++ ++ cpu_restore_state(cs, ra, true); ++ qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n", ++ env->psw.addr); ++ trigger_pgm_exception(env, code); ++ cpu_loop_exit(cs); ++} ++ ++void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, ++ uintptr_t ra) ++{ ++ g_assert(dxc <= 0xff); ++#if !defined(CONFIG_USER_ONLY) ++ /* Store the DXC into the lowcore */ ++ stl_phys(env_cpu(env)->as, ++ env->psa + offsetof(LowCore, data_exc_code), dxc); ++#endif ++ ++ /* Store the DXC into the FPC if AFP is enabled */ ++ if (env->cregs[0] & CR0_AFP) { ++ env->fpc = deposit32(env->fpc, 8, 8, dxc); ++ } ++ tcg_s390_program_interrupt(env, PGM_DATA, ra); ++} ++ ++void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc, ++ uintptr_t ra) ++{ ++ g_assert(vxc <= 0xff); ++#if !defined(CONFIG_USER_ONLY) ++ /* Always store the VXC into the lowcore, without AFP it is undefined */ ++ stl_phys(env_cpu(env)->as, ++ env->psa + offsetof(LowCore, data_exc_code), vxc); ++#endif ++ ++ /* Always store the VXC into the FPC, without AFP it is undefined */ ++ env->fpc = deposit32(env->fpc, 8, 8, vxc); ++ tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra); ++} ++ ++void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc) ++{ ++ tcg_s390_data_exception(env, dxc, GETPC()); ++} ++ ++#if defined(CONFIG_USER_ONLY) ++ ++void s390_cpu_do_interrupt(CPUState *cs) ++{ ++ cs->exception_index = -1; ++} ++ ++bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, ++ MMUAccessType access_type, int mmu_idx, ++ bool probe, uintptr_t retaddr) ++{ ++ S390CPU *cpu = S390_CPU(cs); ++ ++ trigger_pgm_exception(&cpu->env, PGM_ADDRESSING); ++ /* On real machines this value is dropped into LowMem. Since this ++ is userland, simply put this someplace that cpu_loop can find it. 
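
On tcg_s390_data_exception() above: deposit32(env->fpc, 8, 8, dxc) places the data-exception code in bits 8-15 of the FPC word. That single call, restated as standalone C (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    /* equivalent of deposit32(fpc, 8, 8, dxc): replace FPC bits 8..15 */
    static uint32_t set_dxc(uint32_t fpc, uint8_t dxc)
    {
        return (fpc & ~0x0000ff00u) | ((uint32_t)dxc << 8);
    }

    int main(void)
    {
        assert(set_dxc(0x00ff0000u, 0x08) == 0x00ff0800u);
        return 0;
    }
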
*/ ++ cpu->env.__excp_addr = address; ++ cpu_loop_exit_restore(cs, retaddr); ++} ++ ++#else /* !CONFIG_USER_ONLY */ ++ ++static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx) ++{ ++ switch (mmu_idx) { ++ case MMU_PRIMARY_IDX: ++ return PSW_ASC_PRIMARY; ++ case MMU_SECONDARY_IDX: ++ return PSW_ASC_SECONDARY; ++ case MMU_HOME_IDX: ++ return PSW_ASC_HOME; ++ default: ++ abort(); ++ } ++} ++ ++bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, ++ MMUAccessType access_type, int mmu_idx, ++ bool probe, uintptr_t retaddr) ++{ ++ S390CPU *cpu = S390_CPU(cs); ++ CPUS390XState *env = &cpu->env; ++ target_ulong vaddr, raddr; ++ uint64_t asc, tec; ++ int prot, excp; ++ ++ qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n", ++ __func__, address, access_type, mmu_idx); ++ ++ vaddr = address; ++ ++ if (mmu_idx < MMU_REAL_IDX) { ++ asc = cpu_mmu_idx_to_asc(mmu_idx); ++ /* 31-Bit mode */ ++ if (!(env->psw.mask & PSW_MASK_64)) { ++ vaddr &= 0x7fffffff; ++ } ++ excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec); ++ } else if (mmu_idx == MMU_REAL_IDX) { ++ /* 31-Bit mode */ ++ if (!(env->psw.mask & PSW_MASK_64)) { ++ vaddr &= 0x7fffffff; ++ } ++ excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec); ++ } else { ++ g_assert_not_reached(); ++ } ++ ++ /* check out of RAM access */ ++ if (!excp && ++ !address_space_access_valid(&address_space_memory, raddr, ++ TARGET_PAGE_SIZE, access_type, ++ MEMTXATTRS_UNSPECIFIED)) { ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ qemu_log_mask(CPU_LOG_MMU, ++ "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", ++ __func__, (uint64_t)raddr, (uint64_t)ms->ram_size); ++ excp = PGM_ADDRESSING; ++ tec = 0; /* unused */ ++ } ++ ++ env->tlb_fill_exc = excp; ++ env->tlb_fill_tec = tec; ++ ++ if (!excp) { ++ qemu_log_mask(CPU_LOG_MMU, ++ "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", ++ __func__, (uint64_t)vaddr, (uint64_t)raddr, prot); ++ tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot, ++ mmu_idx, TARGET_PAGE_SIZE); ++ return true; ++ } ++ if (probe) { ++ return false; ++ } ++ ++ if (excp != PGM_ADDRESSING) { ++ stq_phys(env_cpu(env)->as, ++ env->psa + offsetof(LowCore, trans_exc_code), tec); ++ } ++ ++ /* ++ * For data accesses, ILEN will be filled in from the unwind info, ++ * within cpu_loop_exit_restore. For code accesses, retaddr == 0, ++ * and so unwinding will not occur. However, ILEN is also undefined ++ * for that case -- we choose to set ILEN = 2. 
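
The fault paths above store the translation-exception code in the lowcore trans_exc_code slot and the DXC in data_exc_code. Their architected offsets can be cross-checked against a trimmed copy of the LowCore layout from s390x-internal.h; only the leading fields are reproduced, and they happen to be naturally aligned, so no packing attribute is needed (sketch, illustrative only):

    #include <stddef.h>
    #include <stdint.h>

    struct lowcore_prefix {           /* leading LowCore fields, as above */
        uint32_t ccw1[2];             /* 0x000 */
        uint32_t ccw2[4];             /* 0x008 */
        uint8_t pad1[0x80 - 0x18];    /* 0x018 */
        uint32_t ext_params;          /* 0x080 */
        uint16_t cpu_addr;            /* 0x084 */
        uint16_t ext_int_code;        /* 0x086 */
        uint16_t svc_ilen;            /* 0x088 */
        uint16_t svc_code;            /* 0x08a */
        uint16_t pgm_ilen;            /* 0x08c */
        uint16_t pgm_code;            /* 0x08e */
        uint32_t data_exc_code;       /* 0x090 */
        uint16_t mon_class_num;       /* 0x094 */
        uint16_t per_perc_atmid;      /* 0x096 */
        uint64_t per_address;         /* 0x098 */
        uint8_t exc_access_id;        /* 0x0a0 */
        uint8_t per_access_id;        /* 0x0a1 */
        uint8_t op_access_id;         /* 0x0a2 */
        uint8_t ar_access_id;         /* 0x0a3 */
        uint8_t pad2[0xa8 - 0xa4];    /* 0x0a4 */
        uint64_t trans_exc_code;      /* 0x0a8 */
    };

    _Static_assert(offsetof(struct lowcore_prefix, data_exc_code) == 0x90,
                   "DXC is stored at lowcore offset 0x90");
    _Static_assert(offsetof(struct lowcore_prefix, trans_exc_code) == 0xa8,
                   "TEC is stored at lowcore offset 0xa8");

    int main(void)
    {
        return 0;
    }
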
++ */ ++ env->int_pgm_ilen = 2; ++ trigger_pgm_exception(env, excp); ++ cpu_loop_exit_restore(cs, retaddr); ++} ++ ++static void do_program_interrupt(CPUS390XState *env) ++{ ++ uint64_t mask, addr; ++ LowCore *lowcore; ++ int ilen = env->int_pgm_ilen; ++ ++ assert(ilen == 2 || ilen == 4 || ilen == 6); ++ ++ switch (env->int_pgm_code) { ++ case PGM_PER: ++ if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) { ++ break; ++ } ++ /* FALL THROUGH */ ++ case PGM_OPERATION: ++ case PGM_PRIVILEGED: ++ case PGM_EXECUTE: ++ case PGM_PROTECTION: ++ case PGM_ADDRESSING: ++ case PGM_SPECIFICATION: ++ case PGM_DATA: ++ case PGM_FIXPT_OVERFLOW: ++ case PGM_FIXPT_DIVIDE: ++ case PGM_DEC_OVERFLOW: ++ case PGM_DEC_DIVIDE: ++ case PGM_HFP_EXP_OVERFLOW: ++ case PGM_HFP_EXP_UNDERFLOW: ++ case PGM_HFP_SIGNIFICANCE: ++ case PGM_HFP_DIVIDE: ++ case PGM_TRANS_SPEC: ++ case PGM_SPECIAL_OP: ++ case PGM_OPERAND: ++ case PGM_HFP_SQRT: ++ case PGM_PC_TRANS_SPEC: ++ case PGM_ALET_SPEC: ++ case PGM_MONITOR: ++ /* advance the PSW if our exception is not nullifying */ ++ env->psw.addr += ilen; ++ break; ++ } ++ ++ qemu_log_mask(CPU_LOG_INT, ++ "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n", ++ __func__, env->int_pgm_code, ilen, env->psw.mask, ++ env->psw.addr); ++ ++ lowcore = cpu_map_lowcore(env); ++ ++ /* Signal PER events with the exception. */ ++ if (env->per_perc_atmid) { ++ env->int_pgm_code |= PGM_PER; ++ lowcore->per_address = cpu_to_be64(env->per_address); ++ lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid); ++ env->per_perc_atmid = 0; ++ } ++ ++ lowcore->pgm_ilen = cpu_to_be16(ilen); ++ lowcore->pgm_code = cpu_to_be16(env->int_pgm_code); ++ lowcore->program_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); ++ lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr); ++ mask = be64_to_cpu(lowcore->program_new_psw.mask); ++ addr = be64_to_cpu(lowcore->program_new_psw.addr); ++ lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea); ++ ++ cpu_unmap_lowcore(lowcore); ++ ++ s390_cpu_set_psw(env, mask, addr); ++} ++ ++static void do_svc_interrupt(CPUS390XState *env) ++{ ++ uint64_t mask, addr; ++ LowCore *lowcore; ++ ++ lowcore = cpu_map_lowcore(env); ++ ++ lowcore->svc_code = cpu_to_be16(env->int_svc_code); ++ lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen); ++ lowcore->svc_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); ++ lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen); ++ mask = be64_to_cpu(lowcore->svc_new_psw.mask); ++ addr = be64_to_cpu(lowcore->svc_new_psw.addr); ++ ++ cpu_unmap_lowcore(lowcore); ++ ++ s390_cpu_set_psw(env, mask, addr); ++ ++ /* When a PER event is pending, the PER exception has to happen ++ immediately after the SERVICE CALL one. 
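
One rule in do_program_interrupt() above deserves a second look: for non-nullifying interruption codes the PSW is advanced past the failing instruction before being saved, so the stored old PSW points at the next instruction; nullifying PER events leave it untouched. Distilled into a standalone sketch (illustrative only):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* where the program-old PSW points, per do_program_interrupt() */
    static uint64_t old_psw_addr(uint64_t psw_addr, int ilen, bool nullifying)
    {
        return nullifying ? psw_addr : psw_addr + ilen;
    }

    int main(void)
    {
        /* completing 4-byte insn at 0x1000: old PSW records 0x1004 */
        assert(old_psw_addr(0x1000, 4, false) == 0x1004);
        /* nullified PER event: old PSW still points at 0x1000 */
        assert(old_psw_addr(0x1000, 4, true) == 0x1000);
        return 0;
    }
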
*/ ++ if (env->per_perc_atmid) { ++ env->int_pgm_code = PGM_PER; ++ env->int_pgm_ilen = env->int_svc_ilen; ++ do_program_interrupt(env); ++ } ++} ++ ++#define VIRTIO_SUBCODE_64 0x0D00 ++ ++static void do_ext_interrupt(CPUS390XState *env) ++{ ++ QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); ++ S390CPU *cpu = env_archcpu(env); ++ uint64_t mask, addr; ++ uint16_t cpu_addr; ++ LowCore *lowcore; ++ ++ if (!(env->psw.mask & PSW_MASK_EXT)) { ++ cpu_abort(CPU(cpu), "Ext int w/o ext mask\n"); ++ } ++ ++ lowcore = cpu_map_lowcore(env); ++ ++ if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) && ++ (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) { ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ unsigned int max_cpus = ms->smp.max_cpus; ++ ++ lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY); ++ cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS); ++ g_assert(cpu_addr < S390_MAX_CPUS); ++ lowcore->cpu_addr = cpu_to_be16(cpu_addr); ++ clear_bit(cpu_addr, env->emergency_signals); ++ if (bitmap_empty(env->emergency_signals, max_cpus)) { ++ env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL; ++ } ++ } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) && ++ (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) { ++ lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL); ++ lowcore->cpu_addr = cpu_to_be16(env->external_call_addr); ++ env->pending_int &= ~INTERRUPT_EXTERNAL_CALL; ++ } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) && ++ (env->cregs[0] & CR0_CKC_SC)) { ++ lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP); ++ lowcore->cpu_addr = 0; ++ env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR; ++ } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) && ++ (env->cregs[0] & CR0_CPU_TIMER_SC)) { ++ lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER); ++ lowcore->cpu_addr = 0; ++ env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER; ++ } else if (qemu_s390_flic_has_service(flic) && ++ (env->cregs[0] & CR0_SERVICE_SC)) { ++ uint32_t param; ++ ++ param = qemu_s390_flic_dequeue_service(flic); ++ lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE); ++ lowcore->ext_params = cpu_to_be32(param); ++ lowcore->cpu_addr = 0; ++ } else { ++ g_assert_not_reached(); ++ } ++ ++ mask = be64_to_cpu(lowcore->external_new_psw.mask); ++ addr = be64_to_cpu(lowcore->external_new_psw.addr); ++ lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); ++ lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr); ++ ++ cpu_unmap_lowcore(lowcore); ++ ++ s390_cpu_set_psw(env, mask, addr); ++} ++ ++static void do_io_interrupt(CPUS390XState *env) ++{ ++ QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); ++ uint64_t mask, addr; ++ QEMUS390FlicIO *io; ++ LowCore *lowcore; ++ ++ g_assert(env->psw.mask & PSW_MASK_IO); ++ io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]); ++ g_assert(io); ++ ++ lowcore = cpu_map_lowcore(env); ++ ++ lowcore->subchannel_id = cpu_to_be16(io->id); ++ lowcore->subchannel_nr = cpu_to_be16(io->nr); ++ lowcore->io_int_parm = cpu_to_be32(io->parm); ++ lowcore->io_int_word = cpu_to_be32(io->word); ++ lowcore->io_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); ++ lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr); ++ mask = be64_to_cpu(lowcore->io_new_psw.mask); ++ addr = be64_to_cpu(lowcore->io_new_psw.addr); ++ ++ cpu_unmap_lowcore(lowcore); ++ g_free(io); ++ ++ s390_cpu_set_psw(env, mask, addr); ++} ++ ++typedef struct MchkExtSaveArea { ++ uint64_t vregs[32][2]; /* 0x0000 */ ++ uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */ ++} MchkExtSaveArea; 
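
The extended save area holds all 32 vector registers (32 x 16 = 512 bytes) padded up to the architected 1 KiB, which the QEMU_BUILD_BUG_ON just below pins down. The same check as a standalone C11 assertion (sketch, illustrative only):

    #include <stdint.h>

    typedef struct {
        uint64_t vregs[32][2];          /* 512 bytes of vector state */
        uint8_t pad[0x0400 - 0x0200];   /* pad to the 1 KiB architected size */
    } SaveAreaSketch;

    _Static_assert(sizeof(SaveAreaSketch) == 1024,
                   "extended save area is 1 KiB");

    int main(void)
    {
        return 0;
    }
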
++QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024); ++ ++static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao) ++{ ++ hwaddr len = sizeof(MchkExtSaveArea); ++ MchkExtSaveArea *sa; ++ int i; ++ ++ sa = cpu_physical_memory_map(mcesao, &len, true); ++ if (!sa) { ++ return -EFAULT; ++ } ++ if (len != sizeof(MchkExtSaveArea)) { ++ cpu_physical_memory_unmap(sa, len, 1, 0); ++ return -EFAULT; ++ } ++ ++ for (i = 0; i < 32; i++) { ++ sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]); ++ sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]); ++ } ++ ++ cpu_physical_memory_unmap(sa, len, 1, len); ++ return 0; ++} ++ ++static void do_mchk_interrupt(CPUS390XState *env) ++{ ++ QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); ++ uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP; ++ uint64_t mask, addr, mcesao = 0; ++ LowCore *lowcore; ++ int i; ++ ++ /* for now we only support channel report machine checks (floating) */ ++ g_assert(env->psw.mask & PSW_MASK_MCHECK); ++ g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC); ++ ++ qemu_s390_flic_dequeue_crw_mchk(flic); ++ ++ lowcore = cpu_map_lowcore(env); ++ ++ /* extended save area */ ++ if (mcic & MCIC_VB_VR) { ++ /* length and alignment is 1024 bytes */ ++ mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull; ++ } ++ ++ /* try to store vector registers */ ++ if (!mcesao || mchk_store_vregs(env, mcesao)) { ++ mcic &= ~MCIC_VB_VR; ++ } ++ ++ /* we are always in z/Architecture mode */ ++ lowcore->ar_access_id = 1; ++ ++ for (i = 0; i < 16; i++) { ++ lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i)); ++ lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]); ++ lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]); ++ lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]); ++ } ++ lowcore->prefixreg_save_area = cpu_to_be32(env->psa); ++ lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc); ++ lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr); ++ lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm); ++ lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8); ++ ++ lowcore->mcic = cpu_to_be64(mcic); ++ lowcore->mcck_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); ++ lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr); ++ mask = be64_to_cpu(lowcore->mcck_new_psw.mask); ++ addr = be64_to_cpu(lowcore->mcck_new_psw.addr); ++ ++ cpu_unmap_lowcore(lowcore); ++ ++ s390_cpu_set_psw(env, mask, addr); ++} ++ ++void s390_cpu_do_interrupt(CPUState *cs) ++{ ++ QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); ++ S390CPU *cpu = S390_CPU(cs); ++ CPUS390XState *env = &cpu->env; ++ bool stopped = false; ++ ++ qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n", ++ __func__, cs->exception_index, env->psw.mask, env->psw.addr); ++ ++try_deliver: ++ /* handle machine checks */ ++ if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) { ++ cs->exception_index = EXCP_MCHK; ++ } ++ /* handle external interrupts */ ++ if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) { ++ cs->exception_index = EXCP_EXT; ++ } ++ /* handle I/O interrupts */ ++ if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) { ++ cs->exception_index = EXCP_IO; ++ } ++ /* RESTART interrupt */ ++ if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) { ++ cs->exception_index = EXCP_RESTART; ++ } ++ /* STOP interrupt has least priority */ ++ if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) { ++ cs->exception_index = EXCP_STOP; ++ } ++ ++ switch (cs->exception_index) { ++ case 
EXCP_PGM:
++        do_program_interrupt(env);
++        break;
++    case EXCP_SVC:
++        do_svc_interrupt(env);
++        break;
++    case EXCP_EXT:
++        do_ext_interrupt(env);
++        break;
++    case EXCP_IO:
++        do_io_interrupt(env);
++        break;
++    case EXCP_MCHK:
++        do_mchk_interrupt(env);
++        break;
++    case EXCP_RESTART:
++        do_restart_interrupt(env);
++        break;
++    case EXCP_STOP:
++        do_stop_interrupt(env);
++        stopped = true;
++        break;
++    }
++
++    if (cs->exception_index != -1 && !stopped) {
++        /* check if there are more pending interrupts to deliver */
++        cs->exception_index = -1;
++        goto try_deliver;
++    }
++    cs->exception_index = -1;
++
++    /* we might still have pending interrupts, but not deliverable */
++    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
++        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
++    }
++
++    /* WAIT PSW during interrupt injection or STOP interrupt */
++    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
++        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
++        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
++    } else if (cs->halted) {
++        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
++        s390_cpu_unhalt(cpu);
++    }
++}
++
++bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
++{
++    if (interrupt_request & CPU_INTERRUPT_HARD) {
++        S390CPU *cpu = S390_CPU(cs);
++        CPUS390XState *env = &cpu->env;
++
++        if (env->ex_value) {
++            /* Execution of the target insn is indivisible from
++               the parent EXECUTE insn. */
++            return false;
++        }
++        if (s390_cpu_has_int(cpu)) {
++            s390_cpu_do_interrupt(cs);
++            return true;
++        }
++        if (env->psw.mask & PSW_MASK_WAIT) {
++            /* Woken up because of a floating interrupt but it has already
++             * been delivered. Go back to sleep. */
++            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
++        }
++    }
++    return false;
++}
++
++void s390x_cpu_debug_excp_handler(CPUState *cs)
++{
++    S390CPU *cpu = S390_CPU(cs);
++    CPUS390XState *env = &cpu->env;
++    CPUWatchpoint *wp_hit = cs->watchpoint_hit;
++
++    if (wp_hit && wp_hit->flags & BP_CPU) {
++        /* FIXME: When the storage-alteration-space control bit is set,
++           the exception should only be triggered if the memory access
++           is done using an address space with the storage-alteration-event
++           bit set. We have no way to detect that with the current
++           watchpoint code. */
++        cs->watchpoint_hit = NULL;
++
++        env->per_address = env->psw.addr;
++        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
++        /* FIXME: We currently have no way to detect the address space used
++           to trigger the watchpoint. For now just consider it is the
++           current default ASC. This turns out to be true except when the
++           MVCP and MVCS instructions are used. */
++        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;
++
++        /*
++         * Remove all watchpoints to re-execute the code. A PER exception
++         * will be triggered, it will call s390_cpu_set_psw which will
++         * recompute the watchpoints.
++         */
++        cpu_watchpoint_remove_all(cs, BP_CPU);
++        cpu_loop_exit_noexc(cs);
++    }
++}
++
++/* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
++   this is only for the atomic operations, for which we want to raise a
++   specification exception.
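
s390_cpu_do_interrupt() above polls the pending interrupt classes in architectural priority order: machine check first, then external, I/O and restart, with STOP last. A compact restatement of that if-chain (standalone sketch; the identifiers are illustrative, not QEMU's EXCP_* values):

    #include <assert.h>
    #include <stdbool.h>

    enum { NONE = -1, MCHK, EXT, IO, RESTART, STOP };   /* illustrative */

    /* mirrors the if-chain in s390_cpu_do_interrupt(): first match wins */
    static int pick_interrupt(bool mchk, bool ext, bool io,
                              bool restart, bool stop)
    {
        if (mchk) {
            return MCHK;
        }
        if (ext) {
            return EXT;
        }
        if (io) {
            return IO;
        }
        if (restart) {
            return RESTART;
        }
        if (stop) {
            return STOP;
        }
        return NONE;
    }

    int main(void)
    {
        /* a machine check outranks a simultaneously pending STOP */
        assert(pick_interrupt(true, false, false, false, true) == MCHK);
        return 0;
    }
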
*/ ++void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, ++ MMUAccessType access_type, ++ int mmu_idx, uintptr_t retaddr) ++{ ++ S390CPU *cpu = S390_CPU(cs); ++ CPUS390XState *env = &cpu->env; ++ ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr); ++} ++ ++static void QEMU_NORETURN monitor_event(CPUS390XState *env, ++ uint64_t monitor_code, ++ uint8_t monitor_class, uintptr_t ra) ++{ ++ /* Store the Monitor Code and the Monitor Class Number into the lowcore */ ++ stq_phys(env_cpu(env)->as, ++ env->psa + offsetof(LowCore, monitor_code), monitor_code); ++ stw_phys(env_cpu(env)->as, ++ env->psa + offsetof(LowCore, mon_class_num), monitor_class); ++ ++ tcg_s390_program_interrupt(env, PGM_MONITOR, ra); ++} ++ ++void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code, ++ uint32_t monitor_class) ++{ ++ g_assert(monitor_class <= 0xff); ++ ++ if (env->cregs[8] & (0x8000 >> monitor_class)) { ++ monitor_event(env, monitor_code, monitor_class, GETPC()); ++ } ++} ++ ++#endif /* !CONFIG_USER_ONLY */ +diff --git a/target/s390x/tcg/fpu_helper.c b/target/s390x/tcg/fpu_helper.c +new file mode 100644 +index 0000000000..4067205405 +--- /dev/null ++++ b/target/s390x/tcg/fpu_helper.c +@@ -0,0 +1,976 @@ ++/* ++ * S/390 FPU helper routines ++ * ++ * Copyright (c) 2009 Ulrich Hecht ++ * Copyright (c) 2009 Alexander Graf ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, see . ++ */ ++ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "tcg_s390x.h" ++#include "exec/exec-all.h" ++#include "exec/cpu_ldst.h" ++#include "exec/helper-proto.h" ++#include "fpu/softfloat.h" ++ ++/* #define DEBUG_HELPER */ ++#ifdef DEBUG_HELPER ++#define HELPER_LOG(x...) qemu_log(x) ++#else ++#define HELPER_LOG(x...) ++#endif ++ ++#define RET128(F) (env->retxl = F.low, F.high) ++ ++uint8_t s390_softfloat_exc_to_ieee(unsigned int exc) ++{ ++ uint8_t s390_exc = 0; ++ ++ s390_exc |= (exc & float_flag_invalid) ? S390_IEEE_MASK_INVALID : 0; ++ s390_exc |= (exc & float_flag_divbyzero) ? S390_IEEE_MASK_DIVBYZERO : 0; ++ s390_exc |= (exc & float_flag_overflow) ? S390_IEEE_MASK_OVERFLOW : 0; ++ s390_exc |= (exc & float_flag_underflow) ? S390_IEEE_MASK_UNDERFLOW : 0; ++ s390_exc |= (exc & float_flag_inexact) ? S390_IEEE_MASK_INEXACT : 0; ++ ++ return s390_exc; ++} ++ ++/* Should be called after any operation that may raise IEEE exceptions. */ ++static void handle_exceptions(CPUS390XState *env, bool XxC, uintptr_t retaddr) ++{ ++ unsigned s390_exc, qemu_exc; ++ ++ /* Get the exceptions raised by the current operation. Reset the ++ fpu_status contents so that the next operation has a clean slate. 
*/ ++ qemu_exc = env->fpu_status.float_exception_flags; ++ if (qemu_exc == 0) { ++ return; ++ } ++ env->fpu_status.float_exception_flags = 0; ++ s390_exc = s390_softfloat_exc_to_ieee(qemu_exc); ++ ++ /* ++ * IEEE-Underflow exception recognition exists if a tininess condition ++ * (underflow) exists and ++ * - The mask bit in the FPC is zero and the result is inexact ++ * - The mask bit in the FPC is one ++ * So tininess conditions that are not inexact don't trigger any ++ * underflow action in case the mask bit is not one. ++ */ ++ if (!(s390_exc & S390_IEEE_MASK_INEXACT) && ++ !((env->fpc >> 24) & S390_IEEE_MASK_UNDERFLOW)) { ++ s390_exc &= ~S390_IEEE_MASK_UNDERFLOW; ++ } ++ ++ /* ++ * FIXME: ++ * 1. Right now, all inexact conditions are inidicated as ++ * "truncated" (0) and never as "incremented" (1) in the DXC. ++ * 2. Only traps due to invalid/divbyzero are suppressing. Other traps ++ * are completing, meaning the target register has to be written! ++ * This, however will mean that we have to write the register before ++ * triggering the trap - impossible right now. ++ */ ++ ++ /* ++ * invalid/divbyzero cannot coexist with other conditions. ++ * overflow/underflow however can coexist with inexact, we have to ++ * handle it separatly. ++ */ ++ if (s390_exc & ~S390_IEEE_MASK_INEXACT) { ++ if (s390_exc & ~S390_IEEE_MASK_INEXACT & env->fpc >> 24) { ++ /* trap condition - inexact reported along */ ++ tcg_s390_data_exception(env, s390_exc, retaddr); ++ } ++ /* nontrap condition - inexact handled differently */ ++ env->fpc |= (s390_exc & ~S390_IEEE_MASK_INEXACT) << 16; ++ } ++ ++ /* inexact handling */ ++ if (s390_exc & S390_IEEE_MASK_INEXACT && !XxC) { ++ /* trap condition - overflow/underflow _not_ reported along */ ++ if (s390_exc & S390_IEEE_MASK_INEXACT & env->fpc >> 24) { ++ tcg_s390_data_exception(env, s390_exc & S390_IEEE_MASK_INEXACT, ++ retaddr); ++ } ++ /* nontrap condition */ ++ env->fpc |= (s390_exc & S390_IEEE_MASK_INEXACT) << 16; ++ } ++} ++ ++int float_comp_to_cc(CPUS390XState *env, FloatRelation float_compare) ++{ ++ switch (float_compare) { ++ case float_relation_equal: ++ return 0; ++ case float_relation_less: ++ return 1; ++ case float_relation_greater: ++ return 2; ++ case float_relation_unordered: ++ return 3; ++ default: ++ cpu_abort(env_cpu(env), "unknown return value for float compare\n"); ++ } ++} ++ ++/* condition codes for unary FP ops */ ++uint32_t set_cc_nz_f32(float32 v) ++{ ++ if (float32_is_any_nan(v)) { ++ return 3; ++ } else if (float32_is_zero(v)) { ++ return 0; ++ } else if (float32_is_neg(v)) { ++ return 1; ++ } else { ++ return 2; ++ } ++} ++ ++uint32_t set_cc_nz_f64(float64 v) ++{ ++ if (float64_is_any_nan(v)) { ++ return 3; ++ } else if (float64_is_zero(v)) { ++ return 0; ++ } else if (float64_is_neg(v)) { ++ return 1; ++ } else { ++ return 2; ++ } ++} ++ ++uint32_t set_cc_nz_f128(float128 v) ++{ ++ if (float128_is_any_nan(v)) { ++ return 3; ++ } else if (float128_is_zero(v)) { ++ return 0; ++ } else if (float128_is_neg(v)) { ++ return 1; ++ } else { ++ return 2; ++ } ++} ++ ++/* condition codes for FP to integer conversion ops */ ++static uint32_t set_cc_conv_f32(float32 v, float_status *stat) ++{ ++ if (stat->float_exception_flags & float_flag_invalid) { ++ return 3; ++ } else { ++ return set_cc_nz_f32(v); ++ } ++} ++ ++static uint32_t set_cc_conv_f64(float64 v, float_status *stat) ++{ ++ if (stat->float_exception_flags & float_flag_invalid) { ++ return 3; ++ } else { ++ return set_cc_nz_f64(v); ++ } ++} ++ ++static uint32_t 
set_cc_conv_f128(float128 v, float_status *stat) ++{ ++ if (stat->float_exception_flags & float_flag_invalid) { ++ return 3; ++ } else { ++ return set_cc_nz_f128(v); ++ } ++} ++ ++static inline uint8_t round_from_m34(uint32_t m34) ++{ ++ return extract32(m34, 0, 4); ++} ++ ++static inline bool xxc_from_m34(uint32_t m34) ++{ ++ /* XxC is bit 1 of m4 */ ++ return extract32(m34, 4 + 3 - 1, 1); ++} ++ ++/* 32-bit FP addition */ ++uint64_t HELPER(aeb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ float32 ret = float32_add(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 64-bit FP addition */ ++uint64_t HELPER(adb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ float64 ret = float64_add(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 128-bit FP addition */ ++uint64_t HELPER(axb)(CPUS390XState *env, uint64_t ah, uint64_t al, ++ uint64_t bh, uint64_t bl) ++{ ++ float128 ret = float128_add(make_float128(ah, al), ++ make_float128(bh, bl), ++ &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return RET128(ret); ++} ++ ++/* 32-bit FP subtraction */ ++uint64_t HELPER(seb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ float32 ret = float32_sub(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 64-bit FP subtraction */ ++uint64_t HELPER(sdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ float64 ret = float64_sub(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 128-bit FP subtraction */ ++uint64_t HELPER(sxb)(CPUS390XState *env, uint64_t ah, uint64_t al, ++ uint64_t bh, uint64_t bl) ++{ ++ float128 ret = float128_sub(make_float128(ah, al), ++ make_float128(bh, bl), ++ &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return RET128(ret); ++} ++ ++/* 32-bit FP division */ ++uint64_t HELPER(deb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ float32 ret = float32_div(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 64-bit FP division */ ++uint64_t HELPER(ddb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ float64 ret = float64_div(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 128-bit FP division */ ++uint64_t HELPER(dxb)(CPUS390XState *env, uint64_t ah, uint64_t al, ++ uint64_t bh, uint64_t bl) ++{ ++ float128 ret = float128_div(make_float128(ah, al), ++ make_float128(bh, bl), ++ &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return RET128(ret); ++} ++ ++/* 32-bit FP multiplication */ ++uint64_t HELPER(meeb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ float32 ret = float32_mul(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 64-bit FP multiplication */ ++uint64_t HELPER(mdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ float64 ret = float64_mul(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 64/32-bit FP multiplication */ ++uint64_t HELPER(mdeb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ float64 ret = float32_to_float64(f2, &env->fpu_status); ++ ret = float64_mul(f1, ret, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 128-bit FP multiplication */ ++uint64_t HELPER(mxb)(CPUS390XState *env, uint64_t ah, uint64_t al, ++ uint64_t bh, uint64_t bl) ++{ ++ float128 ret = 
float128_mul(make_float128(ah, al), ++ make_float128(bh, bl), ++ &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return RET128(ret); ++} ++ ++/* 128/64-bit FP multiplication */ ++uint64_t HELPER(mxdb)(CPUS390XState *env, uint64_t ah, uint64_t al, ++ uint64_t f2) ++{ ++ float128 ret = float64_to_float128(f2, &env->fpu_status); ++ ret = float128_mul(make_float128(ah, al), ret, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return RET128(ret); ++} ++ ++/* convert 32-bit float to 64-bit float */ ++uint64_t HELPER(ldeb)(CPUS390XState *env, uint64_t f2) ++{ ++ float64 ret = float32_to_float64(f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* convert 128-bit float to 64-bit float */ ++uint64_t HELPER(ldxb)(CPUS390XState *env, uint64_t ah, uint64_t al, ++ uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float64 ret = float128_to_float64(make_float128(ah, al), &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ return ret; ++} ++ ++/* convert 64-bit float to 128-bit float */ ++uint64_t HELPER(lxdb)(CPUS390XState *env, uint64_t f2) ++{ ++ float128 ret = float64_to_float128(f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return RET128(ret); ++} ++ ++/* convert 32-bit float to 128-bit float */ ++uint64_t HELPER(lxeb)(CPUS390XState *env, uint64_t f2) ++{ ++ float128 ret = float32_to_float128(f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return RET128(ret); ++} ++ ++/* convert 64-bit float to 32-bit float */ ++uint64_t HELPER(ledb)(CPUS390XState *env, uint64_t f2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float32 ret = float64_to_float32(f2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ return ret; ++} ++ ++/* convert 128-bit float to 32-bit float */ ++uint64_t HELPER(lexb)(CPUS390XState *env, uint64_t ah, uint64_t al, ++ uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float32 ret = float128_to_float32(make_float128(ah, al), &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ return ret; ++} ++ ++/* 32-bit FP compare */ ++uint32_t HELPER(ceb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ FloatRelation cmp = float32_compare_quiet(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return float_comp_to_cc(env, cmp); ++} ++ ++/* 64-bit FP compare */ ++uint32_t HELPER(cdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ FloatRelation cmp = float64_compare_quiet(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return float_comp_to_cc(env, cmp); ++} ++ ++/* 128-bit FP compare */ ++uint32_t HELPER(cxb)(CPUS390XState *env, uint64_t ah, uint64_t al, ++ uint64_t bh, uint64_t bl) ++{ ++ FloatRelation cmp = float128_compare_quiet(make_float128(ah, al), ++ make_float128(bh, bl), ++ &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return float_comp_to_cc(env, cmp); ++} ++ ++int s390_swap_bfp_rounding_mode(CPUS390XState *env, int m3) ++{ ++ int ret = env->fpu_status.float_rounding_mode; ++ ++ switch (m3) { ++ case 0: ++ /* current mode */ ++ break; ++ case 1: ++ /* round to nearest with ties away from 0 */ ++ 
set_float_rounding_mode(float_round_ties_away, &env->fpu_status); ++ break; ++ case 3: ++ /* round to prepare for shorter precision */ ++ set_float_rounding_mode(float_round_to_odd, &env->fpu_status); ++ break; ++ case 4: ++ /* round to nearest with ties to even */ ++ set_float_rounding_mode(float_round_nearest_even, &env->fpu_status); ++ break; ++ case 5: ++ /* round to zero */ ++ set_float_rounding_mode(float_round_to_zero, &env->fpu_status); ++ break; ++ case 6: ++ /* round to +inf */ ++ set_float_rounding_mode(float_round_up, &env->fpu_status); ++ break; ++ case 7: ++ /* round to -inf */ ++ set_float_rounding_mode(float_round_down, &env->fpu_status); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ return ret; ++} ++ ++void s390_restore_bfp_rounding_mode(CPUS390XState *env, int old_mode) ++{ ++ set_float_rounding_mode(old_mode, &env->fpu_status); ++} ++ ++/* convert 64-bit int to 32-bit float */ ++uint64_t HELPER(cegb)(CPUS390XState *env, int64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float32 ret = int64_to_float32(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ return ret; ++} ++ ++/* convert 64-bit int to 64-bit float */ ++uint64_t HELPER(cdgb)(CPUS390XState *env, int64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float64 ret = int64_to_float64(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ return ret; ++} ++ ++/* convert 64-bit int to 128-bit float */ ++uint64_t HELPER(cxgb)(CPUS390XState *env, int64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float128 ret = int64_to_float128(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ return RET128(ret); ++} ++ ++/* convert 64-bit uint to 32-bit float */ ++uint64_t HELPER(celgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float32 ret = uint64_to_float32(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ return ret; ++} ++ ++/* convert 64-bit uint to 64-bit float */ ++uint64_t HELPER(cdlgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float64 ret = uint64_to_float64(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ return ret; ++} ++ ++/* convert 64-bit uint to 128-bit float */ ++uint64_t HELPER(cxlgb)(CPUS390XState *env, uint64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float128 ret = uint64_to_float128(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ return RET128(ret); ++} ++ ++/* convert 32-bit float to 64-bit int */ ++uint64_t HELPER(cgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ int64_t ret = float32_to_int64(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f32(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ 
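/*
++ * cc was derived by set_cc_conv_f32() from the softfloat invalid
++ * flag before handle_exceptions() below processes the accumulated
++ * flags; a NaN source yields cc 3, and the architected result in
++ * that case is the most negative number, substituted at the end.
++ */
++ 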
handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float32_is_any_nan(v2)) { ++ return INT64_MIN; ++ } ++ return ret; ++} ++ ++/* convert 64-bit float to 64-bit int */ ++uint64_t HELPER(cgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ int64_t ret = float64_to_int64(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f64(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float64_is_any_nan(v2)) { ++ return INT64_MIN; ++ } ++ return ret; ++} ++ ++/* convert 128-bit float to 64-bit int */ ++uint64_t HELPER(cgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float128 v2 = make_float128(h, l); ++ int64_t ret = float128_to_int64(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f128(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float128_is_any_nan(v2)) { ++ return INT64_MIN; ++ } ++ return ret; ++} ++ ++/* convert 32-bit float to 32-bit int */ ++uint64_t HELPER(cfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ int32_t ret = float32_to_int32(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f32(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float32_is_any_nan(v2)) { ++ return INT32_MIN; ++ } ++ return ret; ++} ++ ++/* convert 64-bit float to 32-bit int */ ++uint64_t HELPER(cfdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ int32_t ret = float64_to_int32(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f64(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float64_is_any_nan(v2)) { ++ return INT32_MIN; ++ } ++ return ret; ++} ++ ++/* convert 128-bit float to 32-bit int */ ++uint64_t HELPER(cfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float128 v2 = make_float128(h, l); ++ int32_t ret = float128_to_int32(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f128(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float128_is_any_nan(v2)) { ++ return INT32_MIN; ++ } ++ return ret; ++} ++ ++/* convert 32-bit float to 64-bit uint */ ++uint64_t HELPER(clgeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ uint64_t ret = float32_to_uint64(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f32(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float32_is_any_nan(v2)) { ++ return 0; ++ } ++ return ret; ++} ++ ++/* convert 64-bit float to 64-bit uint */ ++uint64_t HELPER(clgdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ uint64_t ret = 
float64_to_uint64(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f64(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float64_is_any_nan(v2)) { ++ return 0; ++ } ++ return ret; ++} ++ ++/* convert 128-bit float to 64-bit uint */ ++uint64_t HELPER(clgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float128 v2 = make_float128(h, l); ++ uint64_t ret = float128_to_uint64(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f128(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float128_is_any_nan(v2)) { ++ return 0; ++ } ++ return ret; ++} ++ ++/* convert 32-bit float to 32-bit uint */ ++uint64_t HELPER(clfeb)(CPUS390XState *env, uint64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ uint32_t ret = float32_to_uint32(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f32(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float32_is_any_nan(v2)) { ++ return 0; ++ } ++ return ret; ++} ++ ++/* convert 64-bit float to 32-bit uint */ ++uint64_t HELPER(clfdb)(CPUS390XState *env, uint64_t v2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ uint32_t ret = float64_to_uint32(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f64(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float64_is_any_nan(v2)) { ++ return 0; ++ } ++ return ret; ++} ++ ++/* convert 128-bit float to 32-bit uint */ ++uint64_t HELPER(clfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float128 v2 = make_float128(h, l); ++ uint32_t ret = float128_to_uint32(v2, &env->fpu_status); ++ uint32_t cc = set_cc_conv_f128(v2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ env->cc_op = cc; ++ if (float128_is_any_nan(v2)) { ++ return 0; ++ } ++ return ret; ++} ++ ++/* round to integer 32-bit */ ++uint64_t HELPER(fieb)(CPUS390XState *env, uint64_t f2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float32 ret = float32_round_to_int(f2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ return ret; ++} ++ ++/* round to integer 64-bit */ ++uint64_t HELPER(fidb)(CPUS390XState *env, uint64_t f2, uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float64 ret = float64_round_to_int(f2, &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, xxc_from_m34(m34), GETPC()); ++ return ret; ++} ++ ++/* round to integer 128-bit */ ++uint64_t HELPER(fixb)(CPUS390XState *env, uint64_t ah, uint64_t al, ++ uint32_t m34) ++{ ++ int old_mode = s390_swap_bfp_rounding_mode(env, round_from_m34(m34)); ++ float128 ret = float128_round_to_int(make_float128(ah, al), ++ &env->fpu_status); ++ ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_exceptions(env, 
xxc_from_m34(m34), GETPC()); ++ return RET128(ret); ++} ++ ++/* 32-bit FP compare and signal */ ++uint32_t HELPER(keb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ FloatRelation cmp = float32_compare(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return float_comp_to_cc(env, cmp); ++} ++ ++/* 64-bit FP compare and signal */ ++uint32_t HELPER(kdb)(CPUS390XState *env, uint64_t f1, uint64_t f2) ++{ ++ FloatRelation cmp = float64_compare(f1, f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return float_comp_to_cc(env, cmp); ++} ++ ++/* 128-bit FP compare and signal */ ++uint32_t HELPER(kxb)(CPUS390XState *env, uint64_t ah, uint64_t al, ++ uint64_t bh, uint64_t bl) ++{ ++ FloatRelation cmp = float128_compare(make_float128(ah, al), ++ make_float128(bh, bl), ++ &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return float_comp_to_cc(env, cmp); ++} ++ ++/* 32-bit FP multiply and add */ ++uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1, ++ uint64_t f2, uint64_t f3) ++{ ++ float32 ret = float32_muladd(f2, f3, f1, 0, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 64-bit FP multiply and add */ ++uint64_t HELPER(madb)(CPUS390XState *env, uint64_t f1, ++ uint64_t f2, uint64_t f3) ++{ ++ float64 ret = float64_muladd(f2, f3, f1, 0, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 32-bit FP multiply and subtract */ ++uint64_t HELPER(mseb)(CPUS390XState *env, uint64_t f1, ++ uint64_t f2, uint64_t f3) ++{ ++ float32 ret = float32_muladd(f2, f3, f1, float_muladd_negate_c, ++ &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* 64-bit FP multiply and subtract */ ++uint64_t HELPER(msdb)(CPUS390XState *env, uint64_t f1, ++ uint64_t f2, uint64_t f3) ++{ ++ float64 ret = float64_muladd(f2, f3, f1, float_muladd_negate_c, ++ &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* The rightmost bit has the number 11. 
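Bit 0 is the leftmost bit of the 12-bit test-data-class mask; each
++ class occupies an adjacent +/- pair of bits, and NEG selects the
++ negative member of the pair. 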
*/ ++static inline uint16_t dcmask(int bit, bool neg) ++{ ++ return 1 << (11 - bit - neg); ++} ++ ++#define DEF_FLOAT_DCMASK(_TYPE) \ ++uint16_t _TYPE##_dcmask(CPUS390XState *env, _TYPE f1) \ ++{ \ ++ const bool neg = _TYPE##_is_neg(f1); \ ++ \ ++ /* Sorted by most common cases - only one class is possible */ \ ++ if (_TYPE##_is_normal(f1)) { \ ++ return dcmask(2, neg); \ ++ } else if (_TYPE##_is_zero(f1)) { \ ++ return dcmask(0, neg); \ ++ } else if (_TYPE##_is_denormal(f1)) { \ ++ return dcmask(4, neg); \ ++ } else if (_TYPE##_is_infinity(f1)) { \ ++ return dcmask(6, neg); \ ++ } else if (_TYPE##_is_quiet_nan(f1, &env->fpu_status)) { \ ++ return dcmask(8, neg); \ ++ } \ ++ /* signaling nan, as last remaining case */ \ ++ return dcmask(10, neg); \ ++} ++DEF_FLOAT_DCMASK(float32) ++DEF_FLOAT_DCMASK(float64) ++DEF_FLOAT_DCMASK(float128) ++ ++/* test data class 32-bit */ ++uint32_t HELPER(tceb)(CPUS390XState *env, uint64_t f1, uint64_t m2) ++{ ++ return (m2 & float32_dcmask(env, f1)) != 0; ++} ++ ++/* test data class 64-bit */ ++uint32_t HELPER(tcdb)(CPUS390XState *env, uint64_t v1, uint64_t m2) ++{ ++ return (m2 & float64_dcmask(env, v1)) != 0; ++} ++ ++/* test data class 128-bit */ ++uint32_t HELPER(tcxb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint64_t m2) ++{ ++ return (m2 & float128_dcmask(env, make_float128(ah, al))) != 0; ++} ++ ++/* square root 32-bit */ ++uint64_t HELPER(sqeb)(CPUS390XState *env, uint64_t f2) ++{ ++ float32 ret = float32_sqrt(f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* square root 64-bit */ ++uint64_t HELPER(sqdb)(CPUS390XState *env, uint64_t f2) ++{ ++ float64 ret = float64_sqrt(f2, &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return ret; ++} ++ ++/* square root 128-bit */ ++uint64_t HELPER(sqxb)(CPUS390XState *env, uint64_t ah, uint64_t al) ++{ ++ float128 ret = float128_sqrt(make_float128(ah, al), &env->fpu_status); ++ handle_exceptions(env, false, GETPC()); ++ return RET128(ret); ++} ++ ++static const int fpc_to_rnd[8] = { ++ float_round_nearest_even, ++ float_round_to_zero, ++ float_round_up, ++ float_round_down, ++ -1, ++ -1, ++ -1, ++ float_round_to_odd, ++}; ++ ++/* set fpc */ ++void HELPER(sfpc)(CPUS390XState *env, uint64_t fpc) ++{ ++ if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u || ++ (!s390_has_feat(S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); ++ } ++ ++ /* Install everything in the main FPC. */ ++ env->fpc = fpc; ++ ++ /* Install the rounding mode in the shadow fpu_status. */ ++ set_float_rounding_mode(fpc_to_rnd[fpc & 0x7], &env->fpu_status); ++} ++ ++/* set fpc and signal */ ++void HELPER(sfas)(CPUS390XState *env, uint64_t fpc) ++{ ++ uint32_t signalling = env->fpc; ++ uint32_t s390_exc; ++ ++ if (fpc_to_rnd[fpc & 0x7] == -1 || fpc & 0x03030088u || ++ (!s390_has_feat(S390_FEAT_FLOATING_POINT_EXT) && fpc & 0x4)) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); ++ } ++ ++ /* ++ * FPC is set to the FPC operand with a bitwise OR of the signalling ++ * flags. ++ */ ++ env->fpc = fpc | (signalling & 0x00ff0000); ++ set_float_rounding_mode(fpc_to_rnd[fpc & 0x7], &env->fpu_status); ++ ++ /* ++ * If any signaling flag is enabled in the new FPC mask, a ++ * simulated-ieee-exception exception occurs.
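++ * The if/else chain below then reports only the highest-priority
++ * flag: invalid, then divide-by-zero, then overflow, then underflow,
++ * then inexact, then quantum, with overflow and underflow also
++ * carrying along a concurrent inexact.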
++ */ ++ s390_exc = (signalling >> 16) & (fpc >> 24); ++ if (s390_exc) { ++ if (s390_exc & S390_IEEE_MASK_INVALID) { ++ s390_exc = S390_IEEE_MASK_INVALID; ++ } else if (s390_exc & S390_IEEE_MASK_DIVBYZERO) { ++ s390_exc = S390_IEEE_MASK_DIVBYZERO; ++ } else if (s390_exc & S390_IEEE_MASK_OVERFLOW) { ++ s390_exc &= (S390_IEEE_MASK_OVERFLOW | S390_IEEE_MASK_INEXACT); ++ } else if (s390_exc & S390_IEEE_MASK_UNDERFLOW) { ++ s390_exc &= (S390_IEEE_MASK_UNDERFLOW | S390_IEEE_MASK_INEXACT); ++ } else if (s390_exc & S390_IEEE_MASK_INEXACT) { ++ s390_exc = S390_IEEE_MASK_INEXACT; ++ } else if (s390_exc & S390_IEEE_MASK_QUANTUM) { ++ s390_exc = S390_IEEE_MASK_QUANTUM; ++ } ++ tcg_s390_data_exception(env, s390_exc | 3, GETPC()); ++ } ++} ++ ++/* set bfp rounding mode */ ++void HELPER(srnm)(CPUS390XState *env, uint64_t rnd) ++{ ++ if (rnd > 0x7 || fpc_to_rnd[rnd & 0x7] == -1) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); ++ } ++ ++ env->fpc = deposit32(env->fpc, 0, 3, rnd); ++ set_float_rounding_mode(fpc_to_rnd[rnd & 0x7], &env->fpu_status); ++} +diff --git a/target/s390x/tcg/insn-data.def b/target/s390x/tcg/insn-data.def +new file mode 100644 +index 0000000000..3e5594210c +--- /dev/null ++++ b/target/s390x/tcg/insn-data.def +@@ -0,0 +1,1398 @@ ++/* ++ * Arguments to the opcode prototypes ++ * ++ * C(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC) ++ * D(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA) ++ * E(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA, FLAGS) ++ * F(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, FLAGS) ++ * ++ * OPC = (op << 8) | op2 where op is the major, op2 the minor opcode ++ * NAME = name of the opcode, used internally ++ * FMT = format of the opcode (defined in insn-format.def) ++ * FAC = facility the opcode is available in (defined in DisasFacility) ++ * I1 = func in1_xx fills o->in1 ++ * I2 = func in2_xx fills o->in2 ++ * P = func prep_xx initializes o->*out* ++ * W = func wout_xx writes o->*out* somewhere ++ * OP = func op_xx does the bulk of the operation ++ * CC = func cout_xx defines how cc should get set ++ * DATA = immediate argument to op_xx function ++ * FLAGS = categorize the type of instruction (e.g. 
for advanced checks) ++ * ++ * The helpers get called in order: I1, I2, P, OP, W, CC ++ */ ++ ++/* ADD */ ++ C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32) ++ C(0xb9f8, ARK, RRF_a, DO, r2, r3, new, r1_32, add, adds32) ++ C(0x5a00, A, RX_a, Z, r1, m2_32s, new, r1_32, add, adds32) ++ C(0xe35a, AY, RXY_a, LD, r1, m2_32s, new, r1_32, add, adds32) ++ C(0xb908, AGR, RRE, Z, r1, r2, r1, 0, add, adds64) ++ C(0xb918, AGFR, RRE, Z, r1, r2_32s, r1, 0, add, adds64) ++ C(0xb9e8, AGRK, RRF_a, DO, r2, r3, r1, 0, add, adds64) ++ C(0xe308, AG, RXY_a, Z, r1, m2_64, r1, 0, add, adds64) ++ C(0xe318, AGF, RXY_a, Z, r1, m2_32s, r1, 0, add, adds64) ++ F(0xb30a, AEBR, RRE, Z, e1, e2, new, e1, aeb, f32, IF_BFP) ++ F(0xb31a, ADBR, RRE, Z, f1, f2, new, f1, adb, f64, IF_BFP) ++ F(0xb34a, AXBR, RRE, Z, x2h, x2l, x1, x1, axb, f128, IF_BFP) ++ F(0xed0a, AEB, RXE, Z, e1, m2_32u, new, e1, aeb, f32, IF_BFP) ++ F(0xed1a, ADB, RXE, Z, f1, m2_64, new, f1, adb, f64, IF_BFP) ++/* ADD HIGH */ ++ C(0xb9c8, AHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, adds32) ++ C(0xb9d8, AHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, adds32) ++/* ADD IMMEDIATE */ ++ C(0xc209, AFI, RIL_a, EI, r1, i2, new, r1_32, add, adds32) ++ D(0xeb6a, ASI, SIY, GIE, la1, i2, new, 0, asi, adds32, MO_TESL) ++ C(0xecd8, AHIK, RIE_d, DO, r3, i2, new, r1_32, add, adds32) ++ C(0xc208, AGFI, RIL_a, EI, r1, i2, r1, 0, add, adds64) ++ D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEQ) ++ C(0xecd9, AGHIK, RIE_d, DO, r3, i2, r1, 0, add, adds64) ++/* ADD IMMEDIATE HIGH */ ++ C(0xcc08, AIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, adds32) ++/* ADD HALFWORD */ ++ C(0x4a00, AH, RX_a, Z, r1, m2_16s, new, r1_32, add, adds32) ++ C(0xe37a, AHY, RXY_a, LD, r1, m2_16s, new, r1_32, add, adds32) ++ C(0xe338, AGH, RXY_a, MIE2,r1, m2_16s, r1, 0, add, adds64) ++/* ADD HALFWORD IMMEDIATE */ ++ C(0xa70a, AHI, RI_a, Z, r1, i2, new, r1_32, add, adds32) ++ C(0xa70b, AGHI, RI_a, Z, r1, i2, r1, 0, add, adds64) ++ ++/* ADD LOGICAL */ ++ C(0x1e00, ALR, RR_a, Z, r1_32u, r2_32u, new, r1_32, add, addu32) ++ C(0xb9fa, ALRK, RRF_a, DO, r2_32u, r3_32u, new, r1_32, add, addu32) ++ C(0x5e00, AL, RX_a, Z, r1_32u, m2_32u, new, r1_32, add, addu32) ++ C(0xe35e, ALY, RXY_a, LD, r1_32u, m2_32u, new, r1_32, add, addu32) ++ C(0xb90a, ALGR, RRE, Z, r1, r2, r1, 0, addu64, addu64) ++ C(0xb91a, ALGFR, RRE, Z, r1, r2_32u, r1, 0, addu64, addu64) ++ C(0xb9ea, ALGRK, RRF_a, DO, r2, r3, r1, 0, addu64, addu64) ++ C(0xe30a, ALG, RXY_a, Z, r1, m2_64, r1, 0, addu64, addu64) ++ C(0xe31a, ALGF, RXY_a, Z, r1, m2_32u, r1, 0, addu64, addu64) ++/* ADD LOGICAL HIGH */ ++ C(0xb9ca, ALHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, addu32) ++ C(0xb9da, ALHHLR, RRF_a, HW, r2_sr32, r3_32u, new, r1_32h, add, addu32) ++/* ADD LOGICAL IMMEDIATE */ ++ C(0xc20b, ALFI, RIL_a, EI, r1_32u, i2_32u, new, r1_32, add, addu32) ++ C(0xc20a, ALGFI, RIL_a, EI, r1, i2_32u, r1, 0, addu64, addu64) ++/* ADD LOGICAL WITH SIGNED IMMEDIATE */ ++ D(0xeb6e, ALSI, SIY, GIE, la1, i2_32u, new, 0, asi, addu32, MO_TEUL) ++ C(0xecda, ALHSIK, RIE_d, DO, r3_32u, i2_32u, new, r1_32, add, addu32) ++ D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asiu64, addu64, MO_TEQ) ++ C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, addu64, addu64) ++/* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */ ++ C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2_32u, new, r1_32h, add, addu32) ++ C(0xcc0b, ALSIHN, RIL_a, HW, r1_sr32, i2_32u, new, r1_32h, add, 0) ++/* ADD LOGICAL WITH CARRY */ ++ C(0xb998, ALCR, RRE, Z, r1_32u, r2_32u, new, r1_32, addc32, addu32) ++ C(0xb988, 
ALCGR, RRE, Z, r1, r2, r1, 0, addc64, addu64) ++ C(0xe398, ALC, RXY_a, Z, r1_32u, m2_32u, new, r1_32, addc32, addu32) ++ C(0xe388, ALCG, RXY_a, Z, r1, m2_64, r1, 0, addc64, addu64) ++ ++/* AND */ ++ C(0x1400, NR, RR_a, Z, r1, r2, new, r1_32, and, nz32) ++ C(0xb9f4, NRK, RRF_a, DO, r2, r3, new, r1_32, and, nz32) ++ C(0x5400, N, RX_a, Z, r1, m2_32s, new, r1_32, and, nz32) ++ C(0xe354, NY, RXY_a, LD, r1, m2_32s, new, r1_32, and, nz32) ++ C(0xb980, NGR, RRE, Z, r1, r2, r1, 0, and, nz64) ++ C(0xb9e4, NGRK, RRF_a, DO, r2, r3, r1, 0, and, nz64) ++ C(0xe380, NG, RXY_a, Z, r1, m2_64, r1, 0, and, nz64) ++ C(0xd400, NC, SS_a, Z, la1, a2, 0, 0, nc, 0) ++/* AND IMMEDIATE */ ++ D(0xc00a, NIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2020) ++ D(0xc00b, NILF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2000) ++ D(0xa504, NIHH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1030) ++ D(0xa505, NIHL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1020) ++ D(0xa506, NILH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1010) ++ D(0xa507, NILL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1000) ++ D(0x9400, NI, SI, Z, la1, i2_8u, new, 0, ni, nz64, MO_UB) ++ D(0xeb54, NIY, SIY, LD, la1, i2_8u, new, 0, ni, nz64, MO_UB) ++ ++/* BRANCH AND LINK */ ++ C(0x0500, BALR, RR_a, Z, 0, r2_nz, r1, 0, bal, 0) ++ C(0x4500, BAL, RX_a, Z, 0, a2, r1, 0, bal, 0) ++/* BRANCH AND SAVE */ ++ C(0x0d00, BASR, RR_a, Z, 0, r2_nz, r1, 0, bas, 0) ++ C(0x4d00, BAS, RX_a, Z, 0, a2, r1, 0, bas, 0) ++/* BRANCH RELATIVE AND SAVE */ ++ C(0xa705, BRAS, RI_b, Z, 0, 0, r1, 0, basi, 0) ++ C(0xc005, BRASL, RIL_b, Z, 0, 0, r1, 0, basi, 0) ++/* BRANCH INDIRECT ON CONDITION */ ++ C(0xe347, BIC, RXY_b, MIE2,0, m2_64w, 0, 0, bc, 0) ++/* BRANCH ON CONDITION */ ++ C(0x0700, BCR, RR_b, Z, 0, r2_nz, 0, 0, bc, 0) ++ C(0x4700, BC, RX_b, Z, 0, a2, 0, 0, bc, 0) ++/* BRANCH RELATIVE ON CONDITION */ ++ C(0xa704, BRC, RI_c, Z, 0, 0, 0, 0, bc, 0) ++ C(0xc004, BRCL, RIL_c, Z, 0, 0, 0, 0, bc, 0) ++/* BRANCH ON COUNT */ ++ C(0x0600, BCTR, RR_a, Z, 0, r2_nz, 0, 0, bct32, 0) ++ C(0xb946, BCTGR, RRE, Z, 0, r2_nz, 0, 0, bct64, 0) ++ C(0x4600, BCT, RX_a, Z, 0, a2, 0, 0, bct32, 0) ++ C(0xe346, BCTG, RXY_a, Z, 0, a2, 0, 0, bct64, 0) ++/* BRANCH RELATIVE ON COUNT */ ++ C(0xa706, BRCT, RI_b, Z, 0, 0, 0, 0, bct32, 0) ++ C(0xa707, BRCTG, RI_b, Z, 0, 0, 0, 0, bct64, 0) ++/* BRANCH RELATIVE ON COUNT HIGH */ ++ C(0xcc06, BRCTH, RIL_b, HW, 0, 0, 0, 0, bcth, 0) ++/* BRANCH ON INDEX */ ++ D(0x8600, BXH, RS_a, Z, 0, a2, 0, 0, bx32, 0, 0) ++ D(0x8700, BXLE, RS_a, Z, 0, a2, 0, 0, bx32, 0, 1) ++ D(0xeb44, BXHG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 0) ++ D(0xeb45, BXLEG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 1) ++/* BRANCH RELATIVE ON INDEX */ ++ D(0x8400, BRXH, RSI, Z, 0, 0, 0, 0, bx32, 0, 0) ++ D(0x8500, BRXLE, RSI, Z, 0, 0, 0, 0, bx32, 0, 1) ++ D(0xec44, BRXHG, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 0) ++ D(0xec45, BRXHLE, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 1) ++/* BRANCH PREDICTION PRELOAD */ ++ /* ??? Format is SMI, but implemented as NOP, so we need no fields. */ ++ C(0xc700, BPP, E, EH, 0, 0, 0, 0, 0, 0) ++/* BRANCH PREDICTION RELATIVE PRELOAD */ ++ /* ??? Format is MII, but implemented as NOP, so we need no fields. */ ++ C(0xc500, BPRP, E, EH, 0, 0, 0, 0, 0, 0) ++/* NEXT INSTRUCTION ACCESS INTENT */ ++ /* ??? Format is IE, but implemented as NOP, so we need no fields. 
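NIAI only expresses an access-intent hint, so a NOP is an
++ architecturally acceptable implementation. 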
*/ ++ C(0xb2fa, NIAI, E, EH, 0, 0, 0, 0, 0, 0) ++ ++/* CHECKSUM */ ++ C(0xb241, CKSM, RRE, Z, r1_o, ra2, new, r1_32, cksm, 0) ++ ++/* COPY SIGN */ ++ F(0xb372, CPSDR, RRF_b, FPSSH, f3, f2, new, f1, cps, 0, IF_AFP1 | IF_AFP2 | IF_AFP3) ++ ++/* COMPARE */ ++ C(0x1900, CR, RR_a, Z, r1_o, r2_o, 0, 0, 0, cmps32) ++ C(0x5900, C, RX_a, Z, r1_o, m2_32s, 0, 0, 0, cmps32) ++ C(0xe359, CY, RXY_a, LD, r1_o, m2_32s, 0, 0, 0, cmps32) ++ C(0xb920, CGR, RRE, Z, r1_o, r2_o, 0, 0, 0, cmps64) ++ C(0xb930, CGFR, RRE, Z, r1_o, r2_32s, 0, 0, 0, cmps64) ++ C(0xe320, CG, RXY_a, Z, r1_o, m2_64, 0, 0, 0, cmps64) ++ C(0xe330, CGF, RXY_a, Z, r1_o, m2_32s, 0, 0, 0, cmps64) ++ F(0xb309, CEBR, RRE, Z, e1, e2, 0, 0, ceb, 0, IF_BFP) ++ F(0xb319, CDBR, RRE, Z, f1, f2, 0, 0, cdb, 0, IF_BFP) ++ F(0xb349, CXBR, RRE, Z, x2h, x2l, x1, 0, cxb, 0, IF_BFP) ++ F(0xed09, CEB, RXE, Z, e1, m2_32u, 0, 0, ceb, 0, IF_BFP) ++ F(0xed19, CDB, RXE, Z, f1, m2_64, 0, 0, cdb, 0, IF_BFP) ++/* COMPARE AND SIGNAL */ ++ F(0xb308, KEBR, RRE, Z, e1, e2, 0, 0, keb, 0, IF_BFP) ++ F(0xb318, KDBR, RRE, Z, f1, f2, 0, 0, kdb, 0, IF_BFP) ++ F(0xb348, KXBR, RRE, Z, x2h, x2l, x1, 0, kxb, 0, IF_BFP) ++ F(0xed08, KEB, RXE, Z, e1, m2_32u, 0, 0, keb, 0, IF_BFP) ++ F(0xed18, KDB, RXE, Z, f1, m2_64, 0, 0, kdb, 0, IF_BFP) ++/* COMPARE IMMEDIATE */ ++ C(0xc20d, CFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps32) ++ C(0xc20c, CGFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps64) ++/* COMPARE RELATIVE LONG */ ++ C(0xc60d, CRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps32) ++ C(0xc608, CGRL, RIL_b, GIE, r1, mri2_64, 0, 0, 0, cmps64) ++ C(0xc60c, CGFRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps64) ++/* COMPARE HALFWORD */ ++ C(0x4900, CH, RX_a, Z, r1_o, m2_16s, 0, 0, 0, cmps32) ++ C(0xe379, CHY, RXY_a, LD, r1_o, m2_16s, 0, 0, 0, cmps32) ++ C(0xe334, CGH, RXY_a, GIE, r1_o, m2_16s, 0, 0, 0, cmps64) ++/* COMPARE HALFWORD IMMEDIATE */ ++ C(0xa70e, CHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps32) ++ C(0xa70f, CGHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps64) ++ C(0xe554, CHHSI, SIL, GIE, m1_16s, i2, 0, 0, 0, cmps64) ++ C(0xe55c, CHSI, SIL, GIE, m1_32s, i2, 0, 0, 0, cmps64) ++ C(0xe558, CGHSI, SIL, GIE, m1_64, i2, 0, 0, 0, cmps64) ++/* COMPARE HALFWORD RELATIVE LONG */ ++ C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_32s, 0, 0, 0, cmps32) ++ C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmps64) ++/* COMPARE HIGH */ ++ C(0xb9cd, CHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmps32) ++ C(0xb9dd, CHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmps32) ++ C(0xe3cd, CHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmps32) ++/* COMPARE IMMEDIATE HIGH */ ++ C(0xcc0d, CIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmps32) ++ ++/* COMPARE LOGICAL */ ++ C(0x1500, CLR, RR_a, Z, r1, r2, 0, 0, 0, cmpu32) ++ C(0x5500, CL, RX_a, Z, r1, m2_32s, 0, 0, 0, cmpu32) ++ C(0xe355, CLY, RXY_a, LD, r1, m2_32s, 0, 0, 0, cmpu32) ++ C(0xb921, CLGR, RRE, Z, r1, r2, 0, 0, 0, cmpu64) ++ C(0xb931, CLGFR, RRE, Z, r1, r2_32u, 0, 0, 0, cmpu64) ++ C(0xe321, CLG, RXY_a, Z, r1, m2_64, 0, 0, 0, cmpu64) ++ C(0xe331, CLGF, RXY_a, Z, r1, m2_32u, 0, 0, 0, cmpu64) ++ C(0xd500, CLC, SS_a, Z, la1, a2, 0, 0, clc, 0) ++/* COMPARE LOGICAL HIGH */ ++ C(0xb9cf, CLHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmpu32) ++ C(0xb9df, CLHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmpu32) ++ C(0xe3cf, CLHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmpu32) ++/* COMPARE LOGICAL IMMEDIATE */ ++ C(0xc20f, CLFI, RIL_a, EI, r1, i2, 0, 0, 0, cmpu32) ++ C(0xc20e, CLGFI, RIL_a, EI, r1, i2_32u, 0, 0, 0, cmpu64) ++ C(0x9500, CLI, SI, Z, m1_8u, i2_8u, 0, 0, 0, cmpu64) ++ C(0xeb55, CLIY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, cmpu64) ++ 
C(0xe555, CLHHSI, SIL, GIE, m1_16u, i2_16u, 0, 0, 0, cmpu64) ++ C(0xe55d, CLFHSI, SIL, GIE, m1_32u, i2_16u, 0, 0, 0, cmpu64) ++ C(0xe559, CLGHSI, SIL, GIE, m1_64, i2_16u, 0, 0, 0, cmpu64) ++/* COMPARE LOGICAL IMMEDIATE HIGH */ ++ C(0xcc0f, CLIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmpu32) ++/* COMPARE LOGICAL RELATIVE LONG */ ++ C(0xc60f, CLRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu32) ++ C(0xc60a, CLGRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmpu64) ++ C(0xc60e, CLGFRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu64) ++ C(0xc607, CLHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu32) ++ C(0xc606, CLGHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu64) ++/* COMPARE LOGICAL LONG */ ++ C(0x0f00, CLCL, RR_a, Z, 0, 0, 0, 0, clcl, 0) ++/* COMPARE LOGICAL LONG EXTENDED */ ++ C(0xa900, CLCLE, RS_a, Z, 0, a2, 0, 0, clcle, 0) ++/* COMPARE LOGICAL LONG UNICODE */ ++ C(0xeb8f, CLCLU, RSY_a, E2, 0, a2, 0, 0, clclu, 0) ++/* COMPARE LOGICAL CHARACTERS UNDER MASK */ ++ C(0xbd00, CLM, RS_b, Z, r1_o, a2, 0, 0, clm, 0) ++ C(0xeb21, CLMY, RSY_b, LD, r1_o, a2, 0, 0, clm, 0) ++ C(0xeb20, CLMH, RSY_b, Z, r1_sr32, a2, 0, 0, clm, 0) ++/* COMPARE LOGICAL STRING */ ++ C(0xb25d, CLST, RRE, Z, r1_o, r2_o, 0, 0, clst, 0) ++ ++/* COMPARE AND BRANCH */ ++ D(0xecf6, CRB, RRS, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0) ++ D(0xece4, CGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 0) ++ D(0xec76, CRJ, RIE_b, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0) ++ D(0xec64, CGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 0) ++ D(0xecfe, CIB, RIS, GIE, r1_32s, i2, 0, 0, cj, 0, 0) ++ D(0xecfc, CGIB, RIS, GIE, r1_o, i2, 0, 0, cj, 0, 0) ++ D(0xec7e, CIJ, RIE_c, GIE, r1_32s, i2, 0, 0, cj, 0, 0) ++ D(0xec7c, CGIJ, RIE_c, GIE, r1_o, i2, 0, 0, cj, 0, 0) ++/* COMPARE LOGICAL AND BRANCH */ ++ D(0xecf7, CLRB, RRS, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1) ++ D(0xece5, CLGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 1) ++ D(0xec77, CLRJ, RIE_b, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1) ++ D(0xec65, CLGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 1) ++ D(0xecff, CLIB, RIS, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1) ++ D(0xecfd, CLGIB, RIS, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1) ++ D(0xec7f, CLIJ, RIE_c, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1) ++ D(0xec7d, CLGIJ, RIE_c, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1) ++ ++/* COMPARE AND SWAP */ ++ D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) ++ D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL) ++ D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEQ) ++/* COMPARE DOUBLE AND SWAP */ ++ D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) ++ D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ) ++ C(0xeb3e, CDSG, RSY_a, Z, 0, 0, 0, 0, cdsg, 0) ++/* COMPARE AND SWAP AND STORE */ ++ C(0xc802, CSST, SSF, CASS, la1, a2, 0, 0, csst, 0) ++ ++/* COMPARE AND TRAP */ ++ D(0xb972, CRT, RRF_c, GIE, r1_32s, r2_32s, 0, 0, ct, 0, 0) ++ D(0xb960, CGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 0) ++ D(0xec72, CIT, RIE_a, GIE, r1_32s, i2, 0, 0, ct, 0, 0) ++ D(0xec70, CGIT, RIE_a, GIE, r1_o, i2, 0, 0, ct, 0, 0) ++/* COMPARE LOGICAL AND TRAP */ ++ D(0xb973, CLRT, RRF_c, GIE, r1_32u, r2_32u, 0, 0, ct, 0, 1) ++ D(0xb961, CLGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 1) ++ D(0xeb23, CLT, RSY_b, MIE, r1_32u, m2_32u, 0, 0, ct, 0, 1) ++ D(0xeb2b, CLGT, RSY_b, MIE, r1_o, m2_64, 0, 0, ct, 0, 1) ++ D(0xec73, CLFIT, RIE_a, GIE, r1_32u, i2_32u, 0, 0, ct, 0, 1) ++ D(0xec71, CLGIT, RIE_a, GIE, r1_o, i2_32u, 0, 0, ct, 0, 1) ++ ++/* CONVERT TO DECIMAL */ ++ C(0x4e00, CVD, RX_a, Z, r1_o, a2, 0, 0, cvd, 0) ++ C(0xe326, CVDY, RXY_a, LD, r1_o, a2, 
0, 0, cvd, 0) ++/* CONVERT TO FIXED */ ++ F(0xb398, CFEBR, RRF_e, Z, 0, e2, new, r1_32, cfeb, 0, IF_BFP) ++ F(0xb399, CFDBR, RRF_e, Z, 0, f2, new, r1_32, cfdb, 0, IF_BFP) ++ F(0xb39a, CFXBR, RRF_e, Z, x2h, x2l, new, r1_32, cfxb, 0, IF_BFP) ++ F(0xb3a8, CGEBR, RRF_e, Z, 0, e2, r1, 0, cgeb, 0, IF_BFP) ++ F(0xb3a9, CGDBR, RRF_e, Z, 0, f2, r1, 0, cgdb, 0, IF_BFP) ++ F(0xb3aa, CGXBR, RRF_e, Z, x2h, x2l, r1, 0, cgxb, 0, IF_BFP) ++/* CONVERT FROM FIXED */ ++ F(0xb394, CEFBR, RRF_e, Z, 0, r2_32s, new, e1, cegb, 0, IF_BFP) ++ F(0xb395, CDFBR, RRF_e, Z, 0, r2_32s, new, f1, cdgb, 0, IF_BFP) ++ F(0xb396, CXFBR, RRF_e, Z, 0, r2_32s, new_P, x1, cxgb, 0, IF_BFP) ++ F(0xb3a4, CEGBR, RRF_e, Z, 0, r2_o, new, e1, cegb, 0, IF_BFP) ++ F(0xb3a5, CDGBR, RRF_e, Z, 0, r2_o, new, f1, cdgb, 0, IF_BFP) ++ F(0xb3a6, CXGBR, RRF_e, Z, 0, r2_o, new_P, x1, cxgb, 0, IF_BFP) ++/* CONVERT TO LOGICAL */ ++ F(0xb39c, CLFEBR, RRF_e, FPE, 0, e2, new, r1_32, clfeb, 0, IF_BFP) ++ F(0xb39d, CLFDBR, RRF_e, FPE, 0, f2, new, r1_32, clfdb, 0, IF_BFP) ++ F(0xb39e, CLFXBR, RRF_e, FPE, x2h, x2l, new, r1_32, clfxb, 0, IF_BFP) ++ F(0xb3ac, CLGEBR, RRF_e, FPE, 0, e2, r1, 0, clgeb, 0, IF_BFP) ++ F(0xb3ad, CLGDBR, RRF_e, FPE, 0, f2, r1, 0, clgdb, 0, IF_BFP) ++ F(0xb3ae, CLGXBR, RRF_e, FPE, x2h, x2l, r1, 0, clgxb, 0, IF_BFP) ++/* CONVERT FROM LOGICAL */ ++ F(0xb390, CELFBR, RRF_e, FPE, 0, r2_32u, new, e1, celgb, 0, IF_BFP) ++ F(0xb391, CDLFBR, RRF_e, FPE, 0, r2_32u, new, f1, cdlgb, 0, IF_BFP) ++ F(0xb392, CXLFBR, RRF_e, FPE, 0, r2_32u, new_P, x1, cxlgb, 0, IF_BFP) ++ F(0xb3a0, CELGBR, RRF_e, FPE, 0, r2_o, new, e1, celgb, 0, IF_BFP) ++ F(0xb3a1, CDLGBR, RRF_e, FPE, 0, r2_o, new, f1, cdlgb, 0, IF_BFP) ++ F(0xb3a2, CXLGBR, RRF_e, FPE, 0, r2_o, new_P, x1, cxlgb, 0, IF_BFP) ++ ++/* CONVERT UTF-8 TO UTF-16 */ ++ D(0xb2a7, CU12, RRF_c, Z, 0, 0, 0, 0, cuXX, 0, 12) ++/* CONVERT UTF-8 TO UTF-32 */ ++ D(0xb9b0, CU14, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 14) ++/* CONVERT UTF-16 to UTF-8 */ ++ D(0xb2a6, CU21, RRF_c, Z, 0, 0, 0, 0, cuXX, 0, 21) ++/* CONVERT UTF-16 to UTF-32 */ ++ D(0xb9b1, CU24, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 24) ++/* CONVERT UTF-32 to UTF-8 */ ++ D(0xb9b2, CU41, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 41) ++/* CONVERT UTF-32 to UTF-16 */ ++ D(0xb9b3, CU42, RRF_c, ETF3, 0, 0, 0, 0, cuXX, 0, 42) ++ ++/* DIVIDE */ ++ C(0x1d00, DR, RR_a, Z, r1_D32, r2_32s, new_P, r1_P32, divs32, 0) ++ C(0x5d00, D, RX_a, Z, r1_D32, m2_32s, new_P, r1_P32, divs32, 0) ++ F(0xb30d, DEBR, RRE, Z, e1, e2, new, e1, deb, 0, IF_BFP) ++ F(0xb31d, DDBR, RRE, Z, f1, f2, new, f1, ddb, 0, IF_BFP) ++ F(0xb34d, DXBR, RRE, Z, x2h, x2l, x1, x1, dxb, 0, IF_BFP) ++ F(0xed0d, DEB, RXE, Z, e1, m2_32u, new, e1, deb, 0, IF_BFP) ++ F(0xed1d, DDB, RXE, Z, f1, m2_64, new, f1, ddb, 0, IF_BFP) ++/* DIVIDE LOGICAL */ ++ C(0xb997, DLR, RRE, Z, r1_D32, r2_32u, new_P, r1_P32, divu32, 0) ++ C(0xe397, DL, RXY_a, Z, r1_D32, m2_32u, new_P, r1_P32, divu32, 0) ++ C(0xb987, DLGR, RRE, Z, 0, r2_o, r1_P, 0, divu64, 0) ++ C(0xe387, DLG, RXY_a, Z, 0, m2_64, r1_P, 0, divu64, 0) ++/* DIVIDE SINGLE */ ++ C(0xb90d, DSGR, RRE, Z, r1p1, r2, r1_P, 0, divs64, 0) ++ C(0xb91d, DSGFR, RRE, Z, r1p1, r2_32s, r1_P, 0, divs64, 0) ++ C(0xe30d, DSG, RXY_a, Z, r1p1, m2_64, r1_P, 0, divs64, 0) ++ C(0xe31d, DSGF, RXY_a, Z, r1p1, m2_32s, r1_P, 0, divs64, 0) ++ ++/* EXCLUSIVE OR */ ++ C(0x1700, XR, RR_a, Z, r1, r2, new, r1_32, xor, nz32) ++ C(0xb9f7, XRK, RRF_a, DO, r2, r3, new, r1_32, xor, nz32) ++ C(0x5700, X, RX_a, Z, r1, m2_32s, new, r1_32, xor, nz32) ++ C(0xe357, XY, RXY_a, LD, r1, m2_32s, new, r1_32, xor, nz32) ++ C(0xb982, 
XGR, RRE, Z, r1, r2, r1, 0, xor, nz64) ++ C(0xb9e7, XGRK, RRF_a, DO, r2, r3, r1, 0, xor, nz64) ++ C(0xe382, XG, RXY_a, Z, r1, m2_64, r1, 0, xor, nz64) ++ C(0xd700, XC, SS_a, Z, 0, 0, 0, 0, xc, 0) ++/* EXCLUSIVE OR IMMEDIATE */ ++ D(0xc006, XIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2020) ++ D(0xc007, XILF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2000) ++ D(0x9700, XI, SI, Z, la1, i2_8u, new, 0, xi, nz64, MO_UB) ++ D(0xeb57, XIY, SIY, LD, la1, i2_8u, new, 0, xi, nz64, MO_UB) ++ ++/* EXECUTE */ ++ C(0x4400, EX, RX_a, Z, 0, a2, 0, 0, ex, 0) ++/* EXECUTE RELATIVE LONG */ ++ C(0xc600, EXRL, RIL_b, EE, 0, ri2, 0, 0, ex, 0) ++ ++/* EXTRACT ACCESS */ ++ C(0xb24f, EAR, RRE, Z, 0, 0, new, r1_32, ear, 0) ++/* EXTRACT CPU ATTRIBUTE */ ++ C(0xeb4c, ECAG, RSY_a, GIE, 0, a2, r1, 0, ecag, 0) ++/* EXTRACT CPU TIME */ ++ F(0xc801, ECTG, SSF, ECT, 0, 0, 0, 0, ectg, 0, IF_IO) ++/* EXTRACT FPC */ ++ F(0xb38c, EFPC, RRE, Z, 0, 0, new, r1_32, efpc, 0, IF_BFP) ++/* EXTRACT PSW */ ++ C(0xb98d, EPSW, RRE, Z, 0, 0, 0, 0, epsw, 0) ++ ++/* FIND LEFTMOST ONE */ ++ C(0xb983, FLOGR, RRE, EI, 0, r2_o, r1_P, 0, flogr, 0) ++ ++/* INSERT CHARACTER */ ++ C(0x4300, IC, RX_a, Z, 0, m2_8u, 0, r1_8, mov2, 0) ++ C(0xe373, ICY, RXY_a, LD, 0, m2_8u, 0, r1_8, mov2, 0) ++/* INSERT CHARACTERS UNDER MASK */ ++ D(0xbf00, ICM, RS_b, Z, 0, a2, r1, 0, icm, 0, 0) ++ D(0xeb81, ICMY, RSY_b, LD, 0, a2, r1, 0, icm, 0, 0) ++ D(0xeb80, ICMH, RSY_b, Z, 0, a2, r1, 0, icm, 0, 32) ++/* INSERT IMMEDIATE */ ++ D(0xc008, IIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2020) ++ D(0xc009, IILF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2000) ++ D(0xa500, IIHH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1030) ++ D(0xa501, IIHL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1020) ++ D(0xa502, IILH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1010) ++ D(0xa503, IILL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1000) ++/* INSERT PROGRAM MASK */ ++ C(0xb222, IPM, RRE, Z, 0, 0, r1, 0, ipm, 0) ++ ++/* LOAD */ ++ C(0x1800, LR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, 0) ++ C(0x5800, L, RX_a, Z, 0, a2, new, r1_32, ld32s, 0) ++ C(0xe358, LY, RXY_a, LD, 0, a2, new, r1_32, ld32s, 0) ++ C(0xb904, LGR, RRE, Z, 0, r2_o, 0, r1, mov2, 0) ++ C(0xb914, LGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, 0) ++ C(0xe304, LG, RXY_a, Z, 0, a2, r1, 0, ld64, 0) ++ C(0xe314, LGF, RXY_a, Z, 0, a2, r1, 0, ld32s, 0) ++ F(0x2800, LDR, RR_a, Z, 0, f2, 0, f1, mov2, 0, IF_AFP1 | IF_AFP2) ++ F(0x6800, LD, RX_a, Z, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) ++ F(0xed65, LDY, RXY_a, LD, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) ++ F(0x3800, LER, RR_a, Z, 0, e2, 0, cond_e1e2, mov2, 0, IF_AFP1 | IF_AFP2) ++ F(0x7800, LE, RX_a, Z, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1) ++ F(0xed64, LEY, RXY_a, LD, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1) ++ F(0xb365, LXR, RRE, Z, x2h, x2l, 0, x1, movx, 0, IF_AFP1) ++/* LOAD IMMEDIATE */ ++ C(0xc001, LGFI, RIL_a, EI, 0, i2, 0, r1, mov2, 0) ++/* LOAD RELATIVE LONG */ ++ C(0xc40d, LRL, RIL_b, GIE, 0, ri2, new, r1_32, ld32s, 0) ++ C(0xc408, LGRL, RIL_b, GIE, 0, ri2, r1, 0, ld64, 0) ++ C(0xc40c, LGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32s, 0) ++/* LOAD ADDRESS */ ++ C(0x4100, LA, RX_a, Z, 0, a2, 0, r1, mov2, 0) ++ C(0xe371, LAY, RXY_a, LD, 0, a2, 0, r1, mov2, 0) ++/* LOAD ADDRESS EXTENDED */ ++ C(0x5100, LAE, RX_a, Z, 0, a2, 0, r1, mov2e, 0) ++ C(0xe375, LAEY, RXY_a, GIE, 0, a2, 0, r1, mov2e, 0) ++/* LOAD ADDRESS RELATIVE LONG */ ++ C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0) ++/* LOAD AND ADD */ ++ D(0xebf8, LAA, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, laa, adds32, MO_TESL) ++ D(0xebe8, LAAG, RSY_a, 
ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEQ) ++/* LOAD AND ADD LOGICAL */ ++ D(0xebfa, LAAL, RSY_a, ILA, r3_32u, a2, new, in2_r1_32, laa, addu32, MO_TEUL) ++ D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEQ) ++/* LOAD AND AND */ ++ D(0xebf4, LAN, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lan, nz32, MO_TESL) ++ D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEQ) ++/* LOAD AND EXCLUSIVE OR */ ++ D(0xebf7, LAX, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lax, nz32, MO_TESL) ++ D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEQ) ++/* LOAD AND OR */ ++ D(0xebf6, LAO, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lao, nz32, MO_TESL) ++ D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEQ) ++/* LOAD AND TEST */ ++ C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32) ++ C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64) ++ C(0xb912, LTGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, s64) ++ C(0xe312, LT, RXY_a, EI, 0, a2, new, r1_32, ld32s, s64) ++ C(0xe302, LTG, RXY_a, EI, 0, a2, r1, 0, ld64, s64) ++ C(0xe332, LTGF, RXY_a, GIE, 0, a2, r1, 0, ld32s, s64) ++ F(0xb302, LTEBR, RRE, Z, 0, e2, 0, cond_e1e2, mov2, f32, IF_BFP) ++ F(0xb312, LTDBR, RRE, Z, 0, f2, 0, f1, mov2, f64, IF_BFP) ++ F(0xb342, LTXBR, RRE, Z, x2h, x2l, 0, x1, movx, f128, IF_BFP) ++/* LOAD AND TRAP */ ++ C(0xe39f, LAT, RXY_a, LAT, 0, m2_32u, r1, 0, lat, 0) ++ C(0xe385, LGAT, RXY_a, LAT, 0, a2, r1, 0, lgat, 0) ++/* LOAD AND ZERO RIGHTMOST BYTE */ ++ C(0xe3eb, LZRF, RXY_a, LZRB, 0, m2_32u, new, r1_32, lzrb, 0) ++ C(0xe32a, LZRG, RXY_a, LZRB, 0, m2_64, r1, 0, lzrb, 0) ++/* LOAD LOGICAL AND ZERO RIGHTMOST BYTE */ ++ C(0xe33a, LLZRGF, RXY_a, LZRB, 0, m2_32u, r1, 0, lzrb, 0) ++/* LOAD BYTE */ ++ C(0xb926, LBR, RRE, EI, 0, r2_8s, 0, r1_32, mov2, 0) ++ C(0xb906, LGBR, RRE, EI, 0, r2_8s, 0, r1, mov2, 0) ++ C(0xe376, LB, RXY_a, LD, 0, a2, new, r1_32, ld8s, 0) ++ C(0xe377, LGB, RXY_a, LD, 0, a2, r1, 0, ld8s, 0) ++/* LOAD BYTE HIGH */ ++ C(0xe3c0, LBH, RXY_a, HW, 0, a2, new, r1_32h, ld8s, 0) ++/* LOAD COMPLEMENT */ ++ C(0x1300, LCR, RR_a, Z, 0, r2, new, r1_32, neg, neg32) ++ C(0xb903, LCGR, RRE, Z, 0, r2, r1, 0, neg, neg64) ++ C(0xb913, LCGFR, RRE, Z, 0, r2_32s, r1, 0, neg, neg64) ++ F(0xb303, LCEBR, RRE, Z, 0, e2, new, e1, negf32, f32, IF_BFP) ++ F(0xb313, LCDBR, RRE, Z, 0, f2, new, f1, negf64, f64, IF_BFP) ++ F(0xb343, LCXBR, RRE, Z, x2h, x2l, new_P, x1, negf128, f128, IF_BFP) ++ F(0xb373, LCDFR, RRE, FPSSH, 0, f2, new, f1, negf64, 0, IF_AFP1 | IF_AFP2) ++/* LOAD COUNT TO BLOCK BOUNDARY */ ++ C(0xe727, LCBB, RXE, V, la2, 0, r1, 0, lcbb, 0) ++/* LOAD HALFWORD */ ++ C(0xb927, LHR, RRE, EI, 0, r2_16s, 0, r1_32, mov2, 0) ++ C(0xb907, LGHR, RRE, EI, 0, r2_16s, 0, r1, mov2, 0) ++ C(0x4800, LH, RX_a, Z, 0, a2, new, r1_32, ld16s, 0) ++ C(0xe378, LHY, RXY_a, LD, 0, a2, new, r1_32, ld16s, 0) ++ C(0xe315, LGH, RXY_a, Z, 0, a2, r1, 0, ld16s, 0) ++/* LOAD HALFWORD HIGH */ ++ C(0xe3c4, LHH, RXY_a, HW, 0, a2, new, r1_32h, ld16s, 0) ++/* LOAD HALFWORD IMMEDIATE */ ++ C(0xa708, LHI, RI_a, Z, 0, i2, 0, r1_32, mov2, 0) ++ C(0xa709, LGHI, RI_a, Z, 0, i2, 0, r1, mov2, 0) ++/* LOAD HALFWORD RELATIVE LONG */ ++ C(0xc405, LHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16s, 0) ++ C(0xc404, LGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16s, 0) ++/* LOAD HIGH */ ++ C(0xe3ca, LFH, RXY_a, HW, 0, a2, new, r1_32h, ld32u, 0) ++/* LOAD HIGH AND TRAP */ ++ C(0xe3c8, LFHAT, RXY_a, LAT, 0, m2_32u, r1, 0, lfhat, 0) ++/* LOAD LOGICAL */ ++ C(0xb916, LLGFR, RRE, Z, 0, r2_32u, 0, r1, mov2, 0) ++ C(0xe316, LLGF, RXY_a, Z, 0, a2, r1, 0, ld32u, 0)
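++
++/*
++ * Editor's note: a minimal sketch of how these rows are consumed. The
++ * macro bodies shown here are an assumption for illustration (the real
++ * ones live in the translator, not in this file), but the shape follows
++ * from the C/D/E/F prototypes documented at the top of the file: C, D
++ * and F merely default the DATA and FLAGS arguments of E.
++ *
++ * #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
++ * D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
++ * #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, DA) \
++ * E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, DA, 0)
++ * #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
++ * E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
++ *
++ * A consumer then defines E for its own purpose and includes this file,
++ * e.g. to build a table of instruction descriptors (struct and field
++ * names here are hypothetical):
++ *
++ * #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, DA, FL) \
++ * { .opc = OPC, .name = #NM, .data = DA, .flags = FL },
++ * static const struct insn_desc insn_table[] = {
++ * #include "insn-data.def"
++ * };
++ */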
++/* LOAD LOGICAL AND TRAP */ ++ C(0xe39d, LLGFAT, RXY_a, LAT, 0, a2, r1, 0, llgfat, 0) ++/* LOAD LOGICAL RELATIVE LONG */ ++ C(0xc40e, LLGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32u, 0) ++/* LOAD LOGICAL CHARACTER */ ++ C(0xb994, LLCR, RRE, EI, 0, r2_8u, 0, r1_32, mov2, 0) ++ C(0xb984, LLGCR, RRE, EI, 0, r2_8u, 0, r1, mov2, 0) ++ C(0xe394, LLC, RXY_a, EI, 0, a2, new, r1_32, ld8u, 0) ++ C(0xe390, LLGC, RXY_a, Z, 0, a2, r1, 0, ld8u, 0) ++/* LOAD LOGICAL CHARACTER HIGH */ ++ C(0xe3c2, LLCH, RXY_a, HW, 0, a2, new, r1_32h, ld8u, 0) ++/* LOAD LOGICAL HALFWORD */ ++ C(0xb995, LLHR, RRE, EI, 0, r2_16u, 0, r1_32, mov2, 0) ++ C(0xb985, LLGHR, RRE, EI, 0, r2_16u, 0, r1, mov2, 0) ++ C(0xe395, LLH, RXY_a, EI, 0, a2, new, r1_32, ld16u, 0) ++ C(0xe391, LLGH, RXY_a, Z, 0, a2, r1, 0, ld16u, 0) ++/* LOAD LOGICAL HALFWORD HIGH */ ++ C(0xe3c6, LLHH, RXY_a, HW, 0, a2, new, r1_32h, ld16u, 0) ++/* LOAD LOGICAL HALFWORD RELATIVE LONG */ ++ C(0xc402, LLHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16u, 0) ++ C(0xc406, LLGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16u, 0) ++/* LOAD LOGICAL IMMEDIATE */ ++ D(0xc00e, LLIHF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 32) ++ D(0xc00f, LLILF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 0) ++ D(0xa50c, LLIHH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 48) ++ D(0xa50d, LLIHL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 32) ++ D(0xa50e, LLILH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 16) ++ D(0xa50f, LLILL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 0) ++/* LOAD LOGICAL THIRTY ONE BITS */ ++ C(0xb917, LLGTR, RRE, Z, 0, r2_o, r1, 0, llgt, 0) ++ C(0xe317, LLGT, RXY_a, Z, 0, m2_32u, r1, 0, llgt, 0) ++/* LOAD LOGICAL THIRTY ONE BITS AND TRAP */ ++ C(0xe39c, LLGTAT, RXY_a, LAT, 0, m2_32u, r1, 0, llgtat, 0) ++ ++/* LOAD FPR FROM GR */ ++ F(0xb3c1, LDGR, RRE, FPRGR, 0, r2_o, 0, f1, mov2, 0, IF_AFP1) ++/* LOAD GR FROM FPR */ ++ F(0xb3cd, LGDR, RRE, FPRGR, 0, f2, 0, r1, mov2, 0, IF_AFP2) ++/* LOAD NEGATIVE */ ++ C(0x1100, LNR, RR_a, Z, 0, r2_32s, new, r1_32, nabs, nabs32) ++ C(0xb901, LNGR, RRE, Z, 0, r2, r1, 0, nabs, nabs64) ++ C(0xb911, LNGFR, RRE, Z, 0, r2_32s, r1, 0, nabs, nabs64) ++ F(0xb301, LNEBR, RRE, Z, 0, e2, new, e1, nabsf32, f32, IF_BFP) ++ F(0xb311, LNDBR, RRE, Z, 0, f2, new, f1, nabsf64, f64, IF_BFP) ++ F(0xb341, LNXBR, RRE, Z, x2h, x2l, new_P, x1, nabsf128, f128, IF_BFP) ++ F(0xb371, LNDFR, RRE, FPSSH, 0, f2, new, f1, nabsf64, 0, IF_AFP1 | IF_AFP2) ++/* LOAD ON CONDITION */ ++ C(0xb9f2, LOCR, RRF_c, LOC, r1, r2, new, r1_32, loc, 0) ++ C(0xb9e2, LOCGR, RRF_c, LOC, r1, r2, r1, 0, loc, 0) ++ C(0xebf2, LOC, RSY_b, LOC, r1, m2_32u, new, r1_32, loc, 0) ++ C(0xebe2, LOCG, RSY_b, LOC, r1, m2_64, r1, 0, loc, 0) ++/* LOAD HALFWORD IMMEDIATE ON CONDITION */ ++ C(0xec42, LOCHI, RIE_g, LOC2, r1, i2, new, r1_32, loc, 0) ++ C(0xec46, LOCGHI, RIE_g, LOC2, r1, i2, r1, 0, loc, 0) ++ C(0xec4e, LOCHHI, RIE_g, LOC2, r1_sr32, i2, new, r1_32h, loc, 0) ++/* LOAD HIGH ON CONDITION */ ++ C(0xb9e0, LOCFHR, RRF_c, LOC2, r1_sr32, r2, new, r1_32h, loc, 0) ++ C(0xebe0, LOCFH, RSY_b, LOC2, r1_sr32, m2_32u, new, r1_32h, loc, 0) ++/* LOAD PAIR DISJOINT */ ++ D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL) ++ D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ) ++/* LOAD PAIR FROM QUADWORD */ ++ C(0xe38f, LPQ, RXY_a, Z, 0, a2, r1_P, 0, lpq, 0) ++/* LOAD POSITIVE */ ++ C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32) ++ C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64) ++ C(0xb910, LPGFR, RRE, Z, 0, r2_32s, r1, 0, abs, abs64) ++ F(0xb300, LPEBR, RRE, Z, 0, e2, new, e1, absf32, f32, IF_BFP) ++ F(0xb310,
LPDBR, RRE, Z, 0, f2, new, f1, absf64, f64, IF_BFP) ++ F(0xb340, LPXBR, RRE, Z, x2h, x2l, new_P, x1, absf128, f128, IF_BFP) ++ F(0xb370, LPDFR, RRE, FPSSH, 0, f2, new, f1, absf64, 0, IF_AFP1 | IF_AFP2) ++/* LOAD REVERSED */ ++ C(0xb91f, LRVR, RRE, Z, 0, r2_32u, new, r1_32, rev32, 0) ++ C(0xb90f, LRVGR, RRE, Z, 0, r2_o, r1, 0, rev64, 0) ++ C(0xe31f, LRVH, RXY_a, Z, 0, m2_16u, new, r1_16, rev16, 0) ++ C(0xe31e, LRV, RXY_a, Z, 0, m2_32u, new, r1_32, rev32, 0) ++ C(0xe30f, LRVG, RXY_a, Z, 0, m2_64, r1, 0, rev64, 0) ++/* LOAD ZERO */ ++ F(0xb374, LZER, RRE, Z, 0, 0, 0, e1, zero, 0, IF_AFP1) ++ F(0xb375, LZDR, RRE, Z, 0, 0, 0, f1, zero, 0, IF_AFP1) ++ F(0xb376, LZXR, RRE, Z, 0, 0, 0, x1, zero2, 0, IF_AFP1) ++ ++/* LOAD FPC */ ++ F(0xb29d, LFPC, S, Z, 0, m2_32u, 0, 0, sfpc, 0, IF_BFP) ++/* LOAD FPC AND SIGNAL */ ++ F(0xb2bd, LFAS, S, IEEEE_SIM, 0, m2_32u, 0, 0, sfas, 0, IF_DFP) ++/* LOAD FP INTEGER */ ++ F(0xb357, FIEBR, RRF_e, Z, 0, e2, new, e1, fieb, 0, IF_BFP) ++ F(0xb35f, FIDBR, RRF_e, Z, 0, f2, new, f1, fidb, 0, IF_BFP) ++ F(0xb347, FIXBR, RRF_e, Z, x2h, x2l, new_P, x1, fixb, 0, IF_BFP) ++ ++/* LOAD LENGTHENED */ ++ F(0xb304, LDEBR, RRE, Z, 0, e2, new, f1, ldeb, 0, IF_BFP) ++ F(0xb305, LXDBR, RRE, Z, 0, f2, new_P, x1, lxdb, 0, IF_BFP) ++ F(0xb306, LXEBR, RRE, Z, 0, e2, new_P, x1, lxeb, 0, IF_BFP) ++ F(0xed04, LDEB, RXE, Z, 0, m2_32u, new, f1, ldeb, 0, IF_BFP) ++ F(0xed05, LXDB, RXE, Z, 0, m2_64, new_P, x1, lxdb, 0, IF_BFP) ++ F(0xed06, LXEB, RXE, Z, 0, m2_32u, new_P, x1, lxeb, 0, IF_BFP) ++ F(0xb324, LDER, RXE, Z, 0, e2, new, f1, lde, 0, IF_AFP1) ++ F(0xed24, LDE, RXE, Z, 0, m2_32u, new, f1, lde, 0, IF_AFP1) ++/* LOAD ROUNDED */ ++ F(0xb344, LEDBR, RRF_e, Z, 0, f2, new, e1, ledb, 0, IF_BFP) ++ F(0xb345, LDXBR, RRF_e, Z, x2h, x2l, new, f1, ldxb, 0, IF_BFP) ++ F(0xb346, LEXBR, RRF_e, Z, x2h, x2l, new, e1, lexb, 0, IF_BFP) ++ ++/* LOAD MULTIPLE */ ++ C(0x9800, LM, RS_a, Z, 0, a2, 0, 0, lm32, 0) ++ C(0xeb98, LMY, RSY_a, LD, 0, a2, 0, 0, lm32, 0) ++ C(0xeb04, LMG, RSY_a, Z, 0, a2, 0, 0, lm64, 0) ++/* LOAD MULTIPLE HIGH */ ++ C(0xeb96, LMH, RSY_a, Z, 0, a2, 0, 0, lmh, 0) ++/* LOAD ACCESS MULTIPLE */ ++ C(0x9a00, LAM, RS_a, Z, 0, a2, 0, 0, lam, 0) ++ C(0xeb9a, LAMY, RSY_a, LD, 0, a2, 0, 0, lam, 0) ++ ++/* MONITOR CALL */ ++ C(0xaf00, MC, SI, Z, la1, 0, 0, 0, mc, 0) ++ ++/* MOVE */ ++ C(0xd200, MVC, SS_a, Z, la1, a2, 0, 0, mvc, 0) ++ C(0xe544, MVHHI, SIL, GIE, la1, i2, 0, m1_16, mov2, 0) ++ C(0xe54c, MVHI, SIL, GIE, la1, i2, 0, m1_32, mov2, 0) ++ C(0xe548, MVGHI, SIL, GIE, la1, i2, 0, m1_64, mov2, 0) ++ C(0x9200, MVI, SI, Z, la1, i2, 0, m1_8, mov2, 0) ++ C(0xeb52, MVIY, SIY, LD, la1, i2, 0, m1_8, mov2, 0) ++/* MOVE INVERSE */ ++ C(0xe800, MVCIN, SS_a, Z, la1, a2, 0, 0, mvcin, 0) ++/* MOVE LONG */ ++ C(0x0e00, MVCL, RR_a, Z, 0, 0, 0, 0, mvcl, 0) ++/* MOVE LONG EXTENDED */ ++ C(0xa800, MVCLE, RS_a, Z, 0, a2, 0, 0, mvcle, 0) ++/* MOVE LONG UNICODE */ ++ C(0xeb8e, MVCLU, RSY_a, E2, 0, a2, 0, 0, mvclu, 0) ++/* MOVE NUMERICS */ ++ C(0xd100, MVN, SS_a, Z, la1, a2, 0, 0, mvn, 0) ++/* MOVE PAGE */ ++ C(0xb254, MVPG, RRE, Z, 0, 0, 0, 0, mvpg, 0) ++/* MOVE STRING */ ++ C(0xb255, MVST, RRE, Z, 0, 0, 0, 0, mvst, 0) ++/* MOVE WITH OPTIONAL SPECIFICATION */ ++ C(0xc800, MVCOS, SSF, MVCOS, la1, a2, 0, 0, mvcos, 0) ++/* MOVE WITH OFFSET */ ++ /* Really format SS_b, but we pack both lengths into one argument ++ for the helper call, so we might as well leave one 8-bit field. 
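(SS_b carries two 4-bit length fields where SS_a carries a single
++ 8-bit one; the helper presumably splits that byte back into the two
++ lengths.) 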
*/ ++ C(0xf100, MVO, SS_a, Z, la1, a2, 0, 0, mvo, 0) ++/* MOVE ZONES */ ++ C(0xd300, MVZ, SS_a, Z, la1, a2, 0, 0, mvz, 0) ++ ++/* MULTIPLY */ ++ C(0x1c00, MR, RR_a, Z, r1p1_32s, r2_32s, new, r1_D32, mul, 0) ++ C(0xb9ec, MGRK, RRF_a, MIE2,r3_o, r2_o, r1_P, 0, muls128, 0) ++ C(0x5c00, M, RX_a, Z, r1p1_32s, m2_32s, new, r1_D32, mul, 0) ++ C(0xe35c, MFY, RXY_a, GIE, r1p1_32s, m2_32s, new, r1_D32, mul, 0) ++ C(0xe384, MG, RXY_a, MIE2,r1p1_o, m2_64, r1_P, 0, muls128, 0) ++ F(0xb317, MEEBR, RRE, Z, e1, e2, new, e1, meeb, 0, IF_BFP) ++ F(0xb31c, MDBR, RRE, Z, f1, f2, new, f1, mdb, 0, IF_BFP) ++ F(0xb34c, MXBR, RRE, Z, x2h, x2l, x1, x1, mxb, 0, IF_BFP) ++ F(0xb30c, MDEBR, RRE, Z, f1, e2, new, f1, mdeb, 0, IF_BFP) ++ F(0xb307, MXDBR, RRE, Z, 0, f2, x1, x1, mxdb, 0, IF_BFP) ++ F(0xed17, MEEB, RXE, Z, e1, m2_32u, new, e1, meeb, 0, IF_BFP) ++ F(0xed1c, MDB, RXE, Z, f1, m2_64, new, f1, mdb, 0, IF_BFP) ++ F(0xed0c, MDEB, RXE, Z, f1, m2_32u, new, f1, mdeb, 0, IF_BFP) ++ F(0xed07, MXDB, RXE, Z, 0, m2_64, x1, x1, mxdb, 0, IF_BFP) ++/* MULTIPLY HALFWORD */ ++ C(0x4c00, MH, RX_a, Z, r1_o, m2_16s, new, r1_32, mul, 0) ++ C(0xe37c, MHY, RXY_a, GIE, r1_o, m2_16s, new, r1_32, mul, 0) ++ C(0xe33c, MGH, RXY_a, MIE2,r1_o, m2_16s, r1, 0, mul, 0) ++/* MULTIPLY HALFWORD IMMEDIATE */ ++ C(0xa70c, MHI, RI_a, Z, r1_o, i2, new, r1_32, mul, 0) ++ C(0xa70d, MGHI, RI_a, Z, r1_o, i2, r1, 0, mul, 0) ++/* MULTIPLY LOGICAL */ ++ C(0xb996, MLR, RRE, Z, r1p1_32u, r2_32u, new, r1_D32, mul, 0) ++ C(0xe396, ML, RXY_a, Z, r1p1_32u, m2_32u, new, r1_D32, mul, 0) ++ C(0xb986, MLGR, RRE, Z, r1p1, r2_o, r1_P, 0, mul128, 0) ++ C(0xe386, MLG, RXY_a, Z, r1p1, m2_64, r1_P, 0, mul128, 0) ++/* MULTIPLY SINGLE */ ++ C(0xb252, MSR, RRE, Z, r1_o, r2_o, new, r1_32, mul, 0) ++ C(0xb9fd, MSRKC, RRF_a, MIE2,r3_32s, r2_32s, new, r1_32, mul, muls32) ++ C(0x7100, MS, RX_a, Z, r1_o, m2_32s, new, r1_32, mul, 0) ++ C(0xe351, MSY, RXY_a, LD, r1_o, m2_32s, new, r1_32, mul, 0) ++ C(0xe353, MSC, RXY_a, MIE2,r1_32s, m2_32s, new, r1_32, mul, muls32) ++ C(0xb90c, MSGR, RRE, Z, r1_o, r2_o, r1, 0, mul, 0) ++ C(0xb9ed, MSGRKC, RRF_a, MIE2,r3_o, r2_o, new_P, out2_r1, muls128, muls64) ++ C(0xb91c, MSGFR, RRE, Z, r1_o, r2_32s, r1, 0, mul, 0) ++ C(0xe30c, MSG, RXY_a, Z, r1_o, m2_64, r1, 0, mul, 0) ++ C(0xe383, MSGC, RXY_a, MIE2,r1_o, m2_64, new_P, out2_r1, muls128, muls64) ++ C(0xe31c, MSGF, RXY_a, Z, r1_o, m2_32s, r1, 0, mul, 0) ++/* MULTIPLY SINGLE IMMEDIATE */ ++ C(0xc201, MSFI, RIL_a, GIE, r1_o, i2, new, r1_32, mul, 0) ++ C(0xc200, MSGFI, RIL_a, GIE, r1_o, i2, r1, 0, mul, 0) ++ ++/* MULTIPLY AND ADD */ ++ F(0xb30e, MAEBR, RRD, Z, e1, e2, new, e1, maeb, 0, IF_BFP) ++ F(0xb31e, MADBR, RRD, Z, f1, f2, new, f1, madb, 0, IF_BFP) ++ F(0xed0e, MAEB, RXF, Z, e1, m2_32u, new, e1, maeb, 0, IF_BFP) ++ F(0xed1e, MADB, RXF, Z, f1, m2_64, new, f1, madb, 0, IF_BFP) ++/* MULTIPLY AND SUBTRACT */ ++ F(0xb30f, MSEBR, RRD, Z, e1, e2, new, e1, mseb, 0, IF_BFP) ++ F(0xb31f, MSDBR, RRD, Z, f1, f2, new, f1, msdb, 0, IF_BFP) ++ F(0xed0f, MSEB, RXF, Z, e1, m2_32u, new, e1, mseb, 0, IF_BFP) ++ F(0xed1f, MSDB, RXF, Z, f1, m2_64, new, f1, msdb, 0, IF_BFP) ++ ++/* OR */ ++ C(0x1600, OR, RR_a, Z, r1, r2, new, r1_32, or, nz32) ++ C(0xb9f6, ORK, RRF_a, DO, r2, r3, new, r1_32, or, nz32) ++ C(0x5600, O, RX_a, Z, r1, m2_32s, new, r1_32, or, nz32) ++ C(0xe356, OY, RXY_a, LD, r1, m2_32s, new, r1_32, or, nz32) ++ C(0xb981, OGR, RRE, Z, r1, r2, r1, 0, or, nz64) ++ C(0xb9e6, OGRK, RRF_a, DO, r2, r3, r1, 0, or, nz64) ++ C(0xe381, OG, RXY_a, Z, r1, m2_64, r1, 0, or, nz64) ++ C(0xd600, OC, SS_a, Z, la1, a2, 0, 
0, oc, 0) ++/* OR IMMEDIATE */ ++ D(0xc00c, OIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2020) ++ D(0xc00d, OILF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2000) ++ D(0xa508, OIHH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1030) ++ D(0xa509, OIHL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1020) ++ D(0xa50a, OILH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1010) ++ D(0xa50b, OILL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1000) ++ D(0x9600, OI, SI, Z, la1, i2_8u, new, 0, oi, nz64, MO_UB) ++ D(0xeb56, OIY, SIY, LD, la1, i2_8u, new, 0, oi, nz64, MO_UB) ++ ++/* PACK */ ++ /* Really format SS_b, but we pack both lengths into one argument ++ for the helper call, so we might as well leave one 8-bit field. */ ++ C(0xf200, PACK, SS_a, Z, la1, a2, 0, 0, pack, 0) ++/* PACK ASCII */ ++ C(0xe900, PKA, SS_f, E2, la1, a2, 0, 0, pka, 0) ++/* PACK UNICODE */ ++ C(0xe100, PKU, SS_f, E2, la1, a2, 0, 0, pku, 0) ++ ++/* PREFETCH */ ++ /* Implemented as nops of course. */ ++ C(0xe336, PFD, RXY_b, GIE, 0, 0, 0, 0, 0, 0) ++ C(0xc602, PFDRL, RIL_c, GIE, 0, 0, 0, 0, 0, 0) ++/* PERFORM PROCESSOR ASSIST */ ++ /* Implemented as nop of course. */ ++ C(0xb2e8, PPA, RRF_c, PPA, 0, 0, 0, 0, 0, 0) ++ ++/* POPULATION COUNT */ ++ C(0xb9e1, POPCNT, RRE, PC, 0, r2_o, r1, 0, popcnt, nz64) ++ ++/* ROTATE LEFT SINGLE LOGICAL */ ++ C(0xeb1d, RLL, RSY_a, Z, r3_o, sh32, new, r1_32, rll32, 0) ++ C(0xeb1c, RLLG, RSY_a, Z, r3_o, sh64, r1, 0, rll64, 0) ++ ++/* ROTATE THEN INSERT SELECTED BITS */ ++ C(0xec55, RISBG, RIE_f, GIE, 0, r2, r1, 0, risbg, s64) ++ C(0xec59, RISBGN, RIE_f, MIE, 0, r2, r1, 0, risbg, 0) ++ C(0xec5d, RISBHG, RIE_f, HW, 0, r2, r1, 0, risbg, 0) ++ C(0xec51, RISBLG, RIE_f, HW, 0, r2, r1, 0, risbg, 0) ++/* ROTATE THEN AND/OR/XOR SELECTED BITS */ ++ C(0xec54, RNSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) ++ C(0xec56, ROSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) ++ C(0xec57, RXSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0) ++ ++/* SEARCH STRING */ ++ C(0xb25e, SRST, RRE, Z, 0, 0, 0, 0, srst, 0) ++/* SEARCH STRING UNICODE */ ++ C(0xb9be, SRSTU, RRE, ETF3, 0, 0, 0, 0, srstu, 0) ++ ++/* SET ACCESS */ ++ C(0xb24e, SAR, RRE, Z, 0, r2_o, 0, 0, sar, 0) ++/* SET ADDRESSING MODE */ ++ D(0x010c, SAM24, E, Z, 0, 0, 0, 0, sam, 0, 0) ++ D(0x010d, SAM31, E, Z, 0, 0, 0, 0, sam, 0, 1) ++ D(0x010e, SAM64, E, Z, 0, 0, 0, 0, sam, 0, 3) ++/* SET FPC */ ++ F(0xb384, SFPC, RRE, Z, 0, r1_o, 0, 0, sfpc, 0, IF_BFP) ++/* SET FPC AND SIGNAL */ ++ F(0xb385, SFASR, RRE, IEEEE_SIM, 0, r1_o, 0, 0, sfas, 0, IF_DFP) ++/* SET BFP ROUNDING MODE */ ++ F(0xb299, SRNM, S, Z, la2, 0, 0, 0, srnm, 0, IF_BFP) ++ F(0xb2b8, SRNMB, S, FPE, la2, 0, 0, 0, srnmb, 0, IF_BFP) ++/* SET DFP ROUNDING MODE */ ++ F(0xb2b9, SRNMT, S, DFPR, la2, 0, 0, 0, srnmt, 0, IF_DFP) ++/* SET PROGRAM MASK */ ++ C(0x0400, SPM, RR_a, Z, r1, 0, 0, 0, spm, 0) ++ ++/* SHIFT LEFT SINGLE */ ++ D(0x8b00, SLA, RS_a, Z, r1, sh32, new, r1_32, sla, 0, 31) ++ D(0xebdd, SLAK, RSY_a, DO, r3, sh32, new, r1_32, sla, 0, 31) ++ D(0xeb0b, SLAG, RSY_a, Z, r3, sh64, r1, 0, sla, 0, 63) ++/* SHIFT LEFT SINGLE LOGICAL */ ++ C(0x8900, SLL, RS_a, Z, r1_o, sh32, new, r1_32, sll, 0) ++ C(0xebdf, SLLK, RSY_a, DO, r3_o, sh32, new, r1_32, sll, 0) ++ C(0xeb0d, SLLG, RSY_a, Z, r3_o, sh64, r1, 0, sll, 0) ++/* SHIFT RIGHT SINGLE */ ++ C(0x8a00, SRA, RS_a, Z, r1_32s, sh32, new, r1_32, sra, s32) ++ C(0xebdc, SRAK, RSY_a, DO, r3_32s, sh32, new, r1_32, sra, s32) ++ C(0xeb0a, SRAG, RSY_a, Z, r3_o, sh64, r1, 0, sra, s64) ++/* SHIFT RIGHT SINGLE LOGICAL */ ++ C(0x8800, SRL, RS_a, Z, r1_32u, sh32, new, r1_32, srl, 0) ++ C(0xebde, SRLK, RSY_a, DO, r3_32u, sh32,
new, r1_32, srl, 0) ++ C(0xeb0c, SRLG, RSY_a, Z, r3_o, sh64, r1, 0, srl, 0) ++/* SHIFT LEFT DOUBLE */ ++ D(0x8f00, SLDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sla, 0, 31) ++/* SHIFT LEFT DOUBLE LOGICAL */ ++ C(0x8d00, SLDL, RS_a, Z, r1_D32, sh64, new, r1_D32, sll, 0) ++/* SHIFT RIGHT DOUBLE */ ++ C(0x8e00, SRDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sra, s64) ++/* SHIFT RIGHT DOUBLE LOGICAL */ ++ C(0x8c00, SRDL, RS_a, Z, r1_D32, sh64, new, r1_D32, srl, 0) ++ ++/* SQUARE ROOT */ ++ F(0xb314, SQEBR, RRE, Z, 0, e2, new, e1, sqeb, 0, IF_BFP) ++ F(0xb315, SQDBR, RRE, Z, 0, f2, new, f1, sqdb, 0, IF_BFP) ++ F(0xb316, SQXBR, RRE, Z, x2h, x2l, new_P, x1, sqxb, 0, IF_BFP) ++ F(0xed14, SQEB, RXE, Z, 0, m2_32u, new, e1, sqeb, 0, IF_BFP) ++ F(0xed15, SQDB, RXE, Z, 0, m2_64, new, f1, sqdb, 0, IF_BFP) ++ ++/* STORE */ ++ C(0x5000, ST, RX_a, Z, r1_o, a2, 0, 0, st32, 0) ++ C(0xe350, STY, RXY_a, LD, r1_o, a2, 0, 0, st32, 0) ++ C(0xe324, STG, RXY_a, Z, r1_o, a2, 0, 0, st64, 0) ++ F(0x6000, STD, RX_a, Z, f1, a2, 0, 0, st64, 0, IF_AFP1) ++ F(0xed67, STDY, RXY_a, LD, f1, a2, 0, 0, st64, 0, IF_AFP1) ++ F(0x7000, STE, RX_a, Z, e1, a2, 0, 0, st32, 0, IF_AFP1) ++ F(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0, IF_AFP1) ++/* STORE RELATIVE LONG */ ++ C(0xc40f, STRL, RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0) ++ C(0xc40b, STGRL, RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0) ++/* STORE CHARACTER */ ++ C(0x4200, STC, RX_a, Z, r1_o, a2, 0, 0, st8, 0) ++ C(0xe372, STCY, RXY_a, LD, r1_o, a2, 0, 0, st8, 0) ++/* STORE CHARACTER HIGH */ ++ C(0xe3c3, STCH, RXY_a, HW, r1_sr32, a2, 0, 0, st8, 0) ++/* STORE CHARACTERS UNDER MASK */ ++ D(0xbe00, STCM, RS_b, Z, r1_o, a2, 0, 0, stcm, 0, 0) ++ D(0xeb2d, STCMY, RSY_b, LD, r1_o, a2, 0, 0, stcm, 0, 0) ++ D(0xeb2c, STCMH, RSY_b, Z, r1_o, a2, 0, 0, stcm, 0, 32) ++/* STORE HALFWORD */ ++ C(0x4000, STH, RX_a, Z, r1_o, a2, 0, 0, st16, 0) ++ C(0xe370, STHY, RXY_a, LD, r1_o, a2, 0, 0, st16, 0) ++/* STORE HALFWORD HIGH */ ++ C(0xe3c7, STHH, RXY_a, HW, r1_sr32, a2, 0, 0, st16, 0) ++/* STORE HALFWORD RELATIVE LONG */ ++ C(0xc407, STHRL, RIL_b, GIE, r1_o, ri2, 0, 0, st16, 0) ++/* STORE HIGH */ ++ C(0xe3cb, STFH, RXY_a, HW, r1_sr32, a2, 0, 0, st32, 0) ++/* STORE ON CONDITION */ ++ D(0xebf3, STOC, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 0) ++ D(0xebe3, STOCG, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 1) ++/* STORE HIGH ON CONDITION */ ++ D(0xebe1, STOCFH, RSY_b, LOC2, 0, 0, 0, 0, soc, 0, 2) ++/* STORE REVERSED */ ++ C(0xe33f, STRVH, RXY_a, Z, la2, r1_16u, new, m1_16, rev16, 0) ++ C(0xe33e, STRV, RXY_a, Z, la2, r1_32u, new, m1_32, rev32, 0) ++ C(0xe32f, STRVG, RXY_a, Z, la2, r1_o, new, m1_64, rev64, 0) ++ ++/* STORE CLOCK */ ++ F(0xb205, STCK, S, Z, la2, 0, new, m1_64, stck, 0, IF_IO) ++ F(0xb27c, STCKF, S, SCF, la2, 0, new, m1_64, stck, 0, IF_IO) ++/* STORE CLOCK EXTENDED */ ++ F(0xb278, STCKE, S, Z, 0, a2, 0, 0, stcke, 0, IF_IO) ++ ++/* STORE FACILITY LIST EXTENDED */ ++ C(0xb2b0, STFLE, S, SFLE, 0, a2, 0, 0, stfle, 0) ++/* STORE FPC */ ++ F(0xb29c, STFPC, S, Z, 0, a2, new, m2_32, efpc, 0, IF_BFP) ++ ++/* STORE MULTIPLE */ ++ D(0x9000, STM, RS_a, Z, 0, a2, 0, 0, stm, 0, 4) ++ D(0xeb90, STMY, RSY_a, LD, 0, a2, 0, 0, stm, 0, 4) ++ D(0xeb24, STMG, RSY_a, Z, 0, a2, 0, 0, stm, 0, 8) ++/* STORE MULTIPLE HIGH */ ++ C(0xeb26, STMH, RSY_a, Z, 0, a2, 0, 0, stmh, 0) ++/* STORE ACCESS MULTIPLE */ ++ C(0x9b00, STAM, RS_a, Z, 0, a2, 0, 0, stam, 0) ++ C(0xeb9b, STAMY, RSY_a, LD, 0, a2, 0, 0, stam, 0) ++/* STORE PAIR TO QUADWORD */ ++ C(0xe38e, STPQ, RXY_a, Z, 0, a2, r1_P, 0, stpq, 0) ++ ++/* SUBTRACT */ ++ C(0x1b00, SR, RR_a, Z, r1, r2, new, 
r1_32, sub, subs32) ++ C(0xb9f9, SRK, RRF_a, DO, r2, r3, new, r1_32, sub, subs32) ++ C(0x5b00, S, RX_a, Z, r1, m2_32s, new, r1_32, sub, subs32) ++ C(0xe35b, SY, RXY_a, LD, r1, m2_32s, new, r1_32, sub, subs32) ++ C(0xb909, SGR, RRE, Z, r1, r2, r1, 0, sub, subs64) ++ C(0xb919, SGFR, RRE, Z, r1, r2_32s, r1, 0, sub, subs64) ++ C(0xb9e9, SGRK, RRF_a, DO, r2, r3, r1, 0, sub, subs64) ++ C(0xe309, SG, RXY_a, Z, r1, m2_64, r1, 0, sub, subs64) ++ C(0xe319, SGF, RXY_a, Z, r1, m2_32s, r1, 0, sub, subs64) ++ F(0xb30b, SEBR, RRE, Z, e1, e2, new, e1, seb, f32, IF_BFP) ++ F(0xb31b, SDBR, RRE, Z, f1, f2, new, f1, sdb, f64, IF_BFP) ++ F(0xb34b, SXBR, RRE, Z, x2h, x2l, x1, x1, sxb, f128, IF_BFP) ++ F(0xed0b, SEB, RXE, Z, e1, m2_32u, new, e1, seb, f32, IF_BFP) ++ F(0xed1b, SDB, RXE, Z, f1, m2_64, new, f1, sdb, f64, IF_BFP) ++/* SUBTRACT HALFWORD */ ++ C(0x4b00, SH, RX_a, Z, r1, m2_16s, new, r1_32, sub, subs32) ++ C(0xe37b, SHY, RXY_a, LD, r1, m2_16s, new, r1_32, sub, subs32) ++ C(0xe339, SGH, RXY_a, MIE2,r1, m2_16s, r1, 0, sub, subs64) ++/* SUBTRACT HIGH */ ++ C(0xb9c9, SHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subs32) ++ C(0xb9d9, SHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, sub, subs32) ++/* SUBTRACT LOGICAL */ ++ C(0x1f00, SLR, RR_a, Z, r1_32u, r2_32u, new, r1_32, sub, subu32) ++ C(0xb9fb, SLRK, RRF_a, DO, r2_32u, r3_32u, new, r1_32, sub, subu32) ++ C(0x5f00, SL, RX_a, Z, r1_32u, m2_32u, new, r1_32, sub, subu32) ++ C(0xe35f, SLY, RXY_a, LD, r1_32u, m2_32u, new, r1_32, sub, subu32) ++ C(0xb90b, SLGR, RRE, Z, r1, r2, r1, 0, subu64, subu64) ++ C(0xb91b, SLGFR, RRE, Z, r1, r2_32u, r1, 0, subu64, subu64) ++ C(0xb9eb, SLGRK, RRF_a, DO, r2, r3, r1, 0, subu64, subu64) ++ C(0xe30b, SLG, RXY_a, Z, r1, m2_64, r1, 0, subu64, subu64) ++ C(0xe31b, SLGF, RXY_a, Z, r1, m2_32u, r1, 0, subu64, subu64) ++/* SUBTRACT LOGICAL HIGH */ ++ C(0xb9cb, SLHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subu32) ++ C(0xb9db, SLHHLR, RRF_a, HW, r2_sr32, r3_32u, new, r1_32h, sub, subu32) ++/* SUBTRACT LOGICAL IMMEDIATE */ ++ C(0xc205, SLFI, RIL_a, EI, r1_32u, i2_32u, new, r1_32, sub, subu32) ++ C(0xc204, SLGFI, RIL_a, EI, r1, i2_32u, r1, 0, subu64, subu64) ++/* SUBTRACT LOGICAL WITH BORROW */ ++ C(0xb999, SLBR, RRE, Z, r1_32u, r2_32u, new, r1_32, subb32, subu32) ++ C(0xb989, SLBGR, RRE, Z, r1, r2, r1, 0, subb64, subu64) ++ C(0xe399, SLB, RXY_a, Z, r1_32u, m2_32u, new, r1_32, subb32, subu32) ++ C(0xe389, SLBG, RXY_a, Z, r1, m2_64, r1, 0, subb64, subu64) ++ ++/* SUPERVISOR CALL */ ++ C(0x0a00, SVC, I, Z, 0, 0, 0, 0, svc, 0) ++ ++/* TEST ADDRESSING MODE */ ++ C(0x010b, TAM, E, Z, 0, 0, 0, 0, tam, 0) ++ ++/* TEST AND SET */ ++ C(0x9300, TS, S, Z, 0, a2, 0, 0, ts, 0) ++ ++/* TEST DATA CLASS */ ++ F(0xed10, TCEB, RXE, Z, e1, a2, 0, 0, tceb, 0, IF_BFP) ++ F(0xed11, TCDB, RXE, Z, f1, a2, 0, 0, tcdb, 0, IF_BFP) ++ F(0xed12, TCXB, RXE, Z, 0, a2, x1, 0, tcxb, 0, IF_BFP) ++ ++/* TEST DECIMAL */ ++ C(0xebc0, TP, RSL, E2, la1, 0, 0, 0, tp, 0) ++ ++/* TEST UNDER MASK */ ++ C(0x9100, TM, SI, Z, m1_8u, i2_8u, 0, 0, 0, tm32) ++ C(0xeb51, TMY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, tm32) ++ D(0xa702, TMHH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 48) ++ D(0xa703, TMHL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 32) ++ D(0xa700, TMLH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 16) ++ D(0xa701, TMLL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 0) ++ ++/* TRANSLATE */ ++ C(0xdc00, TR, SS_a, Z, la1, a2, 0, 0, tr, 0) ++/* TRANSLATE AND TEST */ ++ C(0xdd00, TRT, SS_a, Z, la1, a2, 0, 0, trt, 0) ++/* TRANSLATE AND TEST REVERSE */ ++ C(0xd000, TRTR, SS_a, 
ETF3, la1, a2, 0, 0, trtr, 0) ++/* TRANSLATE EXTENDED */ ++ C(0xb2a5, TRE, RRE, Z, 0, r2, r1_P, 0, tre, 0) ++ ++/* TRANSLATE ONE TO ONE */ ++ C(0xb993, TROO, RRF_c, E2, 0, 0, 0, 0, trXX, 0) ++/* TRANSLATE ONE TO TWO */ ++ C(0xb992, TROT, RRF_c, E2, 0, 0, 0, 0, trXX, 0) ++/* TRANSLATE TWO TO ONE */ ++ C(0xb991, TRTO, RRF_c, E2, 0, 0, 0, 0, trXX, 0) ++/* TRANSLATE TWO TO TWO */ ++ C(0xb990, TRTT, RRF_c, E2, 0, 0, 0, 0, trXX, 0) ++ ++/* UNPACK */ ++ /* Really format SS_b, but we pack both lengths into one argument ++ for the helper call, so we might as well leave one 8-bit field. */ ++ C(0xf300, UNPK, SS_a, Z, la1, a2, 0, 0, unpk, 0) ++/* UNPACK ASCII */ ++ C(0xea00, UNPKA, SS_a, E2, la1, a2, 0, 0, unpka, 0) ++/* UNPACK UNICODE */ ++ C(0xe200, UNPKU, SS_a, E2, la1, a2, 0, 0, unpku, 0) ++ ++/* MSA Instructions */ ++ D(0xb91e, KMAC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMAC) ++ D(0xb928, PCKMO, RRE, MSA3, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCKMO) ++ D(0xb92a, KMF, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMF) ++ D(0xb92b, KMO, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMO) ++ D(0xb92c, PCC, RRE, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PCC) ++ D(0xb92d, KMCTR, RRF_b, MSA4, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMCTR) ++ D(0xb92e, KM, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KM) ++ D(0xb92f, KMC, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMC) ++ D(0xb929, KMA, RRF_b, MSA8, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KMA) ++ D(0xb93c, PPNO, RRE, MSA5, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_PPNO) ++ D(0xb93e, KIMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KIMD) ++ D(0xb93f, KLMD, RRE, MSA, 0, 0, 0, 0, msa, 0, S390_FEAT_TYPE_KLMD) ++ ++/* === Vector Support Instructions === */ ++ ++/* VECTOR BIT PERMUTE */ ++ E(0xe785, VBPERM, VRR_c, VE, 0, 0, 0, 0, vbperm, 0, 0, IF_VEC) ++/* VECTOR GATHER ELEMENT */ ++ E(0xe713, VGEF, VRV, V, la2, 0, 0, 0, vge, 0, ES_32, IF_VEC) ++ E(0xe712, VGEG, VRV, V, la2, 0, 0, 0, vge, 0, ES_64, IF_VEC) ++/* VECTOR GENERATE BYTE MASK */ ++ F(0xe744, VGBM, VRI_a, V, 0, 0, 0, 0, vgbm, 0, IF_VEC) ++/* VECTOR GENERATE MASK */ ++ F(0xe746, VGM, VRI_b, V, 0, 0, 0, 0, vgm, 0, IF_VEC) ++/* VECTOR LOAD */ ++ F(0xe706, VL, VRX, V, la2, 0, 0, 0, vl, 0, IF_VEC) ++ F(0xe756, VLR, VRR_a, V, 0, 0, 0, 0, vlr, 0, IF_VEC) ++/* VECTOR LOAD AND REPLICATE */ ++ F(0xe705, VLREP, VRX, V, la2, 0, 0, 0, vlrep, 0, IF_VEC) ++/* VECTOR LOAD ELEMENT */ ++ E(0xe700, VLEB, VRX, V, la2, 0, 0, 0, vle, 0, ES_8, IF_VEC) ++ E(0xe701, VLEH, VRX, V, la2, 0, 0, 0, vle, 0, ES_16, IF_VEC) ++ E(0xe703, VLEF, VRX, V, la2, 0, 0, 0, vle, 0, ES_32, IF_VEC) ++ E(0xe702, VLEG, VRX, V, la2, 0, 0, 0, vle, 0, ES_64, IF_VEC) ++/* VECTOR LOAD ELEMENT IMMEDIATE */ ++ E(0xe740, VLEIB, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_8, IF_VEC) ++ E(0xe741, VLEIH, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_16, IF_VEC) ++ E(0xe743, VLEIF, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_32, IF_VEC) ++ E(0xe742, VLEIG, VRI_a, V, 0, 0, 0, 0, vlei, 0, ES_64, IF_VEC) ++/* VECTOR LOAD GR FROM VR ELEMENT */ ++ F(0xe721, VLGV, VRS_c, V, la2, 0, r1, 0, vlgv, 0, IF_VEC) ++/* VECTOR LOAD LOGICAL ELEMENT AND ZERO */ ++ F(0xe704, VLLEZ, VRX, V, la2, 0, 0, 0, vllez, 0, IF_VEC) ++/* VECTOR LOAD MULTIPLE */ ++ F(0xe736, VLM, VRS_a, V, la2, 0, 0, 0, vlm, 0, IF_VEC) ++/* VECTOR LOAD TO BLOCK BOUNDARY */ ++ F(0xe707, VLBB, VRX, V, la2, 0, 0, 0, vlbb, 0, IF_VEC) ++/* VECTOR LOAD VR ELEMENT FROM GR */ ++ F(0xe722, VLVG, VRS_b, V, la2, r3, 0, 0, vlvg, 0, IF_VEC) ++/* VECTOR LOAD VR FROM GRS DISJOINT */ ++ F(0xe762, VLVGP, VRR_f, V, r2, r3, 0, 0, vlvgp, 0, IF_VEC) ++/* VECTOR 
LOAD WITH LENGTH */ ++ F(0xe737, VLL, VRS_b, V, la2, r3_32u, 0, 0, vll, 0, IF_VEC) ++/* VECTOR MERGE HIGH */ ++ F(0xe761, VMRH, VRR_c, V, 0, 0, 0, 0, vmr, 0, IF_VEC) ++/* VECTOR MERGE LOW */ ++ F(0xe760, VMRL, VRR_c, V, 0, 0, 0, 0, vmr, 0, IF_VEC) ++/* VECTOR PACK */ ++ F(0xe794, VPK, VRR_c, V, 0, 0, 0, 0, vpk, 0, IF_VEC) ++/* VECTOR PACK SATURATE */ ++ F(0xe797, VPKS, VRR_b, V, 0, 0, 0, 0, vpk, 0, IF_VEC) ++/* VECTOR PACK LOGICAL SATURATE */ ++ F(0xe795, VPKLS, VRR_b, V, 0, 0, 0, 0, vpk, 0, IF_VEC) ++/* VECTOR PERMUTE */ ++ F(0xe78c, VPERM, VRR_e, V, 0, 0, 0, 0, vperm, 0, IF_VEC) ++/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */ ++ F(0xe784, VPDI, VRR_c, V, 0, 0, 0, 0, vpdi, 0, IF_VEC) ++/* VECTOR REPLICATE */ ++ F(0xe74d, VREP, VRI_c, V, 0, 0, 0, 0, vrep, 0, IF_VEC) ++/* VECTOR REPLICATE IMMEDIATE */ ++ F(0xe745, VREPI, VRI_a, V, 0, 0, 0, 0, vrepi, 0, IF_VEC) ++/* VECTOR SCATTER ELEMENT */ ++ E(0xe71b, VSCEF, VRV, V, la2, 0, 0, 0, vsce, 0, ES_32, IF_VEC) ++ E(0xe71a, VSCEG, VRV, V, la2, 0, 0, 0, vsce, 0, ES_64, IF_VEC) ++/* VECTOR SELECT */ ++ F(0xe78d, VSEL, VRR_e, V, 0, 0, 0, 0, vsel, 0, IF_VEC) ++/* VECTOR SIGN EXTEND TO DOUBLEWORD */ ++ F(0xe75f, VSEG, VRR_a, V, 0, 0, 0, 0, vseg, 0, IF_VEC) ++/* VECTOR STORE */ ++ F(0xe70e, VST, VRX, V, la2, 0, 0, 0, vst, 0, IF_VEC) ++/* VECTOR STORE ELEMENT */ ++ E(0xe708, VSTEB, VRX, V, la2, 0, 0, 0, vste, 0, ES_8, IF_VEC) ++ E(0xe709, VSTEH, VRX, V, la2, 0, 0, 0, vste, 0, ES_16, IF_VEC) ++ E(0xe70b, VSTEF, VRX, V, la2, 0, 0, 0, vste, 0, ES_32, IF_VEC) ++ E(0xe70a, VSTEG, VRX, V, la2, 0, 0, 0, vste, 0, ES_64, IF_VEC) ++/* VECTOR STORE MULTIPLE */ ++ F(0xe73e, VSTM, VRS_a, V, la2, 0, 0, 0, vstm, 0, IF_VEC) ++/* VECTOR STORE WITH LENGTH */ ++ F(0xe73f, VSTL, VRS_b, V, la2, r3_32u, 0, 0, vstl, 0, IF_VEC) ++/* VECTOR UNPACK HIGH */ ++ F(0xe7d7, VUPH, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) ++/* VECTOR UNPACK LOGICAL HIGH */ ++ F(0xe7d5, VUPLH, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) ++/* VECTOR UNPACK LOW */ ++ F(0xe7d6, VUPL, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) ++/* VECTOR UNPACK LOGICAL LOW */ ++ F(0xe7d4, VUPLL, VRR_a, V, 0, 0, 0, 0, vup, 0, IF_VEC) ++ ++/* === Vector Integer Instructions === */ ++ ++/* VECTOR ADD */ ++ F(0xe7f3, VA, VRR_c, V, 0, 0, 0, 0, va, 0, IF_VEC) ++/* VECTOR ADD COMPUTE CARRY */ ++ F(0xe7f1, VACC, VRR_c, V, 0, 0, 0, 0, vacc, 0, IF_VEC) ++/* VECTOR ADD WITH CARRY */ ++ F(0xe7bb, VAC, VRR_d, V, 0, 0, 0, 0, vac, 0, IF_VEC) ++/* VECTOR ADD WITH CARRY COMPUTE CARRY */ ++ F(0xe7b9, VACCC, VRR_d, V, 0, 0, 0, 0, vaccc, 0, IF_VEC) ++/* VECTOR AND */ ++ F(0xe768, VN, VRR_c, V, 0, 0, 0, 0, vn, 0, IF_VEC) ++/* VECTOR AND WITH COMPLEMENT */ ++ F(0xe769, VNC, VRR_c, V, 0, 0, 0, 0, vnc, 0, IF_VEC) ++/* VECTOR AVERAGE */ ++ F(0xe7f2, VAVG, VRR_c, V, 0, 0, 0, 0, vavg, 0, IF_VEC) ++/* VECTOR AVERAGE LOGICAL */ ++ F(0xe7f0, VAVGL, VRR_c, V, 0, 0, 0, 0, vavgl, 0, IF_VEC) ++/* VECTOR CHECKSUM */ ++ F(0xe766, VCKSM, VRR_c, V, 0, 0, 0, 0, vcksm, 0, IF_VEC) ++/* VECTOR ELEMENT COMPARE */ ++ F(0xe7db, VEC, VRR_a, V, 0, 0, 0, 0, vec, cmps64, IF_VEC) ++/* VECTOR ELEMENT COMPARE LOGICAL */ ++ F(0xe7d9, VECL, VRR_a, V, 0, 0, 0, 0, vec, cmpu64, IF_VEC) ++/* VECTOR COMPARE EQUAL */ ++ E(0xe7f8, VCEQ, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_EQ, IF_VEC) ++/* VECTOR COMPARE HIGH */ ++ E(0xe7fb, VCH, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GT, IF_VEC) ++/* VECTOR COMPARE HIGH LOGICAL */ ++ E(0xe7f9, VCHL, VRR_b, V, 0, 0, 0, 0, vc, 0, TCG_COND_GTU, IF_VEC) ++/* VECTOR COUNT LEADING ZEROS */ ++ F(0xe753, VCLZ, VRR_a, V, 0, 0, 0, 0, vclz, 0, IF_VEC) ++/* VECTOR COUNT TRAILING ZEROS */ ++ F(0xe752, 
VCTZ, VRR_a, V, 0, 0, 0, 0, vctz, 0, IF_VEC) ++/* VECTOR EXCLUSIVE OR */ ++ F(0xe76d, VX, VRR_c, V, 0, 0, 0, 0, vx, 0, IF_VEC) ++/* VECTOR GALOIS FIELD MULTIPLY SUM */ ++ F(0xe7b4, VGFM, VRR_c, V, 0, 0, 0, 0, vgfm, 0, IF_VEC) ++/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */ ++ F(0xe7bc, VGFMA, VRR_d, V, 0, 0, 0, 0, vgfma, 0, IF_VEC) ++/* VECTOR LOAD COMPLEMENT */ ++ F(0xe7de, VLC, VRR_a, V, 0, 0, 0, 0, vlc, 0, IF_VEC) ++/* VECTOR LOAD POSITIVE */ ++ F(0xe7df, VLP, VRR_a, V, 0, 0, 0, 0, vlp, 0, IF_VEC) ++/* VECTOR MAXIMUM */ ++ F(0xe7ff, VMX, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) ++/* VECTOR MAXIMUM LOGICAL */ ++ F(0xe7fd, VMXL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) ++/* VECTOR MINIMUM */ ++ F(0xe7fe, VMN, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) ++/* VECTOR MINIMUM LOGICAL */ ++ F(0xe7fc, VMNL, VRR_c, V, 0, 0, 0, 0, vmx, 0, IF_VEC) ++/* VECTOR MULTIPLY AND ADD LOW */ ++ F(0xe7aa, VMAL, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) ++/* VECTOR MULTIPLY AND ADD HIGH */ ++ F(0xe7ab, VMAH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) ++/* VECTOR MULTIPLY AND ADD LOGICAL HIGH */ ++ F(0xe7a9, VMALH, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) ++/* VECTOR MULTIPLY AND ADD EVEN */ ++ F(0xe7ae, VMAE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) ++/* VECTOR MULTIPLY AND ADD LOGICAL EVEN */ ++ F(0xe7ac, VMALE, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) ++/* VECTOR MULTIPLY AND ADD ODD */ ++ F(0xe7af, VMAO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) ++/* VECTOR MULTIPLY AND ADD LOGICAL ODD */ ++ F(0xe7ad, VMALO, VRR_d, V, 0, 0, 0, 0, vma, 0, IF_VEC) ++/* VECTOR MULTIPLY HIGH */ ++ F(0xe7a3, VMH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) ++/* VECTOR MULTIPLY LOGICAL HIGH */ ++ F(0xe7a1, VMLH, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) ++/* VECTOR MULTIPLY LOW */ ++ F(0xe7a2, VML, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) ++/* VECTOR MULTIPLY EVEN */ ++ F(0xe7a6, VME, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) ++/* VECTOR MULTIPLY LOGICAL EVEN */ ++ F(0xe7a4, VMLE, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) ++/* VECTOR MULTIPLY ODD */ ++ F(0xe7a7, VMO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) ++/* VECTOR MULTIPLY LOGICAL ODD */ ++ F(0xe7a5, VMLO, VRR_c, V, 0, 0, 0, 0, vm, 0, IF_VEC) ++/* VECTOR MULTIPLY SUM LOGICAL */ ++ F(0xe7b8, VMSL, VRR_d, VE, 0, 0, 0, 0, vmsl, 0, IF_VEC) ++/* VECTOR NAND */ ++ F(0xe76e, VNN, VRR_c, VE, 0, 0, 0, 0, vnn, 0, IF_VEC) ++/* VECTOR NOR */ ++ F(0xe76b, VNO, VRR_c, V, 0, 0, 0, 0, vno, 0, IF_VEC) ++/* VECTOR NOT EXCLUSIVE OR */ ++ F(0xe76c, VNX, VRR_c, VE, 0, 0, 0, 0, vnx, 0, IF_VEC) ++/* VECTOR OR */ ++ F(0xe76a, VO, VRR_c, V, 0, 0, 0, 0, vo, 0, IF_VEC) ++/* VECTOR OR WITH COMPLEMENT */ ++ F(0xe76f, VOC, VRR_c, VE, 0, 0, 0, 0, voc, 0, IF_VEC) ++/* VECTOR POPULATION COUNT */ ++ F(0xe750, VPOPCT, VRR_a, V, 0, 0, 0, 0, vpopct, 0, IF_VEC) ++/* VECTOR ELEMENT ROTATE LEFT LOGICAL */ ++ F(0xe773, VERLLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) ++ F(0xe733, VERLL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) ++/* VECTOR ELEMENT ROTATE AND INSERT UNDER MASK */ ++ F(0xe772, VERIM, VRI_d, V, 0, 0, 0, 0, verim, 0, IF_VEC) ++/* VECTOR ELEMENT SHIFT LEFT */ ++ F(0xe770, VESLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) ++ F(0xe730, VESL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) ++/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */ ++ F(0xe77a, VESRAV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) ++ F(0xe73a, VESRA, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) ++/* VECTOR ELEMENT SHIFT RIGHT LOGICAL */ ++ F(0xe778, VESRLV, VRR_c, V, 0, 0, 0, 0, vesv, 0, IF_VEC) ++ F(0xe738, VESRL, VRS_a, V, la2, 0, 0, 0, ves, 0, IF_VEC) ++/* VECTOR SHIFT LEFT */ ++ F(0xe774, VSL, VRR_c, V, 0, 0, 0, 
0, vsl, 0, IF_VEC) ++/* VECTOR SHIFT LEFT BY BYTE */ ++ F(0xe775, VSLB, VRR_c, V, 0, 0, 0, 0, vsl, 0, IF_VEC) ++/* VECTOR SHIFT LEFT DOUBLE BY BYTE */ ++ F(0xe777, VSLDB, VRI_d, V, 0, 0, 0, 0, vsldb, 0, IF_VEC) ++/* VECTOR SHIFT RIGHT ARITHMETIC */ ++ F(0xe77e, VSRA, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC) ++/* VECTOR SHIFT RIGHT ARITHMETIC BY BYTE */ ++ F(0xe77f, VSRAB, VRR_c, V, 0, 0, 0, 0, vsra, 0, IF_VEC) ++/* VECTOR SHIFT RIGHT LOGICAL */ ++ F(0xe77c, VSRL, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC) ++/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */ ++ F(0xe77d, VSRLB, VRR_c, V, 0, 0, 0, 0, vsrl, 0, IF_VEC) ++/* VECTOR SUBTRACT */ ++ F(0xe7f7, VS, VRR_c, V, 0, 0, 0, 0, vs, 0, IF_VEC) ++/* VECTOR SUBTRACT COMPUTE BORROW INDICATION */ ++ F(0xe7f5, VSCBI, VRR_c, V, 0, 0, 0, 0, vscbi, 0, IF_VEC) ++/* VECTOR SUBTRACT WITH BORROW INDICATION */ ++ F(0xe7bf, VSBI, VRR_d, V, 0, 0, 0, 0, vsbi, 0, IF_VEC) ++/* VECTOR SUBTRACT WITH BORROW COMPUTE BORROW INDICATION */ ++ F(0xe7bd, VSBCBI, VRR_d, V, 0, 0, 0, 0, vsbcbi, 0, IF_VEC) ++/* VECTOR SUM ACROSS DOUBLEWORD */ ++ F(0xe765, VSUMG, VRR_c, V, 0, 0, 0, 0, vsumg, 0, IF_VEC) ++/* VECTOR SUM ACROSS QUADWORD */ ++ F(0xe767, VSUMQ, VRR_c, V, 0, 0, 0, 0, vsumq, 0, IF_VEC) ++/* VECTOR SUM ACROSS WORD */ ++ F(0xe764, VSUM, VRR_c, V, 0, 0, 0, 0, vsum, 0, IF_VEC) ++/* VECTOR TEST UNDER MASK */ ++ F(0xe7d8, VTM, VRR_a, V, 0, 0, 0, 0, vtm, 0, IF_VEC) ++ ++/* === Vector String Instructions === */ ++ ++/* VECTOR FIND ANY ELEMENT EQUAL */ ++ F(0xe782, VFAE, VRR_b, V, 0, 0, 0, 0, vfae, 0, IF_VEC) ++/* VECTOR FIND ELEMENT EQUAL */ ++ F(0xe780, VFEE, VRR_b, V, 0, 0, 0, 0, vfee, 0, IF_VEC) ++/* VECTOR FIND ELEMENT NOT EQUAL */ ++ F(0xe781, VFENE, VRR_b, V, 0, 0, 0, 0, vfene, 0, IF_VEC) ++/* VECTOR ISOLATE STRING */ ++ F(0xe75c, VISTR, VRR_a, V, 0, 0, 0, 0, vistr, 0, IF_VEC) ++/* VECTOR STRING RANGE COMPARE */ ++ F(0xe78a, VSTRC, VRR_d, V, 0, 0, 0, 0, vstrc, 0, IF_VEC) ++ ++/* === Vector Floating-Point Instructions === */ ++ ++/* VECTOR FP ADD */ ++ F(0xe7e3, VFA, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) ++/* VECTOR FP COMPARE SCALAR */ ++ F(0xe7cb, WFC, VRR_a, V, 0, 0, 0, 0, wfc, 0, IF_VEC) ++/* VECTOR FP COMPARE AND SIGNAL SCALAR */ ++ F(0xe7ca, WFK, VRR_a, V, 0, 0, 0, 0, wfc, 0, IF_VEC) ++/* VECTOR FP COMPARE EQUAL */ ++ F(0xe7e8, VFCE, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC) ++/* VECTOR FP COMPARE HIGH */ ++ F(0xe7eb, VFCH, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC) ++/* VECTOR FP COMPARE HIGH OR EQUAL */ ++ F(0xe7ea, VFCHE, VRR_c, V, 0, 0, 0, 0, vfc, 0, IF_VEC) ++/* VECTOR FP CONVERT FROM FIXED 64-BIT */ ++ F(0xe7c3, VCDG, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) ++/* VECTOR FP CONVERT FROM LOGICAL 64-BIT */ ++ F(0xe7c1, VCDLG, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) ++/* VECTOR FP CONVERT TO FIXED 64-BIT */ ++ F(0xe7c2, VCGD, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) ++/* VECTOR FP CONVERT TO LOGICAL 64-BIT */ ++ F(0xe7c0, VCLGD, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) ++/* VECTOR FP DIVIDE */ ++ F(0xe7e5, VFD, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) ++/* VECTOR LOAD FP INTEGER */ ++ F(0xe7c7, VFI, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) ++/* VECTOR FP LOAD LENGTHENED */ ++ F(0xe7c4, VFLL, VRR_a, V, 0, 0, 0, 0, vfll, 0, IF_VEC) ++/* VECTOR FP LOAD ROUNDED */ ++ F(0xe7c5, VFLR, VRR_a, V, 0, 0, 0, 0, vcdg, 0, IF_VEC) ++/* VECTOR FP MAXIMUM */ ++ F(0xe7ef, VFMAX, VRR_c, VE, 0, 0, 0, 0, vfmax, 0, IF_VEC) ++/* VECTOR FP MINIMUM */ ++ F(0xe7ee, VFMIN, VRR_c, VE, 0, 0, 0, 0, vfmax, 0, IF_VEC) ++/* VECTOR FP MULTIPLY */ ++ F(0xe7e7, VFM, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) ++/* VECTOR FP MULTIPLY AND 
ADD */ ++ F(0xe78f, VFMA, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC) ++/* VECTOR FP MULTIPLY AND SUBTRACT */ ++ F(0xe78e, VFMS, VRR_e, V, 0, 0, 0, 0, vfma, 0, IF_VEC) ++/* VECTOR FP NEGATIVE MULTIPLY AND ADD */ ++ F(0xe79f, VFNMA, VRR_e, VE, 0, 0, 0, 0, vfma, 0, IF_VEC) ++/* VECTOR FP NEGATIVE MULTIPLY AND SUBTRACT */ ++ F(0xe79e, VFNMS, VRR_e, VE, 0, 0, 0, 0, vfma, 0, IF_VEC) ++/* VECTOR FP PERFORM SIGN OPERATION */ ++ F(0xe7cc, VFPSO, VRR_a, V, 0, 0, 0, 0, vfpso, 0, IF_VEC) ++/* VECTOR FP SQUARE ROOT */ ++ F(0xe7ce, VFSQ, VRR_a, V, 0, 0, 0, 0, vfsq, 0, IF_VEC) ++/* VECTOR FP SUBTRACT */ ++ F(0xe7e2, VFS, VRR_c, V, 0, 0, 0, 0, vfa, 0, IF_VEC) ++/* VECTOR FP TEST DATA CLASS IMMEDIATE */ ++ F(0xe74a, VFTCI, VRI_e, V, 0, 0, 0, 0, vftci, 0, IF_VEC) ++ ++#ifndef CONFIG_USER_ONLY ++/* COMPARE AND SWAP AND PURGE */ ++ E(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL, IF_PRIV) ++ E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ, IF_PRIV) ++/* DIAGNOSE (KVM hypercall) */ ++ F(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0, IF_PRIV | IF_IO) ++/* INSERT STORAGE KEY EXTENDED */ ++ F(0xb229, ISKE, RRE, Z, 0, r2_o, new, r1_8, iske, 0, IF_PRIV) ++/* INVALIDATE DAT TABLE ENTRY */ ++ F(0xb98e, IDTE, RRF_b, Z, r1_o, r2_o, 0, 0, idte, 0, IF_PRIV) ++/* INVALIDATE PAGE TABLE ENTRY */ ++ F(0xb221, IPTE, RRF_a, Z, r1_o, r2_o, 0, 0, ipte, 0, IF_PRIV) ++/* LOAD CONTROL */ ++ F(0xb700, LCTL, RS_a, Z, 0, a2, 0, 0, lctl, 0, IF_PRIV) ++ F(0xeb2f, LCTLG, RSY_a, Z, 0, a2, 0, 0, lctlg, 0, IF_PRIV) ++/* LOAD PROGRAM PARAMETER */ ++ F(0xb280, LPP, S, LPP, 0, m2_64, 0, 0, lpp, 0, IF_PRIV) ++/* LOAD PSW */ ++ F(0x8200, LPSW, S, Z, 0, a2, 0, 0, lpsw, 0, IF_PRIV) ++/* LOAD PSW EXTENDED */ ++ F(0xb2b2, LPSWE, S, Z, 0, a2, 0, 0, lpswe, 0, IF_PRIV) ++/* LOAD REAL ADDRESS */ ++ F(0xb100, LRA, RX_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV) ++ F(0xe313, LRAY, RXY_a, LD, 0, a2, r1, 0, lra, 0, IF_PRIV) ++ F(0xe303, LRAG, RXY_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV) ++/* LOAD USING REAL ADDRESS */ ++ E(0xb24b, LURA, RRE, Z, 0, ra2, new, r1_32, lura, 0, MO_TEUL, IF_PRIV) ++ E(0xb905, LURAG, RRE, Z, 0, ra2, r1, 0, lura, 0, MO_TEQ, IF_PRIV) ++/* MOVE TO PRIMARY */ ++ F(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0, IF_PRIV) ++/* MOVE TO SECONDARY */ ++ F(0xdb00, MVCS, SS_d, Z, la1, a2, 0, 0, mvcs, 0, IF_PRIV) ++/* PURGE TLB */ ++ F(0xb20d, PTLB, S, Z, 0, 0, 0, 0, ptlb, 0, IF_PRIV) ++/* RESET REFERENCE BIT EXTENDED */ ++ F(0xb22a, RRBE, RRE, Z, 0, r2_o, 0, 0, rrbe, 0, IF_PRIV) ++/* SERVICE CALL LOGICAL PROCESSOR (PV hypercall) */ ++ F(0xb220, SERVC, RRE, Z, r1_o, r2_o, 0, 0, servc, 0, IF_PRIV | IF_IO) ++/* SET ADDRESS SPACE CONTROL FAST */ ++ F(0xb279, SACF, S, Z, 0, a2, 0, 0, sacf, 0, IF_PRIV) ++/* SET CLOCK */ ++ F(0xb204, SCK, S, Z, la2, 0, 0, 0, sck, 0, IF_PRIV | IF_IO) ++/* SET CLOCK COMPARATOR */ ++ F(0xb206, SCKC, S, Z, 0, m2_64a, 0, 0, sckc, 0, IF_PRIV | IF_IO) ++/* SET CLOCK PROGRAMMABLE FIELD */ ++ F(0x0107, SCKPF, E, Z, 0, 0, 0, 0, sckpf, 0, IF_PRIV) ++/* SET CPU TIMER */ ++ F(0xb208, SPT, S, Z, 0, m2_64a, 0, 0, spt, 0, IF_PRIV | IF_IO) ++/* SET PREFIX */ ++ F(0xb210, SPX, S, Z, 0, m2_32ua, 0, 0, spx, 0, IF_PRIV) ++/* SET PSW KEY FROM ADDRESS */ ++ F(0xb20a, SPKA, S, Z, 0, a2, 0, 0, spka, 0, IF_PRIV) ++/* SET STORAGE KEY EXTENDED */ ++ F(0xb22b, SSKE, RRF_c, Z, r1_o, r2_o, 0, 0, sske, 0, IF_PRIV) ++/* SET SYSTEM MASK */ ++ F(0x8000, SSM, S, Z, 0, m2_8u, 0, 0, ssm, 0, IF_PRIV) ++/* SIGNAL PROCESSOR */ ++ F(0xae00, SIGP, RS_a, Z, 0, a2, 0, 0, sigp, 0, IF_PRIV | IF_IO) ++/* STORE CLOCK COMPARATOR */ ++ F(0xb207, STCKC, S, 
Z, la2, 0, new, m1_64a, stckc, 0, IF_PRIV) ++/* STORE CONTROL */ ++ F(0xb600, STCTL, RS_a, Z, 0, a2, 0, 0, stctl, 0, IF_PRIV) ++ F(0xeb25, STCTG, RSY_a, Z, 0, a2, 0, 0, stctg, 0, IF_PRIV) ++/* STORE CPU ADDRESS */ ++ F(0xb212, STAP, S, Z, la2, 0, new, m1_16a, stap, 0, IF_PRIV) ++/* STORE CPU ID */ ++ F(0xb202, STIDP, S, Z, la2, 0, new, m1_64a, stidp, 0, IF_PRIV) ++/* STORE CPU TIMER */ ++ F(0xb209, STPT, S, Z, la2, 0, new, m1_64a, stpt, 0, IF_PRIV | IF_IO) ++/* STORE FACILITY LIST */ ++ F(0xb2b1, STFL, S, Z, 0, 0, 0, 0, stfl, 0, IF_PRIV) ++/* STORE PREFIX */ ++ F(0xb211, STPX, S, Z, la2, 0, new, m1_32a, stpx, 0, IF_PRIV) ++/* STORE SYSTEM INFORMATION */ ++ F(0xb27d, STSI, S, Z, 0, a2, 0, 0, stsi, 0, IF_PRIV) ++/* STORE THEN AND SYSTEM MASK */ ++ F(0xac00, STNSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV) ++/* STORE THEN OR SYSTEM MASK */ ++ F(0xad00, STOSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV) ++/* STORE USING REAL ADDRESS */ ++ E(0xb246, STURA, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEUL, IF_PRIV) ++ E(0xb925, STURG, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEQ, IF_PRIV) ++/* TEST BLOCK */ ++ F(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0, IF_PRIV) ++/* TEST PROTECTION */ ++ C(0xe501, TPROT, SSE, Z, la1, a2, 0, 0, tprot, 0) ++ ++/* CCW I/O Instructions */ ++ F(0xb276, XSCH, S, Z, 0, 0, 0, 0, xsch, 0, IF_PRIV | IF_IO) ++ F(0xb230, CSCH, S, Z, 0, 0, 0, 0, csch, 0, IF_PRIV | IF_IO) ++ F(0xb231, HSCH, S, Z, 0, 0, 0, 0, hsch, 0, IF_PRIV | IF_IO) ++ F(0xb232, MSCH, S, Z, 0, insn, 0, 0, msch, 0, IF_PRIV | IF_IO) ++ F(0xb23b, RCHP, S, Z, 0, 0, 0, 0, rchp, 0, IF_PRIV | IF_IO) ++ F(0xb238, RSCH, S, Z, 0, 0, 0, 0, rsch, 0, IF_PRIV | IF_IO) ++ F(0xb237, SAL, S, Z, 0, 0, 0, 0, sal, 0, IF_PRIV | IF_IO) ++ F(0xb23c, SCHM, S, Z, 0, insn, 0, 0, schm, 0, IF_PRIV | IF_IO) ++ F(0xb274, SIGA, S, Z, 0, 0, 0, 0, siga, 0, IF_PRIV | IF_IO) ++ F(0xb23a, STCPS, S, Z, 0, 0, 0, 0, stcps, 0, IF_PRIV | IF_IO) ++ F(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0, IF_PRIV | IF_IO) ++ F(0xb239, STCRW, S, Z, 0, insn, 0, 0, stcrw, 0, IF_PRIV | IF_IO) ++ F(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0, IF_PRIV | IF_IO) ++ F(0xb236, TPI , S, Z, la2, 0, 0, 0, tpi, 0, IF_PRIV | IF_IO) ++ F(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0, IF_PRIV | IF_IO) ++ /* ??? Not listed in PoO ninth edition, but there's a linux driver that ++ uses it: "A CHSC subchannel is usually present on LPAR only." */ ++ F(0xb25f, CHSC, RRE, Z, 0, insn, 0, 0, chsc, 0, IF_PRIV | IF_IO) ++ ++/* zPCI Instructions */ ++ /* None of these instructions are documented in the PoP, so this is all ++ based upon target/s390x/kvm.c and Linux code and likely incomplete */ ++ F(0xebd0, PCISTB, RSY_a, PCI, la2, 0, 0, 0, pcistb, 0, IF_PRIV | IF_IO) ++ F(0xebd1, SIC, RSY_a, AIS, r1, r3, 0, 0, sic, 0, IF_PRIV | IF_IO) ++ F(0xb9a0, CLP, RRF_c, PCI, 0, 0, 0, 0, clp, 0, IF_PRIV | IF_IO) ++ F(0xb9d0, PCISTG, RRE, PCI, 0, 0, 0, 0, pcistg, 0, IF_PRIV | IF_IO) ++ F(0xb9d2, PCILG, RRE, PCI, 0, 0, 0, 0, pcilg, 0, IF_PRIV | IF_IO) ++ F(0xb9d3, RPCIT, RRE, PCI, 0, 0, 0, 0, rpcit, 0, IF_PRIV | IF_IO) ++ F(0xe3d0, MPCIFC, RXY_a, PCI, la2, 0, 0, 0, mpcifc, 0, IF_PRIV | IF_IO) ++ F(0xe3d4, STPCIFC, RXY_a, PCI, la2, 0, 0, 0, stpcifc, 0, IF_PRIV | IF_IO) ++ ++#endif /* CONFIG_USER_ONLY */ +diff --git a/target/s390x/tcg/insn-format.def b/target/s390x/tcg/insn-format.def +new file mode 100644 +index 0000000000..6253edbd19 +--- /dev/null ++++ b/target/s390x/tcg/insn-format.def +@@ -0,0 +1,81 @@ ++/* Description of s390 insn formats. */ ++/* NAME F1, F2... 
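++   Each FN() entry below names a format and lists its fields. Roughly:
++   R(n,pos) is a register field, M(n,pos) a mask, V(n,pos) a vector
++   register, I(n,pos,len) an immediate of len bits, L(n,pos,len) a
++   length field, BD/BXD a base(+index)+displacement operand, and the
++   BDL/BXDL variants carry a long 20-bit displacement.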
*/ ++F0(E) ++F1(I, I(1, 8, 8)) ++F2(RI_a, R(1, 8), I(2,16,16)) ++F2(RI_b, R(1, 8), I(2,16,16)) ++F2(RI_c, M(1, 8), I(2,16,16)) ++F3(RIE_a, R(1, 8), I(2,16,16), M(3,32)) ++F4(RIE_b, R(1, 8), R(2,12), M(3,32), I(4,16,16)) ++F4(RIE_c, R(1, 8), I(2,32, 8), M(3,12), I(4,16,16)) ++F3(RIE_d, R(1, 8), I(2,16,16), R(3,12)) ++F3(RIE_e, R(1, 8), I(2,16,16), R(3,12)) ++F5(RIE_f, R(1, 8), R(2,12), I(3,16,8), I(4,24,8), I(5,32,8)) ++F3(RIE_g, R(1, 8), I(2,16,16), M(3,12)) ++F2(RIL_a, R(1, 8), I(2,16,32)) ++F2(RIL_b, R(1, 8), I(2,16,32)) ++F2(RIL_c, M(1, 8), I(2,16,32)) ++F4(RIS, R(1, 8), I(2,32, 8), M(3,12), BD(4,16,20)) ++/* ??? The PoO does not call out subtypes _a and _b for RR, as it does ++ for e.g. RX. Our checking requires this for e.g. BCR. */ ++F2(RR_a, R(1, 8), R(2,12)) ++F2(RR_b, M(1, 8), R(2,12)) ++F2(RRE, R(1,24), R(2,28)) ++F3(RRD, R(1,16), R(2,28), R(3,24)) ++F4(RRF_a, R(1,24), R(2,28), R(3,16), M(4,20)) ++F4(RRF_b, R(1,24), R(2,28), R(3,16), M(4,20)) ++F4(RRF_c, R(1,24), R(2,28), M(3,16), M(4,20)) ++F4(RRF_d, R(1,24), R(2,28), M(3,16), M(4,20)) ++F4(RRF_e, R(1,24), R(2,28), M(3,16), M(4,20)) ++F4(RRS, R(1, 8), R(2,12), M(3,32), BD(4,16,20)) ++F3(RS_a, R(1, 8), BD(2,16,20), R(3,12)) ++F3(RS_b, R(1, 8), BD(2,16,20), M(3,12)) ++F3(RSI, R(1, 8), I(2,16,16), R(3,12)) ++F2(RSL, L(1, 8, 4), BD(1,16,20)) ++F3(RSY_a, R(1, 8), BDL(2), R(3,12)) ++F3(RSY_b, R(1, 8), BDL(2), M(3,12)) ++F2(RX_a, R(1, 8), BXD(2)) ++F2(RX_b, M(1, 8), BXD(2)) ++F3(RXE, R(1, 8), BXD(2), M(3,32)) ++F3(RXF, R(1,32), BXD(2), R(3, 8)) ++F2(RXY_a, R(1, 8), BXDL(2)) ++F2(RXY_b, M(1, 8), BXDL(2)) ++F1(S, BD(2,16,20)) ++F2(SI, BD(1,16,20), I(2,8,8)) ++F2(SIL, BD(1,16,20), I(2,32,16)) ++F2(SIY, BDL(1), I(2, 8, 8)) ++F3(SS_a, L(1, 8, 8), BD(1,16,20), BD(2,32,36)) ++F4(SS_b, L(1, 8, 4), BD(1,16,20), L(2,12,4), BD(2,32,36)) ++F4(SS_c, L(1, 8, 4), BD(1,16,20), BD(2,32,36), I(3,12, 4)) ++/* ??? Odd man out. The L1 field here is really a register, but the ++ easy way to compress the fields has R1 and B1 overlap. 
*/ ++F4(SS_d, L(1, 8, 4), BD(1,16,20), BD(2,32,36), R(3,12)) ++F4(SS_e, R(1, 8), BD(2,16,20), R(3,12), BD(4,32,36)) ++F3(SS_f, BD(1,16,20), L(2,8,8), BD(2,32,36)) ++F2(SSE, BD(1,16,20), BD(2,32,36)) ++F3(SSF, BD(1,16,20), BD(2,32,36), R(3,8)) ++F3(VRI_a, V(1,8), I(2,16,16), M(3,32)) ++F4(VRI_b, V(1,8), I(2,16,8), I(3,24,8), M(4,32)) ++F4(VRI_c, V(1,8), V(3,12), I(2,16,16), M(4,32)) ++F5(VRI_d, V(1,8), V(2,12), V(3,16), I(4,24,8), M(5,32)) ++F5(VRI_e, V(1,8), V(2,12), I(3,16,12), M(5,28), M(4,32)) ++F5(VRI_f, V(1,8), V(2,12), V(3,16), M(5,24), I(4,28,8)) ++F5(VRI_g, V(1,8), V(2,12), I(4,16,8), M(5,24), I(3,28,8)) ++F3(VRI_h, V(1,8), I(2,16,16), I(3,32,4)) ++F4(VRI_i, V(1,8), R(2,12), M(4,24), I(3,28,8)) ++F5(VRR_a, V(1,8), V(2,12), M(5,24), M(4,28), M(3,32)) ++F5(VRR_b, V(1,8), V(2,12), V(3,16), M(5,24), M(4,32)) ++F6(VRR_c, V(1,8), V(2,12), V(3,16), M(6,24), M(5,28), M(4,32)) ++F6(VRR_d, V(1,8), V(2,12), V(3,16), M(5,20), M(6,24), V(4,32)) ++F6(VRR_e, V(1,8), V(2,12), V(3,16), M(6,20), M(5,28), V(4,32)) ++F3(VRR_f, V(1,8), R(2,12), R(3,16)) ++F1(VRR_g, V(1,12)) ++F3(VRR_h, V(1,12), V(2,16), M(3,24)) ++F3(VRR_i, R(1,8), V(2,12), M(3,24)) ++F4(VRS_a, V(1,8), V(3,12), BD(2,16,20), M(4,32)) ++F4(VRS_b, V(1,8), R(3,12), BD(2,16,20), M(4,32)) ++F4(VRS_c, R(1,8), V(3,12), BD(2,16,20), M(4,32)) ++F3(VRS_d, R(3,12), BD(2,16,20), V(1,32)) ++F4(VRV, V(1,8), V(2,12), BD(2,16,20), M(3,32)) ++F3(VRX, V(1,8), BXD(2), M(3,32)) ++F3(VSI, I(3,8,8), BD(2,16,20), V(1,32)) +diff --git a/target/s390x/tcg/int_helper.c b/target/s390x/tcg/int_helper.c +new file mode 100644 +index 0000000000..954542388a +--- /dev/null ++++ b/target/s390x/tcg/int_helper.c +@@ -0,0 +1,148 @@ ++/* ++ * S/390 integer helper routines ++ * ++ * Copyright (c) 2009 Ulrich Hecht ++ * Copyright (c) 2009 Alexander Graf ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, see <http://www.gnu.org/licenses/>. ++ */ ++ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "tcg_s390x.h" ++#include "exec/exec-all.h" ++#include "qemu/host-utils.h" ++#include "exec/helper-proto.h" ++ ++/* #define DEBUG_HELPER */ ++#ifdef DEBUG_HELPER ++#define HELPER_LOG(x...) qemu_log(x) ++#else ++#define HELPER_LOG(x...) ++#endif ++ ++/* 64/32 -> 32 signed division */ ++int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64) ++{ ++ int32_t ret, b = b64; ++ int64_t q; ++ ++ if (b == 0) { ++ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); ++ } ++ ++ ret = q = a / b; ++ env->retxl = a % b; ++ ++ /* Catch non-representable quotient. 
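++       For example, 0x100000000 / 2 yields 0x80000000, which is not
++       representable as a positive int32_t, so the helper raises a
++       fixed-point-divide program interrupt instead.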
*/ ++ if (ret != q) { ++ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); ++ } ++ ++ return ret; ++} ++ ++/* 64/32 -> 32 unsigned division */ ++uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64) ++{ ++ uint32_t ret, b = b64; ++ uint64_t q; ++ ++ if (b == 0) { ++ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); ++ } ++ ++ ret = q = a / b; ++ env->retxl = a % b; ++ ++ /* Catch non-representable quotient. */ ++ if (ret != q) { ++ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); ++ } ++ ++ return ret; ++} ++ ++/* 64/64 -> 64 signed division */ ++int64_t HELPER(divs64)(CPUS390XState *env, int64_t a, int64_t b) ++{ ++ /* Catch divide by zero, and non-representable quotient (MIN / -1). */ ++ if (b == 0 || (b == -1 && a == (1ll << 63))) { ++ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); ++ } ++ env->retxl = a % b; ++ return a / b; ++} ++ ++/* 128 -> 64/64 unsigned division */ ++uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al, ++ uint64_t b) ++{ ++ uint64_t ret; ++ /* Signal divide by zero. */ ++ if (b == 0) { ++ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); ++ } ++ if (ah == 0) { ++ /* 64 -> 64/64 case */ ++ env->retxl = al % b; ++ ret = al / b; ++ } else { ++ /* ??? Move i386 idivq helper to host-utils. */ ++#ifdef CONFIG_INT128 ++ __uint128_t a = ((__uint128_t)ah << 64) | al; ++ __uint128_t q = a / b; ++ env->retxl = a % b; ++ ret = q; ++ if (ret != q) { ++ tcg_s390_program_interrupt(env, PGM_FIXPT_DIVIDE, GETPC()); ++ } ++#else ++ /* 32-bit hosts would need special wrapper functionality - just abort if ++ we encounter such a case; it's very unlikely anyways. */ ++ cpu_abort(env_cpu(env), "128 -> 64/64 division not implemented\n"); ++#endif ++ } ++ return ret; ++} ++ ++uint64_t HELPER(cvd)(int32_t reg) ++{ ++ /* positive 0 */ ++ uint64_t dec = 0x0c; ++ int64_t bin = reg; ++ int shift; ++ ++ if (bin < 0) { ++ bin = -bin; ++ dec = 0x0d; ++ } ++ ++ for (shift = 4; (shift < 64) && bin; shift += 4) { ++ dec |= (bin % 10) << shift; ++ bin /= 10; ++ } ++ ++ return dec; ++} ++ ++uint64_t HELPER(popcnt)(uint64_t val) ++{ ++ /* Note that we don't fold past bytes. */ ++ val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL); ++ val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); ++ val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL; ++ return val; ++} +diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c +new file mode 100644 +index 0000000000..9bae13ecf0 +--- /dev/null ++++ b/target/s390x/tcg/mem_helper.c +@@ -0,0 +1,3008 @@ ++/* ++ * S/390 memory access helper routines ++ * ++ * Copyright (c) 2009 Ulrich Hecht ++ * Copyright (c) 2009 Alexander Graf ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
++ */ ++ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "tcg_s390x.h" ++#include "exec/helper-proto.h" ++#include "exec/exec-all.h" ++#include "exec/cpu_ldst.h" ++#include "qemu/int128.h" ++#include "qemu/atomic128.h" ++#include "tcg/tcg.h" ++ ++#if !defined(CONFIG_USER_ONLY) ++#include "hw/s390x/storage-keys.h" ++#include "hw/boards.h" ++#endif ++ ++/*****************************************************************************/ ++/* Softmmu support */ ++ ++/* #define DEBUG_HELPER */ ++#ifdef DEBUG_HELPER ++#define HELPER_LOG(x...) qemu_log(x) ++#else ++#define HELPER_LOG(x...) ++#endif ++ ++static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key) ++{ ++ uint16_t pkm = env->cregs[3] >> 16; ++ ++ if (env->psw.mask & PSW_MASK_PSTATE) { ++ /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */ ++ return pkm & (0x80 >> psw_key); ++ } ++ return true; ++} ++ ++static bool is_destructive_overlap(CPUS390XState *env, uint64_t dest, ++ uint64_t src, uint32_t len) ++{ ++ if (!len || src == dest) { ++ return false; ++ } ++ /* Take care of wrapping at the end of address space. */ ++ if (unlikely(wrap_address(env, src + len - 1) < src)) { ++ return dest > src || dest <= wrap_address(env, src + len - 1); ++ } ++ return dest > src && dest <= src + len - 1; ++} ++ ++/* Trigger a SPECIFICATION exception if an address or a length is not ++ naturally aligned. */ ++static inline void check_alignment(CPUS390XState *env, uint64_t v, ++ int wordsize, uintptr_t ra) ++{ ++ if (v % wordsize) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++} ++ ++/* Load a value from memory according to its size. */ ++static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr, ++ int wordsize, uintptr_t ra) ++{ ++ switch (wordsize) { ++ case 1: ++ return cpu_ldub_data_ra(env, addr, ra); ++ case 2: ++ return cpu_lduw_data_ra(env, addr, ra); ++ default: ++ abort(); ++ } ++} ++ ++/* Store a value to memory according to its size. */ ++static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr, ++ uint64_t value, int wordsize, ++ uintptr_t ra) ++{ ++ switch (wordsize) { ++ case 1: ++ cpu_stb_data_ra(env, addr, value, ra); ++ break; ++ case 2: ++ cpu_stw_data_ra(env, addr, value, ra); ++ break; ++ default: ++ abort(); ++ } ++} ++ ++/* An access covers at most 4096 bytes and therefore at most two pages. */ ++typedef struct S390Access { ++ target_ulong vaddr1; ++ target_ulong vaddr2; ++ char *haddr1; ++ char *haddr2; ++ uint16_t size1; ++ uint16_t size2; ++ /* ++ * If we can't access the host page directly, we'll have to do I/O access ++ * via ld/st helpers. These are internal details, so we store the ++ * mmu idx to do the access here instead of passing it around in the ++ * helpers. Maybe, one day we can get rid of ld/st access - once we can ++ * handle TLB_NOTDIRTY differently. We don't expect these special accesses ++ * to trigger exceptions - only if we would have TLB_NOTDIRTY on LAP ++ * pages, we might trigger a new MMU translation - very unlikely that ++ * the mapping changes in between and we would trigger a fault. ++ */ ++ int mmu_idx; ++} S390Access; ++ ++/* ++ * With nonfault=1, return the PGM_ exception that would have been injected ++ * into the guest; return 0 if no exception was detected. ++ * ++ * For !CONFIG_USER_ONLY, the TEC is stored to env->tlb_fill_tec. ++ * For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr. 
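++ *
++ * Callers that must not fault - MVPG below, for instance, has to set a
++ * condition code instead of taking the exception - probe with nonfault=1
++ * and decide afterwards what to inject.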
++ */ ++static int s390_probe_access(CPUArchState *env, target_ulong addr, int size, ++ MMUAccessType access_type, int mmu_idx, ++ bool nonfault, void **phost, uintptr_t ra) ++{ ++ int flags; ++ ++#if defined(CONFIG_USER_ONLY) ++ flags = page_get_flags(addr); ++ if (!(flags & (access_type == MMU_DATA_LOAD ? PAGE_READ : PAGE_WRITE_ORG))) { ++ env->__excp_addr = addr; ++ flags = (flags & PAGE_VALID) ? PGM_PROTECTION : PGM_ADDRESSING; ++ if (nonfault) { ++ return flags; ++ } ++ tcg_s390_program_interrupt(env, flags, ra); ++ } ++ *phost = g2h(env_cpu(env), addr); ++#else ++ /* ++ * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL ++ * to detect if there was an exception during tlb_fill(). ++ */ ++ env->tlb_fill_exc = 0; ++ flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost, ++ ra); ++ if (env->tlb_fill_exc) { ++ return env->tlb_fill_exc; ++ } ++ ++ if (unlikely(flags & TLB_WATCHPOINT)) { ++ /* S390 does not presently use transaction attributes. */ ++ cpu_check_watchpoint(env_cpu(env), addr, size, ++ MEMTXATTRS_UNSPECIFIED, ++ (access_type == MMU_DATA_STORE ++ ? BP_MEM_WRITE : BP_MEM_READ), ra); ++ } ++#endif ++ return 0; ++} ++ ++static int access_prepare_nf(S390Access *access, CPUS390XState *env, ++ bool nonfault, vaddr vaddr1, int size, ++ MMUAccessType access_type, ++ int mmu_idx, uintptr_t ra) ++{ ++ void *haddr1, *haddr2 = NULL; ++ int size1, size2, exc; ++ vaddr vaddr2 = 0; ++ ++ assert(size > 0 && size <= 4096); ++ ++ size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK)), ++ size2 = size - size1; ++ ++ exc = s390_probe_access(env, vaddr1, size1, access_type, mmu_idx, nonfault, ++ &haddr1, ra); ++ if (exc) { ++ return exc; ++ } ++ if (unlikely(size2)) { ++ /* The access crosses page boundaries. */ ++ vaddr2 = wrap_address(env, vaddr1 + size1); ++ exc = s390_probe_access(env, vaddr2, size2, access_type, mmu_idx, ++ nonfault, &haddr2, ra); ++ if (exc) { ++ return exc; ++ } ++ } ++ ++ *access = (S390Access) { ++ .vaddr1 = vaddr1, ++ .vaddr2 = vaddr2, ++ .haddr1 = haddr1, ++ .haddr2 = haddr2, ++ .size1 = size1, ++ .size2 = size2, ++ .mmu_idx = mmu_idx ++ }; ++ return 0; ++} ++ ++static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size, ++ MMUAccessType access_type, int mmu_idx, ++ uintptr_t ra) ++{ ++ S390Access ret; ++ int exc = access_prepare_nf(&ret, env, false, vaddr, size, ++ access_type, mmu_idx, ra); ++ assert(!exc); ++ return ret; ++} ++ ++/* Helper to handle memset on a single page. */ ++static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr, ++ uint8_t byte, uint16_t size, int mmu_idx, ++ uintptr_t ra) ++{ ++#ifdef CONFIG_USER_ONLY ++ g_assert(haddr); ++ memset(haddr, byte, size); ++#else ++ TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); ++ int i; ++ ++ if (likely(haddr)) { ++ memset(haddr, byte, size); ++ } else { ++ /* ++ * Do a single access and test if we can then get access to the ++ * page. This is especially relevant to speed up TLB_NOTDIRTY. 
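++         * If that first byte store succeeds, the page has been marked
++         * dirty, and tlb_vaddr_to_host() will typically hand back a
++         * direct host pointer for the remaining bytes.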
++ */ ++ g_assert(size > 0); ++ helper_ret_stb_mmu(env, vaddr, byte, oi, ra); ++ haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); ++ if (likely(haddr)) { ++ memset(haddr + 1, byte, size - 1); ++ } else { ++ for (i = 1; i < size; i++) { ++ helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra); ++ } ++ } ++ } ++#endif ++} ++ ++static void access_memset(CPUS390XState *env, S390Access *desta, ++ uint8_t byte, uintptr_t ra) ++{ ++ ++ do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1, ++ desta->mmu_idx, ra); ++ if (likely(!desta->size2)) { ++ return; ++ } ++ do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2, ++ desta->mmu_idx, ra); ++} ++ ++static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr, ++ int offset, int mmu_idx, uintptr_t ra) ++{ ++#ifdef CONFIG_USER_ONLY ++ return ldub_p(*haddr + offset); ++#else ++ TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); ++ uint8_t byte; ++ ++ if (likely(*haddr)) { ++ return ldub_p(*haddr + offset); ++ } ++ /* ++ * Do a single access and test if we can then get access to the ++ * page. This is especially relevant to speed up TLB_NOTDIRTY. ++ */ ++ byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra); ++ *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx); ++ return byte; ++#endif ++} ++ ++static uint8_t access_get_byte(CPUS390XState *env, S390Access *access, ++ int offset, uintptr_t ra) ++{ ++ if (offset < access->size1) { ++ return do_access_get_byte(env, access->vaddr1, &access->haddr1, ++ offset, access->mmu_idx, ra); ++ } ++ return do_access_get_byte(env, access->vaddr2, &access->haddr2, ++ offset - access->size1, access->mmu_idx, ra); ++} ++ ++static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr, ++ int offset, uint8_t byte, int mmu_idx, ++ uintptr_t ra) ++{ ++#ifdef CONFIG_USER_ONLY ++ stb_p(*haddr + offset, byte); ++#else ++ TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); ++ ++ if (likely(*haddr)) { ++ stb_p(*haddr + offset, byte); ++ return; ++ } ++ /* ++ * Do a single access and test if we can then get access to the ++ * page. This is especially relevant to speed up TLB_NOTDIRTY. ++ */ ++ helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra); ++ *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); ++#endif ++} ++ ++static void access_set_byte(CPUS390XState *env, S390Access *access, ++ int offset, uint8_t byte, uintptr_t ra) ++{ ++ if (offset < access->size1) { ++ do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte, ++ access->mmu_idx, ra); ++ } else { ++ do_access_set_byte(env, access->vaddr2, &access->haddr2, ++ offset - access->size1, byte, access->mmu_idx, ra); ++ } ++} ++ ++/* ++ * Move data with the same semantics as memmove() in case ranges don't overlap ++ * or src > dest. Undefined behavior on destructive overlaps. 
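++ * Source and destination may split across their page boundaries at
++ * different offsets, so the fast path below stitches the copy together
++ * from up to three memmove() calls.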
++ */ ++static void access_memmove(CPUS390XState *env, S390Access *desta, ++ S390Access *srca, uintptr_t ra) ++{ ++ int diff; ++ ++ g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2); ++ ++ /* Fallback to slow access in case we don't have access to all host pages */ ++ if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) || ++ !srca->haddr1 || (srca->size2 && !srca->haddr2))) { ++ int i; ++ ++ for (i = 0; i < desta->size1 + desta->size2; i++) { ++ uint8_t byte = access_get_byte(env, srca, i, ra); ++ ++ access_set_byte(env, desta, i, byte, ra); ++ } ++ return; ++ } ++ ++ if (srca->size1 == desta->size1) { ++ memmove(desta->haddr1, srca->haddr1, srca->size1); ++ if (unlikely(srca->size2)) { ++ memmove(desta->haddr2, srca->haddr2, srca->size2); ++ } ++ } else if (srca->size1 < desta->size1) { ++ diff = desta->size1 - srca->size1; ++ memmove(desta->haddr1, srca->haddr1, srca->size1); ++ memmove(desta->haddr1 + srca->size1, srca->haddr2, diff); ++ if (likely(desta->size2)) { ++ memmove(desta->haddr2, srca->haddr2 + diff, desta->size2); ++ } ++ } else { ++ diff = srca->size1 - desta->size1; ++ memmove(desta->haddr1, srca->haddr1, desta->size1); ++ memmove(desta->haddr2, srca->haddr1 + desta->size1, diff); ++ if (likely(srca->size2)) { ++ memmove(desta->haddr2 + diff, srca->haddr2, srca->size2); ++ } ++ } ++} ++ ++static int mmu_idx_from_as(uint8_t as) ++{ ++ switch (as) { ++ case AS_PRIMARY: ++ return MMU_PRIMARY_IDX; ++ case AS_SECONDARY: ++ return MMU_SECONDARY_IDX; ++ case AS_HOME: ++ return MMU_HOME_IDX; ++ default: ++ /* FIXME AS_ACCREG */ ++ g_assert_not_reached(); ++ } ++} ++ ++/* and on array */ ++static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest, ++ uint64_t src, uintptr_t ra) ++{ ++ const int mmu_idx = cpu_mmu_index(env, false); ++ S390Access srca1, srca2, desta; ++ uint32_t i; ++ uint8_t c = 0; ++ ++ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", ++ __func__, l, dest, src); ++ ++ /* NC always processes one more byte than specified - maximum is 256 */ ++ l++; ++ ++ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); ++ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); ++ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); ++ for (i = 0; i < l; i++) { ++ const uint8_t x = access_get_byte(env, &srca1, i, ra) & ++ access_get_byte(env, &srca2, i, ra); ++ ++ c |= x; ++ access_set_byte(env, &desta, i, x, ra); ++ } ++ return c != 0; ++} ++ ++uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest, ++ uint64_t src) ++{ ++ return do_helper_nc(env, l, dest, src, GETPC()); ++} ++ ++/* xor on array */ ++static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest, ++ uint64_t src, uintptr_t ra) ++{ ++ const int mmu_idx = cpu_mmu_index(env, false); ++ S390Access srca1, srca2, desta; ++ uint32_t i; ++ uint8_t c = 0; ++ ++ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", ++ __func__, l, dest, src); ++ ++ /* XC always processes one more byte than specified - maximum is 256 */ ++ l++; ++ ++ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); ++ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); ++ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); ++ ++ /* xor with itself is the same as memset(0) */ ++ if (src == dest) { ++ access_memset(env, &desta, 0, ra); ++ return 0; ++ } ++ ++ for (i = 0; i < l; i++) { ++ const uint8_t x = access_get_byte(env, &srca1, i, ra) ^ ++ access_get_byte(env, &srca2, i, ra); ++ ++ c |= x; ++ 
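/* c accumulates the OR of every result byte; the condition code is derived from the returned c != 0 */ ++ 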
access_set_byte(env, &desta, i, x, ra); ++ } ++ return c != 0; ++} ++ ++uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest, ++ uint64_t src) ++{ ++ return do_helper_xc(env, l, dest, src, GETPC()); ++} ++ ++/* or on array */ ++static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest, ++ uint64_t src, uintptr_t ra) ++{ ++ const int mmu_idx = cpu_mmu_index(env, false); ++ S390Access srca1, srca2, desta; ++ uint32_t i; ++ uint8_t c = 0; ++ ++ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", ++ __func__, l, dest, src); ++ ++ /* OC always processes one more byte than specified - maximum is 256 */ ++ l++; ++ ++ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); ++ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); ++ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); ++ for (i = 0; i < l; i++) { ++ const uint8_t x = access_get_byte(env, &srca1, i, ra) | ++ access_get_byte(env, &srca2, i, ra); ++ ++ c |= x; ++ access_set_byte(env, &desta, i, x, ra); ++ } ++ return c != 0; ++} ++ ++uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest, ++ uint64_t src) ++{ ++ return do_helper_oc(env, l, dest, src, GETPC()); ++} ++ ++/* memmove */ ++static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest, ++ uint64_t src, uintptr_t ra) ++{ ++ const int mmu_idx = cpu_mmu_index(env, false); ++ S390Access srca, desta; ++ uint32_t i; ++ ++ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n", ++ __func__, l, dest, src); ++ ++ /* MVC always copies one more byte than specified - maximum is 256 */ ++ l++; ++ ++ srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); ++ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); ++ ++ /* ++ * "When the operands overlap, the result is obtained as if the operands ++ * were processed one byte at a time". Only non-destructive overlaps ++ * behave like memmove(). 
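++ * The dest == src + 1 case below is the classic one-byte-overlap idiom
++ * for propagating the first source byte through the destination, so it
++ * is implemented as a memset() of that byte.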
++ */ ++ if (dest == src + 1) { ++ access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra); ++ } else if (!is_destructive_overlap(env, dest, src, l)) { ++ access_memmove(env, &desta, &srca, ra); ++ } else { ++ for (i = 0; i < l; i++) { ++ uint8_t byte = access_get_byte(env, &srca, i, ra); ++ ++ access_set_byte(env, &desta, i, byte, ra); ++ } ++ } ++ ++ return env->cc_op; ++} ++ ++void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) ++{ ++ do_helper_mvc(env, l, dest, src, GETPC()); ++} ++ ++/* move inverse */ ++void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) ++{ ++ const int mmu_idx = cpu_mmu_index(env, false); ++ S390Access srca, desta; ++ uintptr_t ra = GETPC(); ++ int i; ++ ++ /* MVCIN always copies one more byte than specified - maximum is 256 */ ++ l++; ++ ++ src = wrap_address(env, src - l + 1); ++ srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); ++ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); ++ for (i = 0; i < l; i++) { ++ const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra); ++ ++ access_set_byte(env, &desta, i, x, ra); ++ } ++} ++ ++/* move numerics */ ++void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) ++{ ++ const int mmu_idx = cpu_mmu_index(env, false); ++ S390Access srca1, srca2, desta; ++ uintptr_t ra = GETPC(); ++ int i; ++ ++ /* MVN always copies one more byte than specified - maximum is 256 */ ++ l++; ++ ++ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); ++ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); ++ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra); ++ for (i = 0; i < l; i++) { ++ const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) | ++ (access_get_byte(env, &srca2, i, ra) & 0xf0); ++ ++ access_set_byte(env, &desta, i, x, ra); ++ } ++} ++ ++/* move with offset */ ++void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) ++{ ++ const int mmu_idx = cpu_mmu_index(env, false); ++ /* MVO always processes one more byte than specified - maximum is 16 */ ++ const int len_dest = (l >> 4) + 1; ++ const int len_src = (l & 0xf) + 1; ++ uintptr_t ra = GETPC(); ++ uint8_t byte_dest, byte_src; ++ S390Access srca, desta; ++ int i, j; ++ ++ srca = access_prepare(env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra); ++ desta = access_prepare(env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra); ++ ++ /* Handle rightmost byte */ ++ byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra); ++ byte_src = access_get_byte(env, &srca, len_src - 1, ra); ++ byte_dest = (byte_dest & 0x0f) | (byte_src << 4); ++ access_set_byte(env, &desta, len_dest - 1, byte_dest, ra); ++ ++ /* Process remaining bytes from right to left */ ++ for (i = len_dest - 2, j = len_src - 2; i >= 0; i--, j--) { ++ byte_dest = byte_src >> 4; ++ if (j >= 0) { ++ byte_src = access_get_byte(env, &srca, j, ra); ++ } else { ++ byte_src = 0; ++ } ++ byte_dest |= byte_src << 4; ++ access_set_byte(env, &desta, i, byte_dest, ra); ++ } ++} ++ ++/* move zones */ ++void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src) ++{ ++ const int mmu_idx = cpu_mmu_index(env, false); ++ S390Access srca1, srca2, desta; ++ uintptr_t ra = GETPC(); ++ int i; ++ ++ /* MVZ always copies one more byte than specified - maximum is 256 */ ++ l++; ++ ++ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra); ++ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra); ++ desta = access_prepare(env, 
dest, l, MMU_DATA_STORE, mmu_idx, ra); ++ for (i = 0; i < l; i++) { ++ const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) | ++ (access_get_byte(env, &srca2, i, ra) & 0x0f); ++ ++ access_set_byte(env, &desta, i, x, ra); ++ } ++} ++ ++/* compare unsigned byte arrays */ ++static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1, ++ uint64_t s2, uintptr_t ra) ++{ ++ uint32_t i; ++ uint32_t cc = 0; ++ ++ HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n", ++ __func__, l, s1, s2); ++ ++ for (i = 0; i <= l; i++) { ++ uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra); ++ uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra); ++ HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y); ++ if (x < y) { ++ cc = 1; ++ break; ++ } else if (x > y) { ++ cc = 2; ++ break; ++ } ++ } ++ ++ HELPER_LOG("\n"); ++ return cc; ++} ++ ++uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2) ++{ ++ return do_helper_clc(env, l, s1, s2, GETPC()); ++} ++ ++/* compare logical under mask */ ++uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask, ++ uint64_t addr) ++{ ++ uintptr_t ra = GETPC(); ++ uint32_t cc = 0; ++ ++ HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1, ++ mask, addr); ++ ++ while (mask) { ++ if (mask & 8) { ++ uint8_t d = cpu_ldub_data_ra(env, addr, ra); ++ uint8_t r = extract32(r1, 24, 8); ++ HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d, ++ addr); ++ if (r < d) { ++ cc = 1; ++ break; ++ } else if (r > d) { ++ cc = 2; ++ break; ++ } ++ addr++; ++ } ++ mask = (mask << 1) & 0xf; ++ r1 <<= 8; ++ } ++ ++ HELPER_LOG("\n"); ++ return cc; ++} ++ ++static inline uint64_t get_address(CPUS390XState *env, int reg) ++{ ++ return wrap_address(env, env->regs[reg]); ++} ++ ++/* ++ * Store the address to the given register, zeroing out unused leftmost ++ * bits in bit positions 32-63 (24-bit and 31-bit mode only). ++ */ ++static inline void set_address_zero(CPUS390XState *env, int reg, ++ uint64_t address) ++{ ++ if (env->psw.mask & PSW_MASK_64) { ++ env->regs[reg] = address; ++ } else { ++ if (!(env->psw.mask & PSW_MASK_32)) { ++ address &= 0x00ffffff; ++ } else { ++ address &= 0x7fffffff; ++ } ++ env->regs[reg] = deposit64(env->regs[reg], 0, 32, address); ++ } ++} ++ ++static inline void set_address(CPUS390XState *env, int reg, uint64_t address) ++{ ++ if (env->psw.mask & PSW_MASK_64) { ++ /* 64-Bit mode */ ++ env->regs[reg] = address; ++ } else { ++ if (!(env->psw.mask & PSW_MASK_32)) { ++ /* 24-Bit mode. According to the PoO it is implementation ++ dependent if bits 32-39 remain unchanged or are set to ++ zeros. Choose the former so that the function can also be ++ used for TRT. */ ++ env->regs[reg] = deposit64(env->regs[reg], 0, 24, address); ++ } else { ++ /* 31-Bit mode. According to the PoO it is implementation ++ dependent if bit 32 remains unchanged or is set to zero. ++ Choose the latter so that the function can also be used for ++ TRT. 
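++               (SRST below, for example, reports the address of a found
++               character through this helper.)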
*/ ++ address &= 0x7fffffff; ++ env->regs[reg] = deposit64(env->regs[reg], 0, 32, address); ++ } ++ } ++} ++ ++static inline uint64_t wrap_length32(CPUS390XState *env, uint64_t length) ++{ ++ if (!(env->psw.mask & PSW_MASK_64)) { ++ return (uint32_t)length; ++ } ++ return length; ++} ++ ++static inline uint64_t wrap_length31(CPUS390XState *env, uint64_t length) ++{ ++ if (!(env->psw.mask & PSW_MASK_64)) { ++ /* 24-Bit and 31-Bit mode */ ++ length &= 0x7fffffff; ++ } ++ return length; ++} ++ ++static inline uint64_t get_length(CPUS390XState *env, int reg) ++{ ++ return wrap_length31(env, env->regs[reg]); ++} ++ ++static inline void set_length(CPUS390XState *env, int reg, uint64_t length) ++{ ++ if (env->psw.mask & PSW_MASK_64) { ++ /* 64-Bit mode */ ++ env->regs[reg] = length; ++ } else { ++ /* 24-Bit and 31-Bit mode */ ++ env->regs[reg] = deposit64(env->regs[reg], 0, 32, length); ++ } ++} ++ ++/* search string (c is byte to search, r2 is string, r1 end of string) */ ++void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2) ++{ ++ uintptr_t ra = GETPC(); ++ uint64_t end, str; ++ uint32_t len; ++ uint8_t v, c = env->regs[0]; ++ ++ /* Bits 32-55 must contain all 0. */ ++ if (env->regs[0] & 0xffffff00u) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ str = get_address(env, r2); ++ end = get_address(env, r1); ++ ++ /* Lest we fail to service interrupts in a timely manner, limit the ++ amount of work we're willing to do. For now, let's cap at 8k. */ ++ for (len = 0; len < 0x2000; ++len) { ++ if (str + len == end) { ++ /* Character not found. R1 & R2 are unmodified. */ ++ env->cc_op = 2; ++ return; ++ } ++ v = cpu_ldub_data_ra(env, str + len, ra); ++ if (v == c) { ++ /* Character found. Set R1 to the location; R2 is unmodified. */ ++ env->cc_op = 1; ++ set_address(env, r1, str + len); ++ return; ++ } ++ } ++ ++ /* CPU-determined bytes processed. Advance R2 to next byte to process. */ ++ env->cc_op = 3; ++ set_address(env, r2, str + len); ++} ++ ++void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2) ++{ ++ uintptr_t ra = GETPC(); ++ uint32_t len; ++ uint16_t v, c = env->regs[0]; ++ uint64_t end, str, adj_end; ++ ++ /* Bits 32-47 of R0 must be zero. */ ++ if (env->regs[0] & 0xffff0000u) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ str = get_address(env, r2); ++ end = get_address(env, r1); ++ ++ /* If the LSB of the two addresses differ, use one extra byte. */ ++ adj_end = end + ((str ^ end) & 1); ++ ++ /* Lest we fail to service interrupts in a timely manner, limit the ++ amount of work we're willing to do. For now, let's cap at 8k. */ ++ for (len = 0; len < 0x2000; len += 2) { ++ if (str + len == adj_end) { ++ /* End of input found. */ ++ env->cc_op = 2; ++ return; ++ } ++ v = cpu_lduw_data_ra(env, str + len, ra); ++ if (v == c) { ++ /* Character found. Set R1 to the location; R2 is unmodified. */ ++ env->cc_op = 1; ++ set_address(env, r1, str + len); ++ return; ++ } ++ } ++ ++ /* CPU-determined bytes processed. Advance R2 to next byte to process. */ ++ env->cc_op = 3; ++ set_address(env, r2, str + len); ++} ++ ++/* unsigned string compare (c is string terminator) */ ++uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2) ++{ ++ uintptr_t ra = GETPC(); ++ uint32_t len; ++ ++ c = c & 0xff; ++ s1 = wrap_address(env, s1); ++ s2 = wrap_address(env, s2); ++ ++ /* Lest we fail to service interrupts in a timely manner, limit the ++ amount of work we're willing to do. For now, let's cap at 8k. 
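++       Stopping early is fine: cc 3 below tells the guest that only a
++       CPU-determined number of bytes were compared, so it is expected
++       to re-execute CLST with the advanced addresses.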
*/ ++ for (len = 0; len < 0x2000; ++len) { ++ uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra); ++ uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra); ++ if (v1 == v2) { ++ if (v1 == c) { ++ /* Equal. CC=0, and don't advance the registers. */ ++ env->cc_op = 0; ++ env->retxl = s2; ++ return s1; ++ } ++ } else { ++ /* Unequal. CC={1,2}, and advance the registers. Note that ++ the terminator need not be zero, but the string that contains ++ the terminator is by definition "low". */ ++ env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2); ++ env->retxl = s2 + len; ++ return s1 + len; ++ } ++ } ++ ++ /* CPU-determined bytes equal; advance the registers. */ ++ env->cc_op = 3; ++ env->retxl = s2 + len; ++ return s1 + len; ++} ++ ++/* move page */ ++uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint32_t r1, uint32_t r2) ++{ ++ const uint64_t src = get_address(env, r2) & TARGET_PAGE_MASK; ++ const uint64_t dst = get_address(env, r1) & TARGET_PAGE_MASK; ++ const int mmu_idx = cpu_mmu_index(env, false); ++ const bool f = extract64(r0, 11, 1); ++ const bool s = extract64(r0, 10, 1); ++ const bool cco = extract64(r0, 8, 1); ++ uintptr_t ra = GETPC(); ++ S390Access srca, desta; ++ int exc; ++ ++ if ((f && s) || extract64(r0, 12, 4)) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); ++ } ++ ++ /* ++ * We always manually handle exceptions such that we can properly store ++ * r1/r2 to the lowcore on page-translation exceptions. ++ * ++ * TODO: Access key handling ++ */ ++ exc = access_prepare_nf(&srca, env, true, src, TARGET_PAGE_SIZE, ++ MMU_DATA_LOAD, mmu_idx, ra); ++ if (exc) { ++ if (cco) { ++ return 2; ++ } ++ goto inject_exc; ++ } ++ exc = access_prepare_nf(&desta, env, true, dst, TARGET_PAGE_SIZE, ++ MMU_DATA_STORE, mmu_idx, ra); ++ if (exc) { ++ if (cco && exc != PGM_PROTECTION) { ++ return 1; ++ } ++ goto inject_exc; ++ } ++ access_memmove(env, &desta, &srca, ra); ++ return 0; /* data moved */ ++inject_exc: ++#if !defined(CONFIG_USER_ONLY) ++ if (exc != PGM_ADDRESSING) { ++ stq_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, trans_exc_code), ++ env->tlb_fill_tec); ++ } ++ if (exc == PGM_PAGE_TRANS) { ++ stb_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, op_access_id), ++ r1 << 4 | r2); ++ } ++#endif ++ tcg_s390_program_interrupt(env, exc, ra); ++} ++ ++/* string copy */ ++uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2) ++{ ++ const int mmu_idx = cpu_mmu_index(env, false); ++ const uint64_t d = get_address(env, r1); ++ const uint64_t s = get_address(env, r2); ++ const uint8_t c = env->regs[0]; ++ const int len = MIN(-(d | TARGET_PAGE_MASK), -(s | TARGET_PAGE_MASK)); ++ S390Access srca, desta; ++ uintptr_t ra = GETPC(); ++ int i; ++ ++ if (env->regs[0] & 0xffffff00ull) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ /* ++ * Our access should not exceed single pages, as we must not report access ++ * exceptions exceeding the actually copied range (which we don't know at ++ * this point). We might over-indicate watchpoints within the pages ++ * (if we ever care, we have to limit processing to a single byte). 
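++     * For example, with 4 KiB pages and d == 0xff8, -(d | TARGET_PAGE_MASK)
++     * is 8, so at most 8 bytes are processed before we return cc 3 and let
++     * the guest re-drive MVST.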
++     */
++    srca = access_prepare(env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
++    desta = access_prepare(env, d, len, MMU_DATA_STORE, mmu_idx, ra);
++    for (i = 0; i < len; i++) {
++        const uint8_t v = access_get_byte(env, &srca, i, ra);
++
++        access_set_byte(env, &desta, i, v, ra);
++        if (v == c) {
++            set_address_zero(env, r1, d + i);
++            return 1;
++        }
++    }
++    set_address_zero(env, r1, d + len);
++    set_address_zero(env, r2, s + len);
++    return 3;
++}
++
++/* load access registers r1 to r3 from memory at a2 */
++void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
++{
++    uintptr_t ra = GETPC();
++    int i;
++
++    if (a2 & 0x3) {
++        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
++    }
++
++    for (i = r1;; i = (i + 1) % 16) {
++        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
++        a2 += 4;
++
++        if (i == r3) {
++            break;
++        }
++    }
++}
++
++/* store access registers r1 to r3 in memory at a2 */
++void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
++{
++    uintptr_t ra = GETPC();
++    int i;
++
++    if (a2 & 0x3) {
++        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
++    }
++
++    for (i = r1;; i = (i + 1) % 16) {
++        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
++        a2 += 4;
++
++        if (i == r3) {
++            break;
++        }
++    }
++}
++
++/* move long helper */
++static inline uint32_t do_mvcl(CPUS390XState *env,
++                               uint64_t *dest, uint64_t *destlen,
++                               uint64_t *src, uint64_t *srclen,
++                               uint16_t pad, int wordsize, uintptr_t ra)
++{
++    const int mmu_idx = cpu_mmu_index(env, false);
++    int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK));
++    S390Access srca, desta;
++    int i, cc;
++
++    if (*destlen == *srclen) {
++        cc = 0;
++    } else if (*destlen < *srclen) {
++        cc = 1;
++    } else {
++        cc = 2;
++    }
++
++    if (!*destlen) {
++        return cc;
++    }
++
++    /*
++     * Only perform one type of operation (move/pad) at a time.
++     * Stay within single pages.
++     */
++    if (*srclen) {
++        /* Copy the src array */
++        len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
++        *destlen -= len;
++        *srclen -= len;
++        srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
++        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
++        access_memmove(env, &desta, &srca, ra);
++        *src = wrap_address(env, *src + len);
++        *dest = wrap_address(env, *dest + len);
++    } else if (wordsize == 1) {
++        /* Pad the remaining area */
++        *destlen -= len;
++        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
++        access_memset(env, &desta, pad, ra);
++        *dest = wrap_address(env, *dest + len);
++    } else {
++        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
++
++        /* The remaining length selects the padding byte. */
++        for (i = 0; i < len; (*destlen)--, i++) {
++            if (*destlen & 1) {
++                access_set_byte(env, &desta, i, pad, ra);
++            } else {
++                access_set_byte(env, &desta, i, pad >> 8, ra);
++            }
++        }
++        *dest = wrap_address(env, *dest + len);
++    }
++
++    return *destlen ? 3 : cc;
++}
++
++/* move long */
++uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
++{
++    const int mmu_idx = cpu_mmu_index(env, false);
++    uintptr_t ra = GETPC();
++    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
++    uint64_t dest = get_address(env, r1);
++    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
++    uint64_t src = get_address(env, r2);
++    uint8_t pad = env->regs[r2 + 1] >> 24;
++    CPUState *cs = env_cpu(env);
++    S390Access srca, desta;
++    uint32_t cc, cur_len;
++
++    if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) {
++        cc = 3;
++    } else if (srclen == destlen) {
++        cc = 0;
++    } else if (destlen < srclen) {
++        cc = 1;
++    } else {
++        cc = 2;
++    }
++
++    /* We might have to zero-out some bits even if there was no action. */
++    if (unlikely(!destlen || cc == 3)) {
++        set_address_zero(env, r2, src);
++        set_address_zero(env, r1, dest);
++        return cc;
++    } else if (!srclen) {
++        set_address_zero(env, r2, src);
++    }
++
++    /*
++     * Only perform one type of operation (move/pad) in one step.
++     * Stay within single pages.
++     */
++    while (destlen) {
++        cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK));
++        if (!srclen) {
++            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
++                                   ra);
++            access_memset(env, &desta, pad, ra);
++        } else {
++            cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);
++
++            srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx,
++                                  ra);
++            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
++                                   ra);
++            access_memmove(env, &desta, &srca, ra);
++            src = wrap_address(env, src + cur_len);
++            srclen -= cur_len;
++            env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
++            set_address_zero(env, r2, src);
++        }
++        dest = wrap_address(env, dest + cur_len);
++        destlen -= cur_len;
++        env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
++        set_address_zero(env, r1, dest);
++
++        /*
++         * MVCL is interruptible. Return to the main loop if requested after
++         * writing back all state to registers. If no interrupt gets
++         * injected, we'll end up back in this handler and continue
++         * processing the remaining parts.
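++         * cpu_loop_exit_requested() merely polls the per-CPU exit-request
++         * flag, so checking it once per page-sized chunk is cheap.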
++ */ ++ if (destlen && unlikely(cpu_loop_exit_requested(cs))) { ++ cpu_loop_exit_restore(cs, ra); ++ } ++ } ++ return cc; ++} ++ ++/* move long extended */ ++uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2, ++ uint32_t r3) ++{ ++ uintptr_t ra = GETPC(); ++ uint64_t destlen = get_length(env, r1 + 1); ++ uint64_t dest = get_address(env, r1); ++ uint64_t srclen = get_length(env, r3 + 1); ++ uint64_t src = get_address(env, r3); ++ uint8_t pad = a2; ++ uint32_t cc; ++ ++ cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra); ++ ++ set_length(env, r1 + 1, destlen); ++ set_length(env, r3 + 1, srclen); ++ set_address(env, r1, dest); ++ set_address(env, r3, src); ++ ++ return cc; ++} ++ ++/* move long unicode */ ++uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2, ++ uint32_t r3) ++{ ++ uintptr_t ra = GETPC(); ++ uint64_t destlen = get_length(env, r1 + 1); ++ uint64_t dest = get_address(env, r1); ++ uint64_t srclen = get_length(env, r3 + 1); ++ uint64_t src = get_address(env, r3); ++ uint16_t pad = a2; ++ uint32_t cc; ++ ++ cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra); ++ ++ set_length(env, r1 + 1, destlen); ++ set_length(env, r3 + 1, srclen); ++ set_address(env, r1, dest); ++ set_address(env, r3, src); ++ ++ return cc; ++} ++ ++/* compare logical long helper */ ++static inline uint32_t do_clcl(CPUS390XState *env, ++ uint64_t *src1, uint64_t *src1len, ++ uint64_t *src3, uint64_t *src3len, ++ uint16_t pad, uint64_t limit, ++ int wordsize, uintptr_t ra) ++{ ++ uint64_t len = MAX(*src1len, *src3len); ++ uint32_t cc = 0; ++ ++ check_alignment(env, *src1len | *src3len, wordsize, ra); ++ ++ if (!len) { ++ return cc; ++ } ++ ++ /* Lest we fail to service interrupts in a timely manner, limit the ++ amount of work we're willing to do. */ ++ if (len > limit) { ++ len = limit; ++ cc = 3; ++ } ++ ++ for (; len; len -= wordsize) { ++ uint16_t v1 = pad; ++ uint16_t v3 = pad; ++ ++ if (*src1len) { ++ v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra); ++ } ++ if (*src3len) { ++ v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra); ++ } ++ ++ if (v1 != v3) { ++ cc = (v1 < v3) ? 
1 : 2; ++ break; ++ } ++ ++ if (*src1len) { ++ *src1 += wordsize; ++ *src1len -= wordsize; ++ } ++ if (*src3len) { ++ *src3 += wordsize; ++ *src3len -= wordsize; ++ } ++ } ++ ++ return cc; ++} ++ ++ ++/* compare logical long */ ++uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2) ++{ ++ uintptr_t ra = GETPC(); ++ uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24); ++ uint64_t src1 = get_address(env, r1); ++ uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24); ++ uint64_t src3 = get_address(env, r2); ++ uint8_t pad = env->regs[r2 + 1] >> 24; ++ uint32_t cc; ++ ++ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra); ++ ++ env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len); ++ env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len); ++ set_address(env, r1, src1); ++ set_address(env, r2, src3); ++ ++ return cc; ++} ++ ++/* compare logical long extended memcompare insn with padding */ ++uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2, ++ uint32_t r3) ++{ ++ uintptr_t ra = GETPC(); ++ uint64_t src1len = get_length(env, r1 + 1); ++ uint64_t src1 = get_address(env, r1); ++ uint64_t src3len = get_length(env, r3 + 1); ++ uint64_t src3 = get_address(env, r3); ++ uint8_t pad = a2; ++ uint32_t cc; ++ ++ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra); ++ ++ set_length(env, r1 + 1, src1len); ++ set_length(env, r3 + 1, src3len); ++ set_address(env, r1, src1); ++ set_address(env, r3, src3); ++ ++ return cc; ++} ++ ++/* compare logical long unicode memcompare insn with padding */ ++uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2, ++ uint32_t r3) ++{ ++ uintptr_t ra = GETPC(); ++ uint64_t src1len = get_length(env, r1 + 1); ++ uint64_t src1 = get_address(env, r1); ++ uint64_t src3len = get_length(env, r3 + 1); ++ uint64_t src3 = get_address(env, r3); ++ uint16_t pad = a2; ++ uint32_t cc = 0; ++ ++ cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra); ++ ++ set_length(env, r1 + 1, src1len); ++ set_length(env, r3 + 1, src3len); ++ set_address(env, r1, src1); ++ set_address(env, r3, src3); ++ ++ return cc; ++} ++ ++/* checksum */ ++uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1, ++ uint64_t src, uint64_t src_len) ++{ ++ uintptr_t ra = GETPC(); ++ uint64_t max_len, len; ++ uint64_t cksm = (uint32_t)r1; ++ ++ /* Lest we fail to service interrupts in a timely manner, limit the ++ amount of work we're willing to do. For now, let's cap at 8k. */ ++ max_len = (src_len > 0x2000 ? 0x2000 : src_len); ++ ++ /* Process full words as available. */ ++ for (len = 0; len + 4 <= max_len; len += 4, src += 4) { ++ cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra); ++ } ++ ++ switch (max_len - len) { ++ case 1: ++ cksm += cpu_ldub_data_ra(env, src, ra) << 24; ++ len += 1; ++ break; ++ case 2: ++ cksm += cpu_lduw_data_ra(env, src, ra) << 16; ++ len += 2; ++ break; ++ case 3: ++ cksm += cpu_lduw_data_ra(env, src, ra) << 16; ++ cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8; ++ len += 3; ++ break; ++ } ++ ++ /* Fold the carry from the checksum. Note that we can see carry-out ++ during folding more than once (but probably not more than twice). */ ++ while (cksm > 0xffffffffull) { ++ cksm = (uint32_t)cksm + (cksm >> 32); ++ } ++ ++ /* Indicate whether or not we've processed everything. */ ++ env->cc_op = (len == src_len ? 0 : 3); ++ ++ /* Return both cksm and processed length. 
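++       (cksm was folded to 32 bits above: e.g. an intermediate sum of
++       0x100000001 folds to 0x00000002.)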
*/ ++ env->retxl = cksm; ++ return len; ++} ++ ++void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src) ++{ ++ uintptr_t ra = GETPC(); ++ int len_dest = len >> 4; ++ int len_src = len & 0xf; ++ uint8_t b; ++ ++ dest += len_dest; ++ src += len_src; ++ ++ /* last byte is special, it only flips the nibbles */ ++ b = cpu_ldub_data_ra(env, src, ra); ++ cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra); ++ src--; ++ len_src--; ++ ++ /* now pack every value */ ++ while (len_dest > 0) { ++ b = 0; ++ ++ if (len_src >= 0) { ++ b = cpu_ldub_data_ra(env, src, ra) & 0x0f; ++ src--; ++ len_src--; ++ } ++ if (len_src >= 0) { ++ b |= cpu_ldub_data_ra(env, src, ra) << 4; ++ src--; ++ len_src--; ++ } ++ ++ len_dest--; ++ dest--; ++ cpu_stb_data_ra(env, dest, b, ra); ++ } ++} ++ ++static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src, ++ uint32_t srclen, int ssize, uintptr_t ra) ++{ ++ int i; ++ /* The destination operand is always 16 bytes long. */ ++ const int destlen = 16; ++ ++ /* The operands are processed from right to left. */ ++ src += srclen - 1; ++ dest += destlen - 1; ++ ++ for (i = 0; i < destlen; i++) { ++ uint8_t b = 0; ++ ++ /* Start with a positive sign */ ++ if (i == 0) { ++ b = 0xc; ++ } else if (srclen > ssize) { ++ b = cpu_ldub_data_ra(env, src, ra) & 0x0f; ++ src -= ssize; ++ srclen -= ssize; ++ } ++ ++ if (srclen > ssize) { ++ b |= cpu_ldub_data_ra(env, src, ra) << 4; ++ src -= ssize; ++ srclen -= ssize; ++ } ++ ++ cpu_stb_data_ra(env, dest, b, ra); ++ dest--; ++ } ++} ++ ++ ++void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src, ++ uint32_t srclen) ++{ ++ do_pkau(env, dest, src, srclen, 1, GETPC()); ++} ++ ++void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src, ++ uint32_t srclen) ++{ ++ do_pkau(env, dest, src, srclen, 2, GETPC()); ++} ++ ++void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest, ++ uint64_t src) ++{ ++ uintptr_t ra = GETPC(); ++ int len_dest = len >> 4; ++ int len_src = len & 0xf; ++ uint8_t b; ++ int second_nibble = 0; ++ ++ dest += len_dest; ++ src += len_src; ++ ++ /* last byte is special, it only flips the nibbles */ ++ b = cpu_ldub_data_ra(env, src, ra); ++ cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra); ++ src--; ++ len_src--; ++ ++ /* now pad every nibble with 0xf0 */ ++ ++ while (len_dest > 0) { ++ uint8_t cur_byte = 0; ++ ++ if (len_src > 0) { ++ cur_byte = cpu_ldub_data_ra(env, src, ra); ++ } ++ ++ len_dest--; ++ dest--; ++ ++ /* only advance one nibble at a time */ ++ if (second_nibble) { ++ cur_byte >>= 4; ++ len_src--; ++ src--; ++ } ++ second_nibble = !second_nibble; ++ ++ /* digit */ ++ cur_byte = (cur_byte & 0xf); ++ /* zone bits */ ++ cur_byte |= 0xf0; ++ ++ cpu_stb_data_ra(env, dest, cur_byte, ra); ++ } ++} ++ ++static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest, ++ uint32_t destlen, int dsize, uint64_t src, ++ uintptr_t ra) ++{ ++ int i; ++ uint32_t cc; ++ uint8_t b; ++ /* The source operand is always 16 bytes long. */ ++ const int srclen = 16; ++ ++ /* The operands are processed from right to left. */ ++ src += srclen - 1; ++ dest += destlen - dsize; ++ ++ /* Check for the sign. */ ++ b = cpu_ldub_data_ra(env, src, ra); ++ src--; ++ switch (b & 0xf) { ++ case 0xa: ++ case 0xc: ++ case 0xe ... 0xf: ++ cc = 0; /* plus */ ++ break; ++ case 0xb: ++ case 0xd: ++ cc = 1; /* minus */ ++ break; ++ default: ++ case 0x0 ... 0x9: ++ cc = 3; /* invalid */ ++ break; ++ } ++ ++ /* Now pad every nibble with 0x30, advancing one nibble at a time. 
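++       E.g. the packed digit 0x7 becomes the zoned byte 0x37 ('7' in
++       ASCII for UNPKA, U+0037 for UNPKU).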
*/ ++ for (i = 0; i < destlen; i += dsize) { ++ if (i == (31 * dsize)) { ++ /* If length is 32/64 bytes, the leftmost byte is 0. */ ++ b = 0; ++ } else if (i % (2 * dsize)) { ++ b = cpu_ldub_data_ra(env, src, ra); ++ src--; ++ } else { ++ b >>= 4; ++ } ++ cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra); ++ dest -= dsize; ++ } ++ ++ return cc; ++} ++ ++uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen, ++ uint64_t src) ++{ ++ return do_unpkau(env, dest, destlen, 1, src, GETPC()); ++} ++ ++uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen, ++ uint64_t src) ++{ ++ return do_unpkau(env, dest, destlen, 2, src, GETPC()); ++} ++ ++uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen) ++{ ++ uintptr_t ra = GETPC(); ++ uint32_t cc = 0; ++ int i; ++ ++ for (i = 0; i < destlen; i++) { ++ uint8_t b = cpu_ldub_data_ra(env, dest + i, ra); ++ /* digit */ ++ cc |= (b & 0xf0) > 0x90 ? 2 : 0; ++ ++ if (i == (destlen - 1)) { ++ /* sign */ ++ cc |= (b & 0xf) < 0xa ? 1 : 0; ++ } else { ++ /* digit */ ++ cc |= (b & 0xf) > 0x9 ? 2 : 0; ++ } ++ } ++ ++ return cc; ++} ++ ++static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array, ++ uint64_t trans, uintptr_t ra) ++{ ++ uint32_t i; ++ ++ for (i = 0; i <= len; i++) { ++ uint8_t byte = cpu_ldub_data_ra(env, array + i, ra); ++ uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra); ++ cpu_stb_data_ra(env, array + i, new_byte, ra); ++ } ++ ++ return env->cc_op; ++} ++ ++void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array, ++ uint64_t trans) ++{ ++ do_helper_tr(env, len, array, trans, GETPC()); ++} ++ ++uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array, ++ uint64_t len, uint64_t trans) ++{ ++ uintptr_t ra = GETPC(); ++ uint8_t end = env->regs[0] & 0xff; ++ uint64_t l = len; ++ uint64_t i; ++ uint32_t cc = 0; ++ ++ if (!(env->psw.mask & PSW_MASK_64)) { ++ array &= 0x7fffffff; ++ l = (uint32_t)l; ++ } ++ ++ /* Lest we fail to service interrupts in a timely manner, limit the ++ amount of work we're willing to do. For now, let's cap at 8k. */ ++ if (l > 0x2000) { ++ l = 0x2000; ++ cc = 3; ++ } ++ ++ for (i = 0; i < l; i++) { ++ uint8_t byte, new_byte; ++ ++ byte = cpu_ldub_data_ra(env, array + i, ra); ++ ++ if (byte == end) { ++ cc = 1; ++ break; ++ } ++ ++ new_byte = cpu_ldub_data_ra(env, trans + byte, ra); ++ cpu_stb_data_ra(env, array + i, new_byte, ra); ++ } ++ ++ env->cc_op = cc; ++ env->retxl = len - i; ++ return array + i; ++} ++ ++static inline uint32_t do_helper_trt(CPUS390XState *env, int len, ++ uint64_t array, uint64_t trans, ++ int inc, uintptr_t ra) ++{ ++ int i; ++ ++ for (i = 0; i <= len; i++) { ++ uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra); ++ uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra); ++ ++ if (sbyte != 0) { ++ set_address(env, 1, array + i * inc); ++ env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte); ++ return (i == len) ? 
2 : 1; ++ } ++ } ++ ++ return 0; ++} ++ ++static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len, ++ uint64_t array, uint64_t trans, ++ uintptr_t ra) ++{ ++ return do_helper_trt(env, len, array, trans, 1, ra); ++} ++ ++uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array, ++ uint64_t trans) ++{ ++ return do_helper_trt(env, len, array, trans, 1, GETPC()); ++} ++ ++static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len, ++ uint64_t array, uint64_t trans, ++ uintptr_t ra) ++{ ++ return do_helper_trt(env, len, array, trans, -1, ra); ++} ++ ++uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array, ++ uint64_t trans) ++{ ++ return do_helper_trt(env, len, array, trans, -1, GETPC()); ++} ++ ++/* Translate one/two to one/two */ ++uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2, ++ uint32_t tst, uint32_t sizes) ++{ ++ uintptr_t ra = GETPC(); ++ int dsize = (sizes & 1) ? 1 : 2; ++ int ssize = (sizes & 2) ? 1 : 2; ++ uint64_t tbl = get_address(env, 1); ++ uint64_t dst = get_address(env, r1); ++ uint64_t len = get_length(env, r1 + 1); ++ uint64_t src = get_address(env, r2); ++ uint32_t cc = 3; ++ int i; ++ ++ /* The lower address bits of TBL are ignored. For TROO, TROT, it's ++ the low 3 bits (double-word aligned). For TRTO, TRTT, it's either ++ the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */ ++ if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) { ++ tbl &= -4096; ++ } else { ++ tbl &= -8; ++ } ++ ++ check_alignment(env, len, ssize, ra); ++ ++ /* Lest we fail to service interrupts in a timely manner, */ ++ /* limit the amount of work we're willing to do. */ ++ for (i = 0; i < 0x2000; i++) { ++ uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra); ++ uint64_t tble = tbl + (sval * dsize); ++ uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra); ++ if (dval == tst) { ++ cc = 1; ++ break; ++ } ++ cpu_stsize_data_ra(env, dst, dval, dsize, ra); ++ ++ len -= ssize; ++ src += ssize; ++ dst += dsize; ++ ++ if (len == 0) { ++ cc = 0; ++ break; ++ } ++ } ++ ++ set_address(env, r1, dst); ++ set_length(env, r1 + 1, len); ++ set_address(env, r2, src); ++ ++ return cc; ++} ++ ++void HELPER(cdsg)(CPUS390XState *env, uint64_t addr, ++ uint32_t r1, uint32_t r3) ++{ ++ uintptr_t ra = GETPC(); ++ Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); ++ Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); ++ Int128 oldv; ++ uint64_t oldh, oldl; ++ bool fail; ++ ++ check_alignment(env, addr, 16, ra); ++ ++ oldh = cpu_ldq_data_ra(env, addr + 0, ra); ++ oldl = cpu_ldq_data_ra(env, addr + 8, ra); ++ ++ oldv = int128_make128(oldl, oldh); ++ fail = !int128_eq(oldv, cmpv); ++ if (fail) { ++ newv = oldv; ++ } ++ ++ cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra); ++ cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra); ++ ++ env->cc_op = fail; ++ env->regs[r1] = int128_gethi(oldv); ++ env->regs[r1 + 1] = int128_getlo(oldv); ++} ++ ++void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr, ++ uint32_t r1, uint32_t r3) ++{ ++ uintptr_t ra = GETPC(); ++ Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); ++ Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); ++ int mem_idx; ++ TCGMemOpIdx oi; ++ Int128 oldv; ++ bool fail; ++ ++ assert(HAVE_CMPXCHG128); ++ ++ mem_idx = cpu_mmu_index(env, false); ++ oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); ++ oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); ++ fail = !int128_eq(oldv, cmpv); ++ ++ 
env->cc_op = fail; ++ env->regs[r1] = int128_gethi(oldv); ++ env->regs[r1 + 1] = int128_getlo(oldv); ++} ++ ++static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, ++ uint64_t a2, bool parallel) ++{ ++ uint32_t mem_idx = cpu_mmu_index(env, false); ++ uintptr_t ra = GETPC(); ++ uint32_t fc = extract32(env->regs[0], 0, 8); ++ uint32_t sc = extract32(env->regs[0], 8, 8); ++ uint64_t pl = get_address(env, 1) & -16; ++ uint64_t svh, svl; ++ uint32_t cc; ++ ++ /* Sanity check the function code and storage characteristic. */ ++ if (fc > 1 || sc > 3) { ++ if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) { ++ goto spec_exception; ++ } ++ if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) { ++ goto spec_exception; ++ } ++ } ++ ++ /* Sanity check the alignments. */ ++ if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) { ++ goto spec_exception; ++ } ++ ++ /* Sanity check writability of the store address. */ ++ probe_write(env, a2, 1 << sc, mem_idx, ra); ++ ++ /* ++ * Note that the compare-and-swap is atomic, and the store is atomic, ++ * but the complete operation is not. Therefore we do not need to ++ * assert serial context in order to implement this. That said, ++ * restart early if we can't support either operation that is supposed ++ * to be atomic. ++ */ ++ if (parallel) { ++ uint32_t max = 2; ++#ifdef CONFIG_ATOMIC64 ++ max = 3; ++#endif ++ if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) || ++ (HAVE_ATOMIC128 ? 0 : sc > max)) { ++ cpu_loop_exit_atomic(env_cpu(env), ra); ++ } ++ } ++ ++ /* All loads happen before all stores. For simplicity, load the entire ++ store value area from the parameter list. */ ++ svh = cpu_ldq_data_ra(env, pl + 16, ra); ++ svl = cpu_ldq_data_ra(env, pl + 24, ra); ++ ++ switch (fc) { ++ case 0: ++ { ++ uint32_t nv = cpu_ldl_data_ra(env, pl, ra); ++ uint32_t cv = env->regs[r3]; ++ uint32_t ov; ++ ++ if (parallel) { ++#ifdef CONFIG_USER_ONLY ++ uint32_t *haddr = g2h(env_cpu(env), a1); ++ ov = qatomic_cmpxchg__nocheck(haddr, cv, nv); ++#else ++ TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx); ++ ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra); ++#endif ++ } else { ++ ov = cpu_ldl_data_ra(env, a1, ra); ++ cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra); ++ } ++ cc = (ov != cv); ++ env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov); ++ } ++ break; ++ ++ case 1: ++ { ++ uint64_t nv = cpu_ldq_data_ra(env, pl, ra); ++ uint64_t cv = env->regs[r3]; ++ uint64_t ov; ++ ++ if (parallel) { ++#ifdef CONFIG_ATOMIC64 ++# ifdef CONFIG_USER_ONLY ++ uint64_t *haddr = g2h(env_cpu(env), a1); ++ ov = qatomic_cmpxchg__nocheck(haddr, cv, nv); ++# else ++ TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx); ++ ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra); ++# endif ++#else ++ /* Note that we asserted !parallel above. */ ++ g_assert_not_reached(); ++#endif ++ } else { ++ ov = cpu_ldq_data_ra(env, a1, ra); ++ cpu_stq_data_ra(env, a1, (ov == cv ? 
nv : ov), ra); ++ } ++ cc = (ov != cv); ++ env->regs[r3] = ov; ++ } ++ break; ++ ++ case 2: ++ { ++ uint64_t nvh = cpu_ldq_data_ra(env, pl, ra); ++ uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra); ++ Int128 nv = int128_make128(nvl, nvh); ++ Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]); ++ Int128 ov; ++ ++ if (!parallel) { ++ uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra); ++ uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra); ++ ++ ov = int128_make128(ol, oh); ++ cc = !int128_eq(ov, cv); ++ if (cc) { ++ nv = ov; ++ } ++ ++ cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra); ++ cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); ++ } else if (HAVE_CMPXCHG128) { ++ TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); ++ ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); ++ cc = !int128_eq(ov, cv); ++ } else { ++ /* Note that we asserted !parallel above. */ ++ g_assert_not_reached(); ++ } ++ ++ env->regs[r3 + 0] = int128_gethi(ov); ++ env->regs[r3 + 1] = int128_getlo(ov); ++ } ++ break; ++ ++ default: ++ g_assert_not_reached(); ++ } ++ ++ /* Store only if the comparison succeeded. Note that above we use a pair ++ of 64-bit big-endian loads, so for sc < 3 we must extract the value ++ from the most-significant bits of svh. */ ++ if (cc == 0) { ++ switch (sc) { ++ case 0: ++ cpu_stb_data_ra(env, a2, svh >> 56, ra); ++ break; ++ case 1: ++ cpu_stw_data_ra(env, a2, svh >> 48, ra); ++ break; ++ case 2: ++ cpu_stl_data_ra(env, a2, svh >> 32, ra); ++ break; ++ case 3: ++ cpu_stq_data_ra(env, a2, svh, ra); ++ break; ++ case 4: ++ if (!parallel) { ++ cpu_stq_data_ra(env, a2 + 0, svh, ra); ++ cpu_stq_data_ra(env, a2 + 8, svl, ra); ++ } else if (HAVE_ATOMIC128) { ++ TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); ++ Int128 sv = int128_make128(svl, svh); ++ helper_atomic_sto_be_mmu(env, a2, sv, oi, ra); ++ } else { ++ /* Note that we asserted !parallel above. 
*/ ++ g_assert_not_reached(); ++ } ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ } ++ ++ return cc; ++ ++ spec_exception: ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++} ++ ++uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2) ++{ ++ return do_csst(env, r3, a1, a2, false); ++} ++ ++uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1, ++ uint64_t a2) ++{ ++ return do_csst(env, r3, a1, a2, true); ++} ++ ++#if !defined(CONFIG_USER_ONLY) ++void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) ++{ ++ uintptr_t ra = GETPC(); ++ bool PERchanged = false; ++ uint64_t src = a2; ++ uint32_t i; ++ ++ if (src & 0x7) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ for (i = r1;; i = (i + 1) % 16) { ++ uint64_t val = cpu_ldq_data_ra(env, src, ra); ++ if (env->cregs[i] != val && i >= 9 && i <= 11) { ++ PERchanged = true; ++ } ++ env->cregs[i] = val; ++ HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n", ++ i, src, val); ++ src += sizeof(uint64_t); ++ ++ if (i == r3) { ++ break; ++ } ++ } ++ ++ if (PERchanged && env->psw.mask & PSW_MASK_PER) { ++ s390_cpu_recompute_watchpoints(env_cpu(env)); ++ } ++ ++ tlb_flush(env_cpu(env)); ++} ++ ++void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) ++{ ++ uintptr_t ra = GETPC(); ++ bool PERchanged = false; ++ uint64_t src = a2; ++ uint32_t i; ++ ++ if (src & 0x3) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ for (i = r1;; i = (i + 1) % 16) { ++ uint32_t val = cpu_ldl_data_ra(env, src, ra); ++ if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) { ++ PERchanged = true; ++ } ++ env->cregs[i] = deposit64(env->cregs[i], 0, 32, val); ++ HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val); ++ src += sizeof(uint32_t); ++ ++ if (i == r3) { ++ break; ++ } ++ } ++ ++ if (PERchanged && env->psw.mask & PSW_MASK_PER) { ++ s390_cpu_recompute_watchpoints(env_cpu(env)); ++ } ++ ++ tlb_flush(env_cpu(env)); ++} ++ ++void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) ++{ ++ uintptr_t ra = GETPC(); ++ uint64_t dest = a2; ++ uint32_t i; ++ ++ if (dest & 0x7) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ for (i = r1;; i = (i + 1) % 16) { ++ cpu_stq_data_ra(env, dest, env->cregs[i], ra); ++ dest += sizeof(uint64_t); ++ ++ if (i == r3) { ++ break; ++ } ++ } ++} ++ ++void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) ++{ ++ uintptr_t ra = GETPC(); ++ uint64_t dest = a2; ++ uint32_t i; ++ ++ if (dest & 0x3) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ for (i = r1;; i = (i + 1) % 16) { ++ cpu_stl_data_ra(env, dest, env->cregs[i], ra); ++ dest += sizeof(uint32_t); ++ ++ if (i == r3) { ++ break; ++ } ++ } ++} ++ ++uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr) ++{ ++ uintptr_t ra = GETPC(); ++ int i; ++ ++ real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK; ++ ++ for (i = 0; i < TARGET_PAGE_SIZE; i += 8) { ++ cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra); ++ } ++ ++ return 0; ++} ++ ++uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ CPUState *cs = env_cpu(env); ++ ++ /* ++ * TODO: we currently don't handle all access protection types ++ * (including access-list and key-controlled) as well as AR mode. 
++ */ ++ if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) { ++ /* Fetching permitted; storing permitted */ ++ return 0; ++ } ++ ++ if (env->int_pgm_code == PGM_PROTECTION) { ++ /* retry if reading is possible */ ++ cs->exception_index = -1; ++ if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) { ++ /* Fetching permitted; storing not permitted */ ++ return 1; ++ } ++ } ++ ++ switch (env->int_pgm_code) { ++ case PGM_PROTECTION: ++ /* Fetching not permitted; storing not permitted */ ++ cs->exception_index = -1; ++ return 2; ++ case PGM_ADDRESSING: ++ case PGM_TRANS_SPEC: ++ /* exceptions forwarded to the guest */ ++ s390_cpu_virt_mem_handle_exc(cpu, GETPC()); ++ return 0; ++ } ++ ++ /* Translation not available */ ++ cs->exception_index = -1; ++ return 3; ++} ++ ++/* insert storage key extended */ ++uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ static S390SKeysState *ss; ++ static S390SKeysClass *skeyclass; ++ uint64_t addr = wrap_address(env, r2); ++ uint8_t key; ++ ++ if (addr > ms->ram_size) { ++ return 0; ++ } ++ ++ if (unlikely(!ss)) { ++ ss = s390_get_skeys_device(); ++ skeyclass = S390_SKEYS_GET_CLASS(ss); ++ } ++ ++ if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) { ++ return 0; ++ } ++ return key; ++} ++ ++/* set storage key extended */ ++void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ static S390SKeysState *ss; ++ static S390SKeysClass *skeyclass; ++ uint64_t addr = wrap_address(env, r2); ++ uint8_t key; ++ ++ if (addr > ms->ram_size) { ++ return; ++ } ++ ++ if (unlikely(!ss)) { ++ ss = s390_get_skeys_device(); ++ skeyclass = S390_SKEYS_GET_CLASS(ss); ++ } ++ ++ key = (uint8_t) r1; ++ skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key); ++ /* ++ * As we can only flush by virtual address and not all the entries ++ * that point to a physical address we have to flush the whole TLB. ++ */ ++ tlb_flush_all_cpus_synced(env_cpu(env)); ++} ++ ++/* reset reference bit extended */ ++uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ static S390SKeysState *ss; ++ static S390SKeysClass *skeyclass; ++ uint8_t re, key; ++ ++ if (r2 > ms->ram_size) { ++ return 0; ++ } ++ ++ if (unlikely(!ss)) { ++ ss = s390_get_skeys_device(); ++ skeyclass = S390_SKEYS_GET_CLASS(ss); ++ } ++ ++ if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) { ++ return 0; ++ } ++ ++ re = key & (SK_R | SK_C); ++ key &= ~SK_R; ++ ++ if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) { ++ return 0; ++ } ++ /* ++ * As we can only flush by virtual address and not all the entries ++ * that point to a physical address we have to flush the whole TLB. 
++ */ ++ tlb_flush_all_cpus_synced(env_cpu(env)); ++ ++ /* ++ * cc ++ * ++ * 0 Reference bit zero; change bit zero ++ * 1 Reference bit zero; change bit one ++ * 2 Reference bit one; change bit zero ++ * 3 Reference bit one; change bit one ++ */ ++ ++ return re >> 1; ++} ++ ++uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) ++{ ++ const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; ++ S390Access srca, desta; ++ uintptr_t ra = GETPC(); ++ int cc = 0; ++ ++ HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n", ++ __func__, l, a1, a2); ++ ++ if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) || ++ psw_as == AS_HOME || psw_as == AS_ACCREG) { ++ s390_program_interrupt(env, PGM_SPECIAL_OP, ra); ++ } ++ ++ l = wrap_length32(env, l); ++ if (l > 256) { ++ /* max 256 */ ++ l = 256; ++ cc = 3; ++ } else if (!l) { ++ return cc; ++ } ++ ++ /* TODO: Access key handling */ ++ srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra); ++ desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra); ++ access_memmove(env, &desta, &srca, ra); ++ return cc; ++} ++ ++uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2) ++{ ++ const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; ++ S390Access srca, desta; ++ uintptr_t ra = GETPC(); ++ int cc = 0; ++ ++ HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n", ++ __func__, l, a1, a2); ++ ++ if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) || ++ psw_as == AS_HOME || psw_as == AS_ACCREG) { ++ s390_program_interrupt(env, PGM_SPECIAL_OP, ra); ++ } ++ ++ l = wrap_length32(env, l); ++ if (l > 256) { ++ /* max 256 */ ++ l = 256; ++ cc = 3; ++ } else if (!l) { ++ return cc; ++ } ++ ++ /* TODO: Access key handling */ ++ srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra); ++ desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra); ++ access_memmove(env, &desta, &srca, ra); ++ return cc; ++} ++ ++void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4) ++{ ++ CPUState *cs = env_cpu(env); ++ const uintptr_t ra = GETPC(); ++ uint64_t table, entry, raddr; ++ uint16_t entries, i, index = 0; ++ ++ if (r2 & 0xff000) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ if (!(r2 & 0x800)) { ++ /* invalidation-and-clearing operation */ ++ table = r1 & ASCE_ORIGIN; ++ entries = (r2 & 0x7ff) + 1; ++ ++ switch (r1 & ASCE_TYPE_MASK) { ++ case ASCE_TYPE_REGION1: ++ index = (r2 >> 53) & 0x7ff; ++ break; ++ case ASCE_TYPE_REGION2: ++ index = (r2 >> 42) & 0x7ff; ++ break; ++ case ASCE_TYPE_REGION3: ++ index = (r2 >> 31) & 0x7ff; ++ break; ++ case ASCE_TYPE_SEGMENT: ++ index = (r2 >> 20) & 0x7ff; ++ break; ++ } ++ for (i = 0; i < entries; i++) { ++ /* addresses are not wrapped in 24/31bit mode but table index is */ ++ raddr = table + ((index + i) & 0x7ff) * sizeof(entry); ++ entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra); ++ if (!(entry & REGION_ENTRY_I)) { ++ /* we are allowed to not store if already invalid */ ++ entry |= REGION_ENTRY_I; ++ cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra); ++ } ++ } ++ } ++ ++ /* We simply flush the complete tlb, therefore we can ignore r3. 
*/ ++ if (m4 & 1) { ++ tlb_flush(cs); ++ } else { ++ tlb_flush_all_cpus_synced(cs); ++ } ++} ++ ++/* invalidate pte */ ++void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr, ++ uint32_t m4) ++{ ++ CPUState *cs = env_cpu(env); ++ const uintptr_t ra = GETPC(); ++ uint64_t page = vaddr & TARGET_PAGE_MASK; ++ uint64_t pte_addr, pte; ++ ++ /* Compute the page table entry address */ ++ pte_addr = (pto & SEGMENT_ENTRY_ORIGIN); ++ pte_addr += VADDR_PAGE_TX(vaddr) * 8; ++ ++ /* Mark the page table entry as invalid */ ++ pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra); ++ pte |= PAGE_ENTRY_I; ++ cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra); ++ ++ /* XXX we exploit the fact that Linux passes the exact virtual ++ address here - it's not obliged to! */ ++ if (m4 & 1) { ++ if (vaddr & ~VADDR_PAGE_TX_MASK) { ++ tlb_flush_page(cs, page); ++ /* XXX 31-bit hack */ ++ tlb_flush_page(cs, page ^ 0x80000000); ++ } else { ++ /* looks like we don't have a valid virtual address */ ++ tlb_flush(cs); ++ } ++ } else { ++ if (vaddr & ~VADDR_PAGE_TX_MASK) { ++ tlb_flush_page_all_cpus_synced(cs, page); ++ /* XXX 31-bit hack */ ++ tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000); ++ } else { ++ /* looks like we don't have a valid virtual address */ ++ tlb_flush_all_cpus_synced(cs); ++ } ++ } ++} ++ ++/* flush local tlb */ ++void HELPER(ptlb)(CPUS390XState *env) ++{ ++ tlb_flush(env_cpu(env)); ++} ++ ++/* flush global tlb */ ++void HELPER(purge)(CPUS390XState *env) ++{ ++ tlb_flush_all_cpus_synced(env_cpu(env)); ++} ++ ++/* load real address */ ++uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr) ++{ ++ uint64_t asc = env->psw.mask & PSW_MASK_ASC; ++ uint64_t ret, tec; ++ int flags, exc, cc; ++ ++ /* XXX incomplete - has more corner cases */ ++ if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) { ++ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC()); ++ } ++ ++ exc = mmu_translate(env, addr, 0, asc, &ret, &flags, &tec); ++ if (exc) { ++ cc = 3; ++ ret = exc | 0x80000000; ++ } else { ++ cc = 0; ++ ret |= addr & ~TARGET_PAGE_MASK; ++ } ++ ++ env->cc_op = cc; ++ return ret; ++} ++#endif ++ ++/* load pair from quadword */ ++uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr) ++{ ++ uintptr_t ra = GETPC(); ++ uint64_t hi, lo; ++ ++ check_alignment(env, addr, 16, ra); ++ hi = cpu_ldq_data_ra(env, addr + 0, ra); ++ lo = cpu_ldq_data_ra(env, addr + 8, ra); ++ ++ env->retxl = lo; ++ return hi; ++} ++ ++uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr) ++{ ++ uintptr_t ra = GETPC(); ++ uint64_t hi, lo; ++ int mem_idx; ++ TCGMemOpIdx oi; ++ Int128 v; ++ ++ assert(HAVE_ATOMIC128); ++ ++ mem_idx = cpu_mmu_index(env, false); ++ oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); ++ v = helper_atomic_ldo_be_mmu(env, addr, oi, ra); ++ hi = int128_gethi(v); ++ lo = int128_getlo(v); ++ ++ env->retxl = lo; ++ return hi; ++} ++ ++/* store pair to quadword */ ++void HELPER(stpq)(CPUS390XState *env, uint64_t addr, ++ uint64_t low, uint64_t high) ++{ ++ uintptr_t ra = GETPC(); ++ ++ check_alignment(env, addr, 16, ra); ++ cpu_stq_data_ra(env, addr + 0, high, ra); ++ cpu_stq_data_ra(env, addr + 8, low, ra); ++} ++ ++void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr, ++ uint64_t low, uint64_t high) ++{ ++ uintptr_t ra = GETPC(); ++ int mem_idx; ++ TCGMemOpIdx oi; ++ Int128 v; ++ ++ assert(HAVE_ATOMIC128); ++ ++ mem_idx = cpu_mmu_index(env, false); ++ oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); ++ v = int128_make128(low, high); ++ helper_atomic_sto_be_mmu(env, 
addr, v, oi, ra); ++} ++ ++/* Execute instruction. This instruction executes an insn modified with ++ the contents of r1. It does not change the executed instruction in memory; ++ it does not change the program counter. ++ ++ Perform this by recording the modified instruction in env->ex_value. ++ This will be noticed by cpu_get_tb_cpu_state and thus tb translation. ++*/ ++void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr) ++{ ++ uint64_t insn = cpu_lduw_code(env, addr); ++ uint8_t opc = insn >> 8; ++ ++ /* Or in the contents of R1[56:63]. */ ++ insn |= r1 & 0xff; ++ ++ /* Load the rest of the instruction. */ ++ insn <<= 48; ++ switch (get_ilen(opc)) { ++ case 2: ++ break; ++ case 4: ++ insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32; ++ break; ++ case 6: ++ insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16; ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ ++ /* The very most common cases can be sped up by avoiding a new TB. */ ++ if ((opc & 0xf0) == 0xd0) { ++ typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t, ++ uint64_t, uintptr_t); ++ static const dx_helper dx[16] = { ++ [0x0] = do_helper_trt_bkwd, ++ [0x2] = do_helper_mvc, ++ [0x4] = do_helper_nc, ++ [0x5] = do_helper_clc, ++ [0x6] = do_helper_oc, ++ [0x7] = do_helper_xc, ++ [0xc] = do_helper_tr, ++ [0xd] = do_helper_trt_fwd, ++ }; ++ dx_helper helper = dx[opc & 0xf]; ++ ++ if (helper) { ++ uint32_t l = extract64(insn, 48, 8); ++ uint32_t b1 = extract64(insn, 44, 4); ++ uint32_t d1 = extract64(insn, 32, 12); ++ uint32_t b2 = extract64(insn, 28, 4); ++ uint32_t d2 = extract64(insn, 16, 12); ++ uint64_t a1 = wrap_address(env, (b1 ? env->regs[b1] : 0) + d1); ++ uint64_t a2 = wrap_address(env, (b2 ? env->regs[b2] : 0) + d2); ++ ++ env->cc_op = helper(env, l, a1, a2, 0); ++ env->psw.addr += ilen; ++ return; ++ } ++ } else if (opc == 0x0a) { ++ env->int_svc_code = extract64(insn, 48, 8); ++ env->int_svc_ilen = ilen; ++ helper_exception(env, EXCP_SVC); ++ g_assert_not_reached(); ++ } ++ ++ /* Record the insn we want to execute as well as the ilen to use ++ during the execution of the target insn. This will also ensure ++ that ex_value is non-zero, which flags that we are in a state ++ that requires such execution. 
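++       (Non-zero is guaranteed because ilen is always 2, 4 or 6 and is
++       OR'ed into the low bits.)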
*/ ++ env->ex_value = insn | ilen; ++} ++ ++uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src, ++ uint64_t len) ++{ ++ const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY; ++ const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; ++ const uint64_t r0 = env->regs[0]; ++ const uintptr_t ra = GETPC(); ++ uint8_t dest_key, dest_as, dest_k, dest_a; ++ uint8_t src_key, src_as, src_k, src_a; ++ uint64_t val; ++ int cc = 0; ++ ++ HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n", ++ __func__, dest, src, len); ++ ++ if (!(env->psw.mask & PSW_MASK_DAT)) { ++ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); ++ } ++ ++ /* OAC (operand access control) for the first operand -> dest */ ++ val = (r0 & 0xffff0000ULL) >> 16; ++ dest_key = (val >> 12) & 0xf; ++ dest_as = (val >> 6) & 0x3; ++ dest_k = (val >> 1) & 0x1; ++ dest_a = val & 0x1; ++ ++ /* OAC (operand access control) for the second operand -> src */ ++ val = (r0 & 0x0000ffffULL); ++ src_key = (val >> 12) & 0xf; ++ src_as = (val >> 6) & 0x3; ++ src_k = (val >> 1) & 0x1; ++ src_a = val & 0x1; ++ ++ if (!dest_k) { ++ dest_key = psw_key; ++ } ++ if (!src_k) { ++ src_key = psw_key; ++ } ++ if (!dest_a) { ++ dest_as = psw_as; ++ } ++ if (!src_a) { ++ src_as = psw_as; ++ } ++ ++ if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) { ++ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); ++ } ++ if (!(env->cregs[0] & CR0_SECONDARY) && ++ (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) { ++ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra); ++ } ++ if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) { ++ tcg_s390_program_interrupt(env, PGM_PRIVILEGED, ra); ++ } ++ ++ len = wrap_length32(env, len); ++ if (len > 4096) { ++ cc = 3; ++ len = 4096; ++ } ++ ++ /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */ ++ if (src_as == AS_ACCREG || dest_as == AS_ACCREG || ++ (env->psw.mask & PSW_MASK_PSTATE)) { ++ qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n", ++ __func__); ++ tcg_s390_program_interrupt(env, PGM_ADDRESSING, ra); ++ } ++ ++ /* FIXME: Access using correct keys and AR-mode */ ++ if (len) { ++ S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD, ++ mmu_idx_from_as(src_as), ra); ++ S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE, ++ mmu_idx_from_as(dest_as), ra); ++ ++ access_memmove(env, &desta, &srca, ra); ++ } ++ ++ return cc; ++} ++ ++/* Decode a Unicode character. A return value < 0 indicates success, storing ++ the UTF-32 result into OCHAR and the input length into OLEN. A return ++ value >= 0 indicates failure, and the CC value to be returned. */ ++typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr, ++ uint64_t ilen, bool enh_check, uintptr_t ra, ++ uint32_t *ochar, uint32_t *olen); ++ ++/* Encode a Unicode character. A return value < 0 indicates success, storing ++ the bytes into ADDR and the output length into OLEN. A return value >= 0 ++ indicates failure, and the CC value to be returned. 
*/ ++typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr, ++ uint64_t ilen, uintptr_t ra, uint32_t c, ++ uint32_t *olen); ++ ++static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen, ++ bool enh_check, uintptr_t ra, ++ uint32_t *ochar, uint32_t *olen) ++{ ++ uint8_t s0, s1, s2, s3; ++ uint32_t c, l; ++ ++ if (ilen < 1) { ++ return 0; ++ } ++ s0 = cpu_ldub_data_ra(env, addr, ra); ++ if (s0 <= 0x7f) { ++ /* one byte character */ ++ l = 1; ++ c = s0; ++ } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) { ++ /* invalid character */ ++ return 2; ++ } else if (s0 <= 0xdf) { ++ /* two byte character */ ++ l = 2; ++ if (ilen < 2) { ++ return 0; ++ } ++ s1 = cpu_ldub_data_ra(env, addr + 1, ra); ++ c = s0 & 0x1f; ++ c = (c << 6) | (s1 & 0x3f); ++ if (enh_check && (s1 & 0xc0) != 0x80) { ++ return 2; ++ } ++ } else if (s0 <= 0xef) { ++ /* three byte character */ ++ l = 3; ++ if (ilen < 3) { ++ return 0; ++ } ++ s1 = cpu_ldub_data_ra(env, addr + 1, ra); ++ s2 = cpu_ldub_data_ra(env, addr + 2, ra); ++ c = s0 & 0x0f; ++ c = (c << 6) | (s1 & 0x3f); ++ c = (c << 6) | (s2 & 0x3f); ++ /* Fold the byte-by-byte range descriptions in the PoO into ++ tests against the complete value. It disallows encodings ++ that could be smaller, and the UTF-16 surrogates. */ ++ if (enh_check ++ && ((s1 & 0xc0) != 0x80 ++ || (s2 & 0xc0) != 0x80 ++ || c < 0x1000 ++ || (c >= 0xd800 && c <= 0xdfff))) { ++ return 2; ++ } ++ } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) { ++ /* four byte character */ ++ l = 4; ++ if (ilen < 4) { ++ return 0; ++ } ++ s1 = cpu_ldub_data_ra(env, addr + 1, ra); ++ s2 = cpu_ldub_data_ra(env, addr + 2, ra); ++ s3 = cpu_ldub_data_ra(env, addr + 3, ra); ++ c = s0 & 0x07; ++ c = (c << 6) | (s1 & 0x3f); ++ c = (c << 6) | (s2 & 0x3f); ++ c = (c << 6) | (s3 & 0x3f); ++ /* See above. 
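++           E.g. the overlong sequence f0 80 80 80 decodes to c == 0 and is
++           rejected by the c < 0x010000 test, while f4 90 80 80 yields
++           c == 0x110000 and is rejected by c > 0x10ffff.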
*/ ++ if (enh_check ++ && ((s1 & 0xc0) != 0x80 ++ || (s2 & 0xc0) != 0x80 ++ || (s3 & 0xc0) != 0x80 ++ || c < 0x010000 ++ || c > 0x10ffff)) { ++ return 2; ++ } ++ } else { ++ /* invalid character */ ++ return 2; ++ } ++ ++ *ochar = c; ++ *olen = l; ++ return -1; ++} ++ ++static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen, ++ bool enh_check, uintptr_t ra, ++ uint32_t *ochar, uint32_t *olen) ++{ ++ uint16_t s0, s1; ++ uint32_t c, l; ++ ++ if (ilen < 2) { ++ return 0; ++ } ++ s0 = cpu_lduw_data_ra(env, addr, ra); ++ if ((s0 & 0xfc00) != 0xd800) { ++ /* one word character */ ++ l = 2; ++ c = s0; ++ } else { ++ /* two word character */ ++ l = 4; ++ if (ilen < 4) { ++ return 0; ++ } ++ s1 = cpu_lduw_data_ra(env, addr + 2, ra); ++ c = extract32(s0, 6, 4) + 1; ++ c = (c << 6) | (s0 & 0x3f); ++ c = (c << 10) | (s1 & 0x3ff); ++ if (enh_check && (s1 & 0xfc00) != 0xdc00) { ++ /* invalid surrogate character */ ++ return 2; ++ } ++ } ++ ++ *ochar = c; ++ *olen = l; ++ return -1; ++} ++ ++static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen, ++ bool enh_check, uintptr_t ra, ++ uint32_t *ochar, uint32_t *olen) ++{ ++ uint32_t c; ++ ++ if (ilen < 4) { ++ return 0; ++ } ++ c = cpu_ldl_data_ra(env, addr, ra); ++ if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) { ++ /* invalid unicode character */ ++ return 2; ++ } ++ ++ *ochar = c; ++ *olen = 4; ++ return -1; ++} ++ ++static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen, ++ uintptr_t ra, uint32_t c, uint32_t *olen) ++{ ++ uint8_t d[4]; ++ uint32_t l, i; ++ ++ if (c <= 0x7f) { ++ /* one byte character */ ++ l = 1; ++ d[0] = c; ++ } else if (c <= 0x7ff) { ++ /* two byte character */ ++ l = 2; ++ d[1] = 0x80 | extract32(c, 0, 6); ++ d[0] = 0xc0 | extract32(c, 6, 5); ++ } else if (c <= 0xffff) { ++ /* three byte character */ ++ l = 3; ++ d[2] = 0x80 | extract32(c, 0, 6); ++ d[1] = 0x80 | extract32(c, 6, 6); ++ d[0] = 0xe0 | extract32(c, 12, 4); ++ } else { ++ /* four byte character */ ++ l = 4; ++ d[3] = 0x80 | extract32(c, 0, 6); ++ d[2] = 0x80 | extract32(c, 6, 6); ++ d[1] = 0x80 | extract32(c, 12, 6); ++ d[0] = 0xf0 | extract32(c, 18, 3); ++ } ++ ++ if (ilen < l) { ++ return 1; ++ } ++ for (i = 0; i < l; ++i) { ++ cpu_stb_data_ra(env, addr + i, d[i], ra); ++ } ++ ++ *olen = l; ++ return -1; ++} ++ ++static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen, ++ uintptr_t ra, uint32_t c, uint32_t *olen) ++{ ++ uint16_t d0, d1; ++ ++ if (c <= 0xffff) { ++ /* one word character */ ++ if (ilen < 2) { ++ return 1; ++ } ++ cpu_stw_data_ra(env, addr, c, ra); ++ *olen = 2; ++ } else { ++ /* two word character */ ++ if (ilen < 4) { ++ return 1; ++ } ++ d1 = 0xdc00 | extract32(c, 0, 10); ++ d0 = 0xd800 | extract32(c, 10, 6); ++ d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1); ++ cpu_stw_data_ra(env, addr + 0, d0, ra); ++ cpu_stw_data_ra(env, addr + 2, d1, ra); ++ *olen = 4; ++ } ++ ++ return -1; ++} ++ ++static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen, ++ uintptr_t ra, uint32_t c, uint32_t *olen) ++{ ++ if (ilen < 4) { ++ return 1; ++ } ++ cpu_stl_data_ra(env, addr, c, ra); ++ *olen = 4; ++ return -1; ++} ++ ++static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1, ++ uint32_t r2, uint32_t m3, uintptr_t ra, ++ decode_unicode_fn decode, ++ encode_unicode_fn encode) ++{ ++ uint64_t dst = get_address(env, r1); ++ uint64_t dlen = get_length(env, r1 + 1); ++ uint64_t src = get_address(env, r2); ++ uint64_t slen = get_length(env, r2 + 1); ++ bool enh_check = 
m3 & 1; ++ int cc, i; ++ ++ /* Lest we fail to service interrupts in a timely manner, limit the ++ amount of work we're willing to do. For now, let's cap at 256. */ ++ for (i = 0; i < 256; ++i) { ++ uint32_t c, ilen, olen; ++ ++ cc = decode(env, src, slen, enh_check, ra, &c, &ilen); ++ if (unlikely(cc >= 0)) { ++ break; ++ } ++ cc = encode(env, dst, dlen, ra, c, &olen); ++ if (unlikely(cc >= 0)) { ++ break; ++ } ++ ++ src += ilen; ++ slen -= ilen; ++ dst += olen; ++ dlen -= olen; ++ cc = 3; ++ } ++ ++ set_address(env, r1, dst); ++ set_length(env, r1 + 1, dlen); ++ set_address(env, r2, src); ++ set_length(env, r2 + 1, slen); ++ ++ return cc; ++} ++ ++uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) ++{ ++ return convert_unicode(env, r1, r2, m3, GETPC(), ++ decode_utf8, encode_utf16); ++} ++ ++uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) ++{ ++ return convert_unicode(env, r1, r2, m3, GETPC(), ++ decode_utf8, encode_utf32); ++} ++ ++uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) ++{ ++ return convert_unicode(env, r1, r2, m3, GETPC(), ++ decode_utf16, encode_utf8); ++} ++ ++uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) ++{ ++ return convert_unicode(env, r1, r2, m3, GETPC(), ++ decode_utf16, encode_utf32); ++} ++ ++uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) ++{ ++ return convert_unicode(env, r1, r2, m3, GETPC(), ++ decode_utf32, encode_utf8); ++} ++ ++uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3) ++{ ++ return convert_unicode(env, r1, r2, m3, GETPC(), ++ decode_utf32, encode_utf16); ++} ++ ++void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len, ++ uintptr_t ra) ++{ ++ /* test the actual access, not just any access to the page due to LAP */ ++ while (len) { ++ const uint64_t pagelen = -(addr | TARGET_PAGE_MASK); ++ const uint64_t curlen = MIN(pagelen, len); ++ ++ probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra); ++ addr = wrap_address(env, addr + curlen); ++ len -= curlen; ++ } ++} ++ ++void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len) ++{ ++ probe_write_access(env, addr, len, GETPC()); ++} +diff --git a/target/s390x/tcg/meson.build b/target/s390x/tcg/meson.build +new file mode 100644 +index 0000000000..ee4e8fec77 +--- /dev/null ++++ b/target/s390x/tcg/meson.build +@@ -0,0 +1,14 @@ ++s390x_ss.add(when: 'CONFIG_TCG', if_true: files( ++ 'cc_helper.c', ++ 'crypto_helper.c', ++ 'excp_helper.c', ++ 'fpu_helper.c', ++ 'int_helper.c', ++ 'mem_helper.c', ++ 'misc_helper.c', ++ 'translate.c', ++ 'vec_fpu_helper.c', ++ 'vec_helper.c', ++ 'vec_int_helper.c', ++ 'vec_string_helper.c', ++)) +diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c +new file mode 100644 +index 0000000000..33e6999e15 +--- /dev/null ++++ b/target/s390x/tcg/misc_helper.c +@@ -0,0 +1,785 @@ ++/* ++ * S/390 misc helper routines ++ * ++ * Copyright (c) 2009 Ulrich Hecht ++ * Copyright (c) 2009 Alexander Graf ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. 
++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, see <http://www.gnu.org/licenses/>. ++ */ ++ ++#include "qemu/osdep.h" ++#include "qemu/cutils.h" ++#include "qemu/main-loop.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "exec/memory.h" ++#include "qemu/host-utils.h" ++#include "exec/helper-proto.h" ++#include "qemu/timer.h" ++#include "exec/exec-all.h" ++#include "exec/cpu_ldst.h" ++#include "qapi/error.h" ++#include "tcg_s390x.h" ++#include "s390-tod.h" ++ ++#if !defined(CONFIG_USER_ONLY) ++#include "sysemu/cpus.h" ++#include "sysemu/sysemu.h" ++#include "hw/s390x/ebcdic.h" ++#include "hw/s390x/s390-virtio-hcall.h" ++#include "hw/s390x/sclp.h" ++#include "hw/s390x/s390_flic.h" ++#include "hw/s390x/ioinst.h" ++#include "hw/s390x/s390-pci-inst.h" ++#include "hw/boards.h" ++#include "hw/s390x/tod.h" ++#endif ++ ++/* #define DEBUG_HELPER */ ++#ifdef DEBUG_HELPER ++#define HELPER_LOG(x...) qemu_log(x) ++#else ++#define HELPER_LOG(x...) ++#endif ++ ++/* Raise an exception statically from a TB. */ ++void HELPER(exception)(CPUS390XState *env, uint32_t excp) ++{ ++ CPUState *cs = env_cpu(env); ++ ++ HELPER_LOG("%s: exception %d\n", __func__, excp); ++ cs->exception_index = excp; ++ cpu_loop_exit(cs); ++} ++ ++/* Store CPU Timer (also used for EXTRACT CPU TIME) */ ++uint64_t HELPER(stpt)(CPUS390XState *env) ++{ ++#if defined(CONFIG_USER_ONLY) ++ /* ++ * Fake a descending CPU timer. We could get negative values here, ++ * but we don't care as it is up to the OS when to process that ++ * interrupt and reset to > 0. 
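++ * E.g. two successive reads t0 = UINT64_MAX - ticks and, a little later, ++ * t1 = UINT64_MAX - ticks' with ticks' > ticks satisfy t1 < t0, so the ++ * guest observes a value that counts down.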
++ */ ++ return UINT64_MAX - (uint64_t)cpu_get_host_ticks(); ++#else ++ return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); ++#endif ++} ++ ++/* Store Clock */ ++uint64_t HELPER(stck)(CPUS390XState *env) ++{ ++#ifdef CONFIG_USER_ONLY ++ struct timespec ts; ++ uint64_t ns; ++ ++ clock_gettime(CLOCK_REALTIME, &ts); ++ ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec; ++ ++ return TOD_UNIX_EPOCH + time2tod(ns); ++#else ++ S390TODState *td = s390_get_todstate(); ++ S390TODClass *tdc = S390_TOD_GET_CLASS(td); ++ S390TOD tod; ++ ++ tdc->get(td, &tod, &error_abort); ++ return tod.low; ++#endif ++} ++ ++#ifndef CONFIG_USER_ONLY ++/* SCLP service call */ ++uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2) ++{ ++ qemu_mutex_lock_iothread(); ++ int r = sclp_service_call(env, r1, r2); ++ qemu_mutex_unlock_iothread(); ++ if (r < 0) { ++ tcg_s390_program_interrupt(env, -r, GETPC()); ++ } ++ return r; ++} ++ ++void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num) ++{ ++ uint64_t r; ++ ++ switch (num) { ++ case 0x500: ++ /* KVM hypercall */ ++ qemu_mutex_lock_iothread(); ++ r = s390_virtio_hypercall(env); ++ qemu_mutex_unlock_iothread(); ++ break; ++ case 0x44: ++ /* yield */ ++ r = 0; ++ break; ++ case 0x308: ++ /* ipl */ ++ qemu_mutex_lock_iothread(); ++ handle_diag_308(env, r1, r3, GETPC()); ++ qemu_mutex_unlock_iothread(); ++ r = 0; ++ break; ++ case 0x288: ++ /* time bomb (watchdog) */ ++ r = handle_diag_288(env, r1, r3); ++ break; ++ default: ++ r = -1; ++ break; ++ } ++ ++ if (r) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); ++ } ++} ++ ++/* Set Prefix */ ++void HELPER(spx)(CPUS390XState *env, uint64_t a1) ++{ ++ CPUState *cs = env_cpu(env); ++ uint32_t prefix = a1 & 0x7fffe000; ++ ++ env->psa = prefix; ++ HELPER_LOG("prefix: %#x\n", prefix); ++ tlb_flush_page(cs, 0); ++ tlb_flush_page(cs, TARGET_PAGE_SIZE); ++} ++ ++static void update_ckc_timer(CPUS390XState *env) ++{ ++ S390TODState *td = s390_get_todstate(); ++ uint64_t time; ++ ++ /* stop the timer and remove pending CKC IRQs */ ++ timer_del(env->tod_timer); ++ g_assert(qemu_mutex_iothread_locked()); ++ env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR; ++ ++ /* the tod has to exceed the ckc, this can never happen if ckc is all 1's */ ++ if (env->ckc == -1ULL) { ++ return; ++ } ++ ++ /* difference between origins */ ++ time = env->ckc - td->base.low; ++ ++ /* nanoseconds */ ++ time = tod2time(time); ++ ++ timer_mod(env->tod_timer, time); ++} ++ ++/* Set Clock Comparator */ ++void HELPER(sckc)(CPUS390XState *env, uint64_t ckc) ++{ ++ env->ckc = ckc; ++ ++ qemu_mutex_lock_iothread(); ++ update_ckc_timer(env); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque) ++{ ++ S390CPU *cpu = S390_CPU(cs); ++ ++ update_ckc_timer(&cpu->env); ++} ++ ++/* Set Clock */ ++uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low) ++{ ++ S390TODState *td = s390_get_todstate(); ++ S390TODClass *tdc = S390_TOD_GET_CLASS(td); ++ S390TOD tod = { ++ .high = 0, ++ .low = tod_low, ++ }; ++ ++ qemu_mutex_lock_iothread(); ++ tdc->set(td, &tod, &error_abort); ++ qemu_mutex_unlock_iothread(); ++ return 0; ++} ++ ++/* Set Tod Programmable Field */ ++void HELPER(sckpf)(CPUS390XState *env, uint64_t r0) ++{ ++ uint32_t val = r0; ++ ++ if (val & 0xffff0000) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC()); ++ } ++ env->todpr = val; ++} ++ ++/* Store Clock Comparator */ ++uint64_t HELPER(stckc)(CPUS390XState *env) ++{ ++ 
return env->ckc; ++} ++ ++/* Set CPU Timer */ ++void HELPER(spt)(CPUS390XState *env, uint64_t time) ++{ ++ if (time == -1ULL) { ++ return; ++ } ++ ++ /* nanoseconds */ ++ time = tod2time(time); ++ ++ env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time; ++ ++ timer_mod(env->cpu_timer, env->cputm); ++} ++ ++/* Store System Information */ ++uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1) ++{ ++ const uintptr_t ra = GETPC(); ++ const uint32_t sel1 = r0 & STSI_R0_SEL1_MASK; ++ const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK; ++ const MachineState *ms = MACHINE(qdev_get_machine()); ++ uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0; ++ S390CPU *cpu = env_archcpu(env); ++ SysIB sysib = { }; ++ int i, cc = 0; ++ ++ if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) { ++ /* invalid function code: no other checks are performed */ ++ return 3; ++ } ++ ++ if ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK)) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ if ((r0 & STSI_R0_FC_MASK) == STSI_R0_FC_CURRENT) { ++ /* query the current level: no further checks are performed */ ++ env->regs[0] = STSI_R0_FC_LEVEL_3; ++ return 0; ++ } ++ ++ if (a0 & ~TARGET_PAGE_MASK) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ /* count the cpus and split them into configured and reserved ones */ ++ for (i = 0; i < ms->possible_cpus->len; i++) { ++ total_cpus++; ++ if (ms->possible_cpus->cpus[i].cpu) { ++ conf_cpus++; ++ } else { ++ reserved_cpus++; ++ } ++ } ++ ++ /* ++ * In theory, we could report Level 1 / Level 2 as current. However, ++ * the Linux kernel will detect this as running under LPAR and assume ++ * that we have a sclp linemode console (which is always present on ++ * LPAR, but not the default for QEMU), therefore not displaying boot ++ * messages and making booting a Linux kernel under TCG harder. ++ * ++ * For now we fake the same SMP configuration on all levels. ++ * ++ * TODO: We could later make the level configurable via the machine ++ * and change defaults (linemode console) based on machine type ++ * and accelerator. 
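++ * ++ * E.g. a Linux guest issuing STSI 3.2.2 therefore finds the "KVM/Linux " ++ * control program identifier filled in below rather than an LPAR name.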
++ */ ++ switch (r0 & STSI_R0_FC_MASK) { ++ case STSI_R0_FC_LEVEL_1: ++ if ((sel1 == 1) && (sel2 == 1)) { ++ /* Basic Machine Configuration */ ++ char type[5] = {}; ++ ++ ebcdic_put(sysib.sysib_111.manuf, "QEMU ", 16); ++ /* same as machine type number in STORE CPU ID, but in EBCDIC */ ++ snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type); ++ ebcdic_put(sysib.sysib_111.type, type, 4); ++ /* model number (not stored in STORE CPU ID for z/Architecture) */ ++ ebcdic_put(sysib.sysib_111.model, "QEMU ", 16); ++ ebcdic_put(sysib.sysib_111.sequence, "QEMU ", 16); ++ ebcdic_put(sysib.sysib_111.plant, "QEMU", 4); ++ } else if ((sel1 == 2) && (sel2 == 1)) { ++ /* Basic Machine CPU */ ++ ebcdic_put(sysib.sysib_121.sequence, "QEMUQEMUQEMUQEMU", 16); ++ ebcdic_put(sysib.sysib_121.plant, "QEMU", 4); ++ sysib.sysib_121.cpu_addr = cpu_to_be16(env->core_id); ++ } else if ((sel1 == 2) && (sel2 == 2)) { ++ /* Basic Machine CPUs */ ++ sysib.sysib_122.capability = cpu_to_be32(0x443afc29); ++ sysib.sysib_122.total_cpus = cpu_to_be16(total_cpus); ++ sysib.sysib_122.conf_cpus = cpu_to_be16(conf_cpus); ++ sysib.sysib_122.reserved_cpus = cpu_to_be16(reserved_cpus); ++ } else { ++ cc = 3; ++ } ++ break; ++ case STSI_R0_FC_LEVEL_2: ++ if ((sel1 == 2) && (sel2 == 1)) { ++ /* LPAR CPU */ ++ ebcdic_put(sysib.sysib_221.sequence, "QEMUQEMUQEMUQEMU", 16); ++ ebcdic_put(sysib.sysib_221.plant, "QEMU", 4); ++ sysib.sysib_221.cpu_addr = cpu_to_be16(env->core_id); ++ } else if ((sel1 == 2) && (sel2 == 2)) { ++ /* LPAR CPUs */ ++ sysib.sysib_222.lcpuc = 0x80; /* dedicated */ ++ sysib.sysib_222.total_cpus = cpu_to_be16(total_cpus); ++ sysib.sysib_222.conf_cpus = cpu_to_be16(conf_cpus); ++ sysib.sysib_222.reserved_cpus = cpu_to_be16(reserved_cpus); ++ ebcdic_put(sysib.sysib_222.name, "QEMU ", 8); ++ sysib.sysib_222.caf = cpu_to_be32(1000); ++ sysib.sysib_222.dedicated_cpus = cpu_to_be16(conf_cpus); ++ } else { ++ cc = 3; ++ } ++ break; ++ case STSI_R0_FC_LEVEL_3: ++ if ((sel1 == 2) && (sel2 == 2)) { ++ /* VM CPUs */ ++ sysib.sysib_322.count = 1; ++ sysib.sysib_322.vm[0].total_cpus = cpu_to_be16(total_cpus); ++ sysib.sysib_322.vm[0].conf_cpus = cpu_to_be16(conf_cpus); ++ sysib.sysib_322.vm[0].reserved_cpus = cpu_to_be16(reserved_cpus); ++ sysib.sysib_322.vm[0].caf = cpu_to_be32(1000); ++ /* Linux kernel uses this to distinguish us from z/VM */ ++ ebcdic_put(sysib.sysib_322.vm[0].cpi, "KVM/Linux ", 16); ++ sysib.sysib_322.vm[0].ext_name_encoding = 2; /* UTF-8 */ ++ ++ /* If our VM has a name, use the real name */ ++ if (qemu_name) { ++ memset(sysib.sysib_322.vm[0].name, 0x40, ++ sizeof(sysib.sysib_322.vm[0].name)); ++ ebcdic_put(sysib.sysib_322.vm[0].name, qemu_name, ++ MIN(sizeof(sysib.sysib_322.vm[0].name), ++ strlen(qemu_name))); ++ strpadcpy((char *)sysib.sysib_322.ext_names[0], ++ sizeof(sysib.sysib_322.ext_names[0]), ++ qemu_name, '\0'); ++ ++ } else { ++ ebcdic_put(sysib.sysib_322.vm[0].name, "TCGguest", 8); ++ strcpy((char *)sysib.sysib_322.ext_names[0], "TCGguest"); ++ } ++ ++ /* add the uuid */ ++ memcpy(sysib.sysib_322.vm[0].uuid, &qemu_uuid, ++ sizeof(sysib.sysib_322.vm[0].uuid)); ++ } else { ++ cc = 3; ++ } ++ break; ++ } ++ ++ if (cc == 0) { ++ if (s390_cpu_virt_mem_write(cpu, a0, 0, &sysib, sizeof(sysib))) { ++ s390_cpu_virt_mem_handle_exc(cpu, ra); ++ } ++ } ++ ++ return cc; ++} ++ ++uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1, ++ uint32_t r3) ++{ ++ int cc; ++ ++ /* TODO: needed to inject interrupts - push further down */ ++ qemu_mutex_lock_iothread(); ++ cc = 
handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3); ++ qemu_mutex_unlock_iothread(); ++ ++ return cc; ++} ++#endif ++ ++#ifndef CONFIG_USER_ONLY ++void HELPER(xsch)(CPUS390XState *env, uint64_t r1) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_xsch(cpu, r1, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(csch)(CPUS390XState *env, uint64_t r1) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_csch(cpu, r1, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(hsch)(CPUS390XState *env, uint64_t r1) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_hsch(cpu, r1, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_msch(cpu, r1, inst >> 16, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(rchp)(CPUS390XState *env, uint64_t r1) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_rchp(cpu, r1, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(rsch)(CPUS390XState *env, uint64_t r1) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_rsch(cpu, r1, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(sal)(CPUS390XState *env, uint64_t r1) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_sal(cpu, r1, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(stcrw)(CPUS390XState *env, uint64_t inst) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_stcrw(cpu, inst >> 16, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) ++{ ++ const uintptr_t ra = GETPC(); ++ S390CPU *cpu = env_archcpu(env); ++ QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic()); ++ QEMUS390FlicIO *io = NULL; ++ LowCore *lowcore; ++ ++ if (addr & 0x3) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ qemu_mutex_lock_iothread(); ++ io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]); ++ if (!io) { ++ qemu_mutex_unlock_iothread(); ++ return 0; ++ } ++ ++ if (addr) { ++ struct { ++ uint16_t id; ++ uint16_t nr; ++ uint32_t parm; ++ } intc = { ++ .id = cpu_to_be16(io->id), ++ .nr = cpu_to_be16(io->nr), ++ .parm = cpu_to_be32(io->parm), ++ }; ++ ++ if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) { ++ /* writing failed, reinject and properly clean up */ ++ s390_io_interrupt(io->id, io->nr, io->parm, io->word); ++ qemu_mutex_unlock_iothread(); ++ g_free(io); ++ s390_cpu_virt_mem_handle_exc(cpu, ra); ++ return 0; ++ } ++ } else { ++ /* no 
protection applies */ ++ lowcore = cpu_map_lowcore(env); ++ lowcore->subchannel_id = cpu_to_be16(io->id); ++ lowcore->subchannel_nr = cpu_to_be16(io->nr); ++ lowcore->io_int_parm = cpu_to_be32(io->parm); ++ lowcore->io_int_word = cpu_to_be32(io->word); ++ cpu_unmap_lowcore(lowcore); ++ } ++ ++ g_free(io); ++ qemu_mutex_unlock_iothread(); ++ return 1; ++} ++ ++void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(chsc)(CPUS390XState *env, uint64_t inst) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ qemu_mutex_lock_iothread(); ++ ioinst_handle_chsc(cpu, inst >> 16, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++#endif ++ ++#ifndef CONFIG_USER_ONLY ++void HELPER(per_check_exception)(CPUS390XState *env) ++{ ++ if (env->per_perc_atmid) { ++ tcg_s390_program_interrupt(env, PGM_PER, GETPC()); ++ } ++} ++ ++/* Check if an address is within the PER starting address and the PER ++ ending address. The address range might loop. */ ++static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr) ++{ ++ if (env->cregs[10] <= env->cregs[11]) { ++ return env->cregs[10] <= addr && addr <= env->cregs[11]; ++ } else { ++ return env->cregs[10] <= addr || addr <= env->cregs[11]; ++ } ++} ++ ++void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to) ++{ ++ if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) { ++ if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS) ++ || get_per_in_range(env, to)) { ++ env->per_address = from; ++ env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env); ++ } ++ } ++} ++ ++void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr) ++{ ++ if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) { ++ env->per_address = addr; ++ env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env); ++ ++ /* If the instruction has to be nullified, trigger the ++ exception immediately. */ ++ if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) { ++ CPUState *cs = env_cpu(env); ++ ++ env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION; ++ env->int_pgm_code = PGM_PER; ++ env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr)); ++ ++ cs->exception_index = EXCP_PGM; ++ cpu_loop_exit(cs); ++ } ++ } ++} ++ ++void HELPER(per_store_real)(CPUS390XState *env) ++{ ++ if ((env->cregs[9] & PER_CR9_EVENT_STORE) && ++ (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) { ++ /* PSW is saved just before calling the helper. 
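Hence env->psw.addr below still names the storing instruction itself. 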
*/ ++ env->per_address = env->psw.addr; ++ env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env); ++ } ++} ++#endif ++ ++static uint8_t stfl_bytes[2048]; ++static unsigned int used_stfl_bytes; ++ ++static void prepare_stfl(void) ++{ ++ static bool initialized; ++ int i; ++ ++ /* racy, but we don't care, the same values are always written */ ++ if (initialized) { ++ return; ++ } ++ ++ s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes); ++ for (i = 0; i < sizeof(stfl_bytes); i++) { ++ if (stfl_bytes[i]) { ++ used_stfl_bytes = i + 1; ++ } ++ } ++ initialized = true; ++} ++ ++#ifndef CONFIG_USER_ONLY ++void HELPER(stfl)(CPUS390XState *env) ++{ ++ LowCore *lowcore; ++ ++ lowcore = cpu_map_lowcore(env); ++ prepare_stfl(); ++ memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list)); ++ cpu_unmap_lowcore(lowcore); ++} ++#endif ++ ++uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr) ++{ ++ const uintptr_t ra = GETPC(); ++ const int count_bytes = ((env->regs[0] & 0xff) + 1) * 8; ++ int max_bytes; ++ int i; ++ ++ if (addr & 0x7) { ++ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); ++ } ++ ++ prepare_stfl(); ++ max_bytes = ROUND_UP(used_stfl_bytes, 8); ++ ++ /* ++ * The PoP says that doublewords beyond the highest-numbered facility ++ * bit may or may not be stored. However, existing hardware appears to ++ * not store the words, and existing software depends on that. ++ */ ++ for (i = 0; i < MIN(count_bytes, max_bytes); ++i) { ++ cpu_stb_data_ra(env, addr + i, stfl_bytes[i], ra); ++ } ++ ++ env->regs[0] = deposit64(env->regs[0], 0, 8, (max_bytes / 8) - 1); ++ return count_bytes >= max_bytes ? 0 : 3; ++} ++ ++#ifndef CONFIG_USER_ONLY ++/* ++ * Note: we ignore any return code of the functions called for the pci ++ * instructions, as the only time they return !0 is when the stub is ++ * called, and in that case we didn't even offer the zpci facility. ++ * The only exception is SIC, where program checks need to be handled ++ * by the caller. 
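++ * E.g. css_do_sic() can hand back a negated PGM_xxx code, which ++ * HELPER(sic) below re-injects via tcg_s390_program_interrupt().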
++ */ ++void HELPER(clp)(CPUS390XState *env, uint32_t r2) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ ++ qemu_mutex_lock_iothread(); ++ clp_service_call(cpu, r2, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ ++ qemu_mutex_lock_iothread(); ++ pcilg_service_call(cpu, r1, r2, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ ++ qemu_mutex_lock_iothread(); ++ pcistg_service_call(cpu, r1, r2, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, ++ uint32_t ar) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ ++ qemu_mutex_lock_iothread(); ++ stpcifc_service_call(cpu, r1, fiba, ar, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3) ++{ ++ int r; ++ ++ qemu_mutex_lock_iothread(); ++ r = css_do_sic(env, (r3 >> 27) & 0x7, r1 & 0xffff); ++ qemu_mutex_unlock_iothread(); ++ /* css_do_sic() may actually return a PGM_xxx value to inject */ ++ if (r) { ++ tcg_s390_program_interrupt(env, -r, GETPC()); ++ } ++} ++ ++void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ ++ qemu_mutex_lock_iothread(); ++ rpcit_service_call(cpu, r1, r2, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3, ++ uint64_t gaddr, uint32_t ar) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ ++ qemu_mutex_lock_iothread(); ++ pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++ ++void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, ++ uint32_t ar) ++{ ++ S390CPU *cpu = env_archcpu(env); ++ ++ qemu_mutex_lock_iothread(); ++ mpcifc_service_call(cpu, r1, fiba, ar, GETPC()); ++ qemu_mutex_unlock_iothread(); ++} ++#endif +diff --git a/target/s390x/tcg/s390-tod.h b/target/s390x/tcg/s390-tod.h +new file mode 100644 +index 0000000000..8b74d6a6d8 +--- /dev/null ++++ b/target/s390x/tcg/s390-tod.h +@@ -0,0 +1,29 @@ ++/* ++ * TOD (Time Of Day) clock ++ * ++ * Copyright 2018 Red Hat, Inc. ++ * Author(s): David Hildenbrand ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. ++ */ ++ ++#ifndef TARGET_S390_TOD_H ++#define TARGET_S390_TOD_H ++ ++/* The value of the TOD clock for 1.1.1970. */ ++#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL ++ ++/* Converts ns to s390's clock format */ ++static inline uint64_t time2tod(uint64_t ns) ++{ ++ return (ns << 9) / 125 + (((ns & 0xff80000000000000ull) / 125) << 9); ++} ++ ++/* Converts s390's clock format to ns */ ++static inline uint64_t tod2time(uint64_t t) ++{ ++ return ((t >> 9) * 125) + (((t & 0x1ff) * 125) >> 9); ++} ++ ++#endif +diff --git a/target/s390x/tcg/tcg_s390x.h b/target/s390x/tcg/tcg_s390x.h +new file mode 100644 +index 0000000000..2f54ccb027 +--- /dev/null ++++ b/target/s390x/tcg/tcg_s390x.h +@@ -0,0 +1,24 @@ ++/* ++ * QEMU TCG support -- s390x specific functions. ++ * ++ * Copyright 2018 Red Hat, Inc. ++ * ++ * Authors: ++ * David Hildenbrand ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. 
++ */ ++ ++#ifndef TCG_S390X_H ++#define TCG_S390X_H ++ ++void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque); ++void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, ++ uint32_t code, uintptr_t ra); ++void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, ++ uintptr_t ra); ++void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc, ++ uintptr_t ra); ++ ++#endif /* TCG_S390X_H */ +diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c +new file mode 100644 +index 0000000000..92fa7656c2 +--- /dev/null ++++ b/target/s390x/tcg/translate.c +@@ -0,0 +1,6672 @@ ++/* ++ * S/390 translation ++ * ++ * Copyright (c) 2009 Ulrich Hecht ++ * Copyright (c) 2010 Alexander Graf ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, see <http://www.gnu.org/licenses/>. ++ */ ++ ++/* #define DEBUG_INLINE_BRANCHES */ ++#define S390X_DEBUG_DISAS ++/* #define S390X_DEBUG_DISAS_VERBOSE */ ++ ++#ifdef S390X_DEBUG_DISAS_VERBOSE ++# define LOG_DISAS(...) qemu_log(__VA_ARGS__) ++#else ++# define LOG_DISAS(...) do { } while (0) ++#endif ++ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "disas/disas.h" ++#include "exec/exec-all.h" ++#include "tcg/tcg-op.h" ++#include "tcg/tcg-op-gvec.h" ++#include "qemu/log.h" ++#include "qemu/host-utils.h" ++#include "exec/cpu_ldst.h" ++#include "exec/gen-icount.h" ++#include "exec/helper-proto.h" ++#include "exec/helper-gen.h" ++ ++#include "exec/translator.h" ++#include "exec/log.h" ++#include "qemu/atomic128.h" ++ ++ ++/* Information that (most) every instruction needs to manipulate. */ ++typedef struct DisasContext DisasContext; ++typedef struct DisasInsn DisasInsn; ++typedef struct DisasFields DisasFields; ++ ++/* ++ * Define a structure to hold the decoded fields. We'll store each inside ++ * an array indexed by an enum. In order to conserve memory, we'll arrange ++ * for fields that do not exist at the same time to overlap, thus the "C" ++ * for compact. For checking purposes there is an "O" for original index ++ * as well that will be applied to availability bitmaps. 
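++ * ++ * E.g. r1, m1, b1, i1 and v1 never occur in the same instruction format, ++ * so they can all share compact slot 0 below, while their distinct original ++ * indices keep presentO usable as a per-field availability bitmap.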
++ */ ++ ++enum DisasFieldIndexO { ++ FLD_O_r1, ++ FLD_O_r2, ++ FLD_O_r3, ++ FLD_O_m1, ++ FLD_O_m3, ++ FLD_O_m4, ++ FLD_O_m5, ++ FLD_O_m6, ++ FLD_O_b1, ++ FLD_O_b2, ++ FLD_O_b4, ++ FLD_O_d1, ++ FLD_O_d2, ++ FLD_O_d4, ++ FLD_O_x2, ++ FLD_O_l1, ++ FLD_O_l2, ++ FLD_O_i1, ++ FLD_O_i2, ++ FLD_O_i3, ++ FLD_O_i4, ++ FLD_O_i5, ++ FLD_O_v1, ++ FLD_O_v2, ++ FLD_O_v3, ++ FLD_O_v4, ++}; ++ ++enum DisasFieldIndexC { ++ FLD_C_r1 = 0, ++ FLD_C_m1 = 0, ++ FLD_C_b1 = 0, ++ FLD_C_i1 = 0, ++ FLD_C_v1 = 0, ++ ++ FLD_C_r2 = 1, ++ FLD_C_b2 = 1, ++ FLD_C_i2 = 1, ++ ++ FLD_C_r3 = 2, ++ FLD_C_m3 = 2, ++ FLD_C_i3 = 2, ++ FLD_C_v3 = 2, ++ ++ FLD_C_m4 = 3, ++ FLD_C_b4 = 3, ++ FLD_C_i4 = 3, ++ FLD_C_l1 = 3, ++ FLD_C_v4 = 3, ++ ++ FLD_C_i5 = 4, ++ FLD_C_d1 = 4, ++ FLD_C_m5 = 4, ++ ++ FLD_C_d2 = 5, ++ FLD_C_m6 = 5, ++ ++ FLD_C_d4 = 6, ++ FLD_C_x2 = 6, ++ FLD_C_l2 = 6, ++ FLD_C_v2 = 6, ++ ++ NUM_C_FIELD = 7 ++}; ++ ++struct DisasFields { ++ uint64_t raw_insn; ++ unsigned op:8; ++ unsigned op2:8; ++ unsigned presentC:16; ++ unsigned int presentO; ++ int c[NUM_C_FIELD]; ++}; ++ ++struct DisasContext { ++ DisasContextBase base; ++ const DisasInsn *insn; ++ DisasFields fields; ++ uint64_t ex_value; ++ /* ++ * During translate_one(), pc_tmp is used to determine the instruction ++ * to be executed after base.pc_next - e.g. next sequential instruction ++ * or a branch target. ++ */ ++ uint64_t pc_tmp; ++ uint32_t ilen; ++ enum cc_op cc_op; ++ bool do_debug; ++}; ++ ++/* Information carried about a condition to be evaluated. */ ++typedef struct { ++ TCGCond cond:8; ++ bool is_64; ++ bool g1; ++ bool g2; ++ union { ++ struct { TCGv_i64 a, b; } s64; ++ struct { TCGv_i32 a, b; } s32; ++ } u; ++} DisasCompare; ++ ++#ifdef DEBUG_INLINE_BRANCHES ++static uint64_t inline_branch_hit[CC_OP_MAX]; ++static uint64_t inline_branch_miss[CC_OP_MAX]; ++#endif ++ ++static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc) ++{ ++ TCGv_i64 tmp; ++ ++ if (s->base.tb->flags & FLAG_MASK_32) { ++ if (s->base.tb->flags & FLAG_MASK_64) { ++ tcg_gen_movi_i64(out, pc); ++ return; ++ } ++ pc |= 0x80000000; ++ } ++ assert(!(s->base.tb->flags & FLAG_MASK_64)); ++ tmp = tcg_const_i64(pc); ++ tcg_gen_deposit_i64(out, out, tmp, 0, 32); ++ tcg_temp_free_i64(tmp); ++} ++ ++static TCGv_i64 psw_addr; ++static TCGv_i64 psw_mask; ++static TCGv_i64 gbea; ++ ++static TCGv_i32 cc_op; ++static TCGv_i64 cc_src; ++static TCGv_i64 cc_dst; ++static TCGv_i64 cc_vr; ++ ++static char cpu_reg_names[16][4]; ++static TCGv_i64 regs[16]; ++ ++void s390x_translate_init(void) ++{ ++ int i; ++ ++ psw_addr = tcg_global_mem_new_i64(cpu_env, ++ offsetof(CPUS390XState, psw.addr), ++ "psw_addr"); ++ psw_mask = tcg_global_mem_new_i64(cpu_env, ++ offsetof(CPUS390XState, psw.mask), ++ "psw_mask"); ++ gbea = tcg_global_mem_new_i64(cpu_env, ++ offsetof(CPUS390XState, gbea), ++ "gbea"); ++ ++ cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op), ++ "cc_op"); ++ cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src), ++ "cc_src"); ++ cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst), ++ "cc_dst"); ++ cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr), ++ "cc_vr"); ++ ++ for (i = 0; i < 16; i++) { ++ snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i); ++ regs[i] = tcg_global_mem_new(cpu_env, ++ offsetof(CPUS390XState, regs[i]), ++ cpu_reg_names[i]); ++ } ++} ++ ++static inline int vec_full_reg_offset(uint8_t reg) ++{ ++ g_assert(reg < 32); ++ return offsetof(CPUS390XState, vregs[reg][0]); ++} ++ 
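++/* ++ * Illustrative sketch (hi/lo are hypothetical temporaries, not defined ++ * here): the full 16-byte vector can be read as two i64 halves, e.g. ++ * ++ * tcg_gen_ld_i64(hi, cpu_env, vec_full_reg_offset(reg)); ++ * tcg_gen_ld_i64(lo, cpu_env, vec_full_reg_offset(reg) + 8); ++ * ++ * where vregs[reg][0] holds the leftmost (most significant) doubleword. ++ */ ++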
++static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es) ++{ ++ /* Convert element size (es) - e.g. MO_8 - to bytes */ ++ const uint8_t bytes = 1 << es; ++ int offs = enr * bytes; ++ ++ /* ++ * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte ++ * of the 16 byte vector, on both, little and big endian systems. ++ * ++ * Big Endian (target/possible host) ++ * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15] ++ * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7] ++ * W: [ 0][ 1] - [ 2][ 3] ++ * DW: [ 0] - [ 1] ++ * ++ * Little Endian (possible host) ++ * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8] ++ * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4] ++ * W: [ 1][ 0] - [ 3][ 2] ++ * DW: [ 0] - [ 1] ++ * ++ * For 16 byte elements, the two 8 byte halves will not form a host ++ * int128 if the host is little endian, since they're in the wrong order. ++ * Some operations (e.g. xor) do not care. For operations like addition, ++ * the two 8 byte elements have to be loaded separately. Let's force all ++ * 16 byte operations to handle it in a special way. ++ */ ++ g_assert(es <= MO_64); ++#ifndef HOST_WORDS_BIGENDIAN ++ offs ^= (8 - bytes); ++#endif ++ return offs + vec_full_reg_offset(reg); ++} ++ ++static inline int freg64_offset(uint8_t reg) ++{ ++ g_assert(reg < 16); ++ return vec_reg_offset(reg, 0, MO_64); ++} ++ ++static inline int freg32_offset(uint8_t reg) ++{ ++ g_assert(reg < 16); ++ return vec_reg_offset(reg, 0, MO_32); ++} ++ ++static TCGv_i64 load_reg(int reg) ++{ ++ TCGv_i64 r = tcg_temp_new_i64(); ++ tcg_gen_mov_i64(r, regs[reg]); ++ return r; ++} ++ ++static TCGv_i64 load_freg(int reg) ++{ ++ TCGv_i64 r = tcg_temp_new_i64(); ++ ++ tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg)); ++ return r; ++} ++ ++static TCGv_i64 load_freg32_i64(int reg) ++{ ++ TCGv_i64 r = tcg_temp_new_i64(); ++ ++ tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg)); ++ return r; ++} ++ ++static void store_reg(int reg, TCGv_i64 v) ++{ ++ tcg_gen_mov_i64(regs[reg], v); ++} ++ ++static void store_freg(int reg, TCGv_i64 v) ++{ ++ tcg_gen_st_i64(v, cpu_env, freg64_offset(reg)); ++} ++ ++static void store_reg32_i64(int reg, TCGv_i64 v) ++{ ++ /* 32 bit register writes keep the upper half */ ++ tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32); ++} ++ ++static void store_reg32h_i64(int reg, TCGv_i64 v) ++{ ++ tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32); ++} ++ ++static void store_freg32_i64(int reg, TCGv_i64 v) ++{ ++ tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg)); ++} ++ ++static void return_low128(TCGv_i64 dest) ++{ ++ tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl)); ++} ++ ++static void update_psw_addr(DisasContext *s) ++{ ++ /* psw.addr */ ++ tcg_gen_movi_i64(psw_addr, s->base.pc_next); ++} ++ ++static void per_branch(DisasContext *s, bool to_next) ++{ ++#ifndef CONFIG_USER_ONLY ++ tcg_gen_movi_i64(gbea, s->base.pc_next); ++ ++ if (s->base.tb->flags & FLAG_MASK_PER) { ++ TCGv_i64 next_pc = to_next ? 
tcg_const_i64(s->pc_tmp) : psw_addr; ++ gen_helper_per_branch(cpu_env, gbea, next_pc); ++ if (to_next) { ++ tcg_temp_free_i64(next_pc); ++ } ++ } ++#endif ++} ++ ++static void per_branch_cond(DisasContext *s, TCGCond cond, ++ TCGv_i64 arg1, TCGv_i64 arg2) ++{ ++#ifndef CONFIG_USER_ONLY ++ if (s->base.tb->flags & FLAG_MASK_PER) { ++ TCGLabel *lab = gen_new_label(); ++ tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab); ++ ++ tcg_gen_movi_i64(gbea, s->base.pc_next); ++ gen_helper_per_branch(cpu_env, gbea, psw_addr); ++ ++ gen_set_label(lab); ++ } else { ++ TCGv_i64 pc = tcg_const_i64(s->base.pc_next); ++ tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc); ++ tcg_temp_free_i64(pc); ++ } ++#endif ++} ++ ++static void per_breaking_event(DisasContext *s) ++{ ++ tcg_gen_movi_i64(gbea, s->base.pc_next); ++} ++ ++static void update_cc_op(DisasContext *s) ++{ ++ if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) { ++ tcg_gen_movi_i32(cc_op, s->cc_op); ++ } ++} ++ ++static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc) ++{ ++ return (uint64_t)cpu_lduw_code(env, pc); ++} ++ ++static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc) ++{ ++ return (uint64_t)(uint32_t)cpu_ldl_code(env, pc); ++} ++ ++static int get_mem_index(DisasContext *s) ++{ ++#ifdef CONFIG_USER_ONLY ++ return MMU_USER_IDX; ++#else ++ if (!(s->base.tb->flags & FLAG_MASK_DAT)) { ++ return MMU_REAL_IDX; ++ } ++ ++ switch (s->base.tb->flags & FLAG_MASK_ASC) { ++ case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT: ++ return MMU_PRIMARY_IDX; ++ case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT: ++ return MMU_SECONDARY_IDX; ++ case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT: ++ return MMU_HOME_IDX; ++ default: ++ tcg_abort(); ++ break; ++ } ++#endif ++} ++ ++static void gen_exception(int excp) ++{ ++ TCGv_i32 tmp = tcg_const_i32(excp); ++ gen_helper_exception(cpu_env, tmp); ++ tcg_temp_free_i32(tmp); ++} ++ ++static void gen_program_exception(DisasContext *s, int code) ++{ ++ TCGv_i32 tmp; ++ ++ /* Remember what pgm exception this was. */ ++ tmp = tcg_const_i32(code); ++ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code)); ++ tcg_temp_free_i32(tmp); ++ ++ tmp = tcg_const_i32(s->ilen); ++ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen)); ++ tcg_temp_free_i32(tmp); ++ ++ /* update the psw */ ++ update_psw_addr(s); ++ ++ /* Save off cc. */ ++ update_cc_op(s); ++ ++ /* Trigger exception. */ ++ gen_exception(EXCP_PGM); ++} ++ ++static inline void gen_illegal_opcode(DisasContext *s) ++{ ++ gen_program_exception(s, PGM_OPERATION); ++} ++ ++static inline void gen_data_exception(uint8_t dxc) ++{ ++ TCGv_i32 tmp = tcg_const_i32(dxc); ++ gen_helper_data_exception(cpu_env, tmp); ++ tcg_temp_free_i32(tmp); ++} ++ ++static inline void gen_trap(DisasContext *s) ++{ ++ /* Set DXC to 0xff */ ++ gen_data_exception(0xff); ++} ++ ++static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src, ++ int64_t imm) ++{ ++ tcg_gen_addi_i64(dst, src, imm); ++ if (!(s->base.tb->flags & FLAG_MASK_64)) { ++ if (s->base.tb->flags & FLAG_MASK_32) { ++ tcg_gen_andi_i64(dst, dst, 0x7fffffff); ++ } else { ++ tcg_gen_andi_i64(dst, dst, 0x00ffffff); ++ } ++ } ++} ++ ++static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2) ++{ ++ TCGv_i64 tmp = tcg_temp_new_i64(); ++ ++ /* ++ * Note that d2 is limited to 20 bits, signed. If we crop negative ++ * displacements early we create larger immediate addends. 
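++ * E.g. d2 = -4 is sign-extended to 0xfffffffffffffffc for the 64-bit add, ++ * and gen_addi_and_wrap_i64() then crops the result to 24 or 31 bits when ++ * not running in 64-bit mode.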
++ */ ++ if (b2 && x2) { ++ tcg_gen_add_i64(tmp, regs[b2], regs[x2]); ++ gen_addi_and_wrap_i64(s, tmp, tmp, d2); ++ } else if (b2) { ++ gen_addi_and_wrap_i64(s, tmp, regs[b2], d2); ++ } else if (x2) { ++ gen_addi_and_wrap_i64(s, tmp, regs[x2], d2); ++ } else if (!(s->base.tb->flags & FLAG_MASK_64)) { ++ if (s->base.tb->flags & FLAG_MASK_32) { ++ tcg_gen_movi_i64(tmp, d2 & 0x7fffffff); ++ } else { ++ tcg_gen_movi_i64(tmp, d2 & 0x00ffffff); ++ } ++ } else { ++ tcg_gen_movi_i64(tmp, d2); ++ } ++ ++ return tmp; ++} ++ ++static inline bool live_cc_data(DisasContext *s) ++{ ++ return (s->cc_op != CC_OP_DYNAMIC ++ && s->cc_op != CC_OP_STATIC ++ && s->cc_op > 3); ++} ++ ++static inline void gen_op_movi_cc(DisasContext *s, uint32_t val) ++{ ++ if (live_cc_data(s)) { ++ tcg_gen_discard_i64(cc_src); ++ tcg_gen_discard_i64(cc_dst); ++ tcg_gen_discard_i64(cc_vr); ++ } ++ s->cc_op = CC_OP_CONST0 + val; ++} ++ ++static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst) ++{ ++ if (live_cc_data(s)) { ++ tcg_gen_discard_i64(cc_src); ++ tcg_gen_discard_i64(cc_vr); ++ } ++ tcg_gen_mov_i64(cc_dst, dst); ++ s->cc_op = op; ++} ++ ++static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src, ++ TCGv_i64 dst) ++{ ++ if (live_cc_data(s)) { ++ tcg_gen_discard_i64(cc_vr); ++ } ++ tcg_gen_mov_i64(cc_src, src); ++ tcg_gen_mov_i64(cc_dst, dst); ++ s->cc_op = op; ++} ++ ++static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src, ++ TCGv_i64 dst, TCGv_i64 vr) ++{ ++ tcg_gen_mov_i64(cc_src, src); ++ tcg_gen_mov_i64(cc_dst, dst); ++ tcg_gen_mov_i64(cc_vr, vr); ++ s->cc_op = op; ++} ++ ++static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_NZ, val); ++} ++ ++/* CC value is in env->cc_op */ ++static void set_cc_static(DisasContext *s) ++{ ++ if (live_cc_data(s)) { ++ tcg_gen_discard_i64(cc_src); ++ tcg_gen_discard_i64(cc_dst); ++ tcg_gen_discard_i64(cc_vr); ++ } ++ s->cc_op = CC_OP_STATIC; ++} ++ ++/* calculates cc into cc_op */ ++static void gen_op_calc_cc(DisasContext *s) ++{ ++ TCGv_i32 local_cc_op = NULL; ++ TCGv_i64 dummy = NULL; ++ ++ switch (s->cc_op) { ++ default: ++ dummy = tcg_const_i64(0); ++ /* FALLTHRU */ ++ case CC_OP_ADD_64: ++ case CC_OP_SUB_64: ++ case CC_OP_ADD_32: ++ case CC_OP_SUB_32: ++ local_cc_op = tcg_const_i32(s->cc_op); ++ break; ++ case CC_OP_CONST0: ++ case CC_OP_CONST1: ++ case CC_OP_CONST2: ++ case CC_OP_CONST3: ++ case CC_OP_STATIC: ++ case CC_OP_DYNAMIC: ++ break; ++ } ++ ++ switch (s->cc_op) { ++ case CC_OP_CONST0: ++ case CC_OP_CONST1: ++ case CC_OP_CONST2: ++ case CC_OP_CONST3: ++ /* s->cc_op is the cc value */ ++ tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0); ++ break; ++ case CC_OP_STATIC: ++ /* env->cc_op already is the cc value */ ++ break; ++ case CC_OP_NZ: ++ case CC_OP_ABS_64: ++ case CC_OP_NABS_64: ++ case CC_OP_ABS_32: ++ case CC_OP_NABS_32: ++ case CC_OP_LTGT0_32: ++ case CC_OP_LTGT0_64: ++ case CC_OP_COMP_32: ++ case CC_OP_COMP_64: ++ case CC_OP_NZ_F32: ++ case CC_OP_NZ_F64: ++ case CC_OP_FLOGR: ++ case CC_OP_LCBB: ++ case CC_OP_MULS_32: ++ /* 1 argument */ ++ gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy); ++ break; ++ case CC_OP_ADDU: ++ case CC_OP_ICM: ++ case CC_OP_LTGT_32: ++ case CC_OP_LTGT_64: ++ case CC_OP_LTUGTU_32: ++ case CC_OP_LTUGTU_64: ++ case CC_OP_TM_32: ++ case CC_OP_TM_64: ++ case CC_OP_SLA_32: ++ case CC_OP_SLA_64: ++ case CC_OP_SUBU: ++ case CC_OP_NZ_F128: ++ case CC_OP_VC: ++ case CC_OP_MULS_64: ++ /* 2 arguments */ ++ 
gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy); ++ break; ++ case CC_OP_ADD_64: ++ case CC_OP_SUB_64: ++ case CC_OP_ADD_32: ++ case CC_OP_SUB_32: ++ /* 3 arguments */ ++ gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr); ++ break; ++ case CC_OP_DYNAMIC: ++ /* unknown operation - assume 3 arguments and cc_op in env */ ++ gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr); ++ break; ++ default: ++ tcg_abort(); ++ } ++ ++ if (local_cc_op) { ++ tcg_temp_free_i32(local_cc_op); ++ } ++ if (dummy) { ++ tcg_temp_free_i64(dummy); ++ } ++ ++ /* We now have cc in cc_op as constant */ ++ set_cc_static(s); ++} ++ ++static bool use_goto_tb(DisasContext *s, uint64_t dest) ++{ ++ if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) { ++ return false; ++ } ++ return translator_use_goto_tb(&s->base, dest); ++} ++ ++static void account_noninline_branch(DisasContext *s, int cc_op) ++{ ++#ifdef DEBUG_INLINE_BRANCHES ++ inline_branch_miss[cc_op]++; ++#endif ++} ++ ++static void account_inline_branch(DisasContext *s, int cc_op) ++{ ++#ifdef DEBUG_INLINE_BRANCHES ++ inline_branch_hit[cc_op]++; ++#endif ++} ++ ++/* Table of mask values to comparison codes, given a comparison as input. ++ For such, CC=3 should not be possible. */ ++static const TCGCond ltgt_cond[16] = { ++ TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */ ++ TCG_COND_GT, TCG_COND_GT, /* | | GT | x */ ++ TCG_COND_LT, TCG_COND_LT, /* | LT | | x */ ++ TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */ ++ TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */ ++ TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */ ++ TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */ ++ TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */ ++}; ++ ++/* Table of mask values to comparison codes, given a logic op as input. ++ For such, only CC=0 and CC=1 should be possible. */ ++static const TCGCond nz_cond[16] = { ++ TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */ ++ TCG_COND_NEVER, TCG_COND_NEVER, ++ TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */ ++ TCG_COND_NE, TCG_COND_NE, ++ TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */ ++ TCG_COND_EQ, TCG_COND_EQ, ++ TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */ ++ TCG_COND_ALWAYS, TCG_COND_ALWAYS, ++}; ++ ++/* Interpret MASK in terms of S->CC_OP, and fill in C with all the ++ details required to generate a TCG comparison. */ ++static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) ++{ ++ TCGCond cond; ++ enum cc_op old_cc_op = s->cc_op; ++ ++ if (mask == 15 || mask == 0) { ++ c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER); ++ c->u.s32.a = cc_op; ++ c->u.s32.b = cc_op; ++ c->g1 = c->g2 = true; ++ c->is_64 = false; ++ return; ++ } ++ ++ /* Find the TCG condition for the mask + cc op. 
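E.g. for CC_OP_LTGT_32 a mask of 0x4 (branch on cc == 1, i.e. LT) yields TCG_COND_LT from ltgt_cond[] below. 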
*/ ++ switch (old_cc_op) { ++ case CC_OP_LTGT0_32: ++ case CC_OP_LTGT0_64: ++ case CC_OP_LTGT_32: ++ case CC_OP_LTGT_64: ++ cond = ltgt_cond[mask]; ++ if (cond == TCG_COND_NEVER) { ++ goto do_dynamic; ++ } ++ account_inline_branch(s, old_cc_op); ++ break; ++ ++ case CC_OP_LTUGTU_32: ++ case CC_OP_LTUGTU_64: ++ cond = tcg_unsigned_cond(ltgt_cond[mask]); ++ if (cond == TCG_COND_NEVER) { ++ goto do_dynamic; ++ } ++ account_inline_branch(s, old_cc_op); ++ break; ++ ++ case CC_OP_NZ: ++ cond = nz_cond[mask]; ++ if (cond == TCG_COND_NEVER) { ++ goto do_dynamic; ++ } ++ account_inline_branch(s, old_cc_op); ++ break; ++ ++ case CC_OP_TM_32: ++ case CC_OP_TM_64: ++ switch (mask) { ++ case 8: ++ cond = TCG_COND_EQ; ++ break; ++ case 4 | 2 | 1: ++ cond = TCG_COND_NE; ++ break; ++ default: ++ goto do_dynamic; ++ } ++ account_inline_branch(s, old_cc_op); ++ break; ++ ++ case CC_OP_ICM: ++ switch (mask) { ++ case 8: ++ cond = TCG_COND_EQ; ++ break; ++ case 4 | 2 | 1: ++ case 4 | 2: ++ cond = TCG_COND_NE; ++ break; ++ default: ++ goto do_dynamic; ++ } ++ account_inline_branch(s, old_cc_op); ++ break; ++ ++ case CC_OP_FLOGR: ++ switch (mask & 0xa) { ++ case 8: /* src == 0 -> no one bit found */ ++ cond = TCG_COND_EQ; ++ break; ++ case 2: /* src != 0 -> one bit found */ ++ cond = TCG_COND_NE; ++ break; ++ default: ++ goto do_dynamic; ++ } ++ account_inline_branch(s, old_cc_op); ++ break; ++ ++ case CC_OP_ADDU: ++ case CC_OP_SUBU: ++ switch (mask) { ++ case 8 | 2: /* result == 0 */ ++ cond = TCG_COND_EQ; ++ break; ++ case 4 | 1: /* result != 0 */ ++ cond = TCG_COND_NE; ++ break; ++ case 8 | 4: /* !carry (borrow) */ ++ cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE; ++ break; ++ case 2 | 1: /* carry (!borrow) */ ++ cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ; ++ break; ++ default: ++ goto do_dynamic; ++ } ++ account_inline_branch(s, old_cc_op); ++ break; ++ ++ default: ++ do_dynamic: ++ /* Calculate cc value. */ ++ gen_op_calc_cc(s); ++ /* FALLTHRU */ ++ ++ case CC_OP_STATIC: ++ /* Jump based on CC. We'll load up the real cond below; ++ the assignment here merely avoids a compiler warning. */ ++ account_noninline_branch(s, old_cc_op); ++ old_cc_op = CC_OP_STATIC; ++ cond = TCG_COND_NEVER; ++ break; ++ } ++ ++ /* Load up the arguments of the comparison. 
*/ ++ c->is_64 = true; ++ c->g1 = c->g2 = false; ++ switch (old_cc_op) { ++ case CC_OP_LTGT0_32: ++ c->is_64 = false; ++ c->u.s32.a = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst); ++ c->u.s32.b = tcg_const_i32(0); ++ break; ++ case CC_OP_LTGT_32: ++ case CC_OP_LTUGTU_32: ++ c->is_64 = false; ++ c->u.s32.a = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src); ++ c->u.s32.b = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst); ++ break; ++ ++ case CC_OP_LTGT0_64: ++ case CC_OP_NZ: ++ case CC_OP_FLOGR: ++ c->u.s64.a = cc_dst; ++ c->u.s64.b = tcg_const_i64(0); ++ c->g1 = true; ++ break; ++ case CC_OP_LTGT_64: ++ case CC_OP_LTUGTU_64: ++ c->u.s64.a = cc_src; ++ c->u.s64.b = cc_dst; ++ c->g1 = c->g2 = true; ++ break; ++ ++ case CC_OP_TM_32: ++ case CC_OP_TM_64: ++ case CC_OP_ICM: ++ c->u.s64.a = tcg_temp_new_i64(); ++ c->u.s64.b = tcg_const_i64(0); ++ tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst); ++ break; ++ ++ case CC_OP_ADDU: ++ case CC_OP_SUBU: ++ c->is_64 = true; ++ c->u.s64.b = tcg_const_i64(0); ++ c->g1 = true; ++ switch (mask) { ++ case 8 | 2: ++ case 4 | 1: /* result */ ++ c->u.s64.a = cc_dst; ++ break; ++ case 8 | 4: ++ case 2 | 1: /* carry */ ++ c->u.s64.a = cc_src; ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ break; ++ ++ case CC_OP_STATIC: ++ c->is_64 = false; ++ c->u.s32.a = cc_op; ++ c->g1 = true; ++ switch (mask) { ++ case 0x8 | 0x4 | 0x2: /* cc != 3 */ ++ cond = TCG_COND_NE; ++ c->u.s32.b = tcg_const_i32(3); ++ break; ++ case 0x8 | 0x4 | 0x1: /* cc != 2 */ ++ cond = TCG_COND_NE; ++ c->u.s32.b = tcg_const_i32(2); ++ break; ++ case 0x8 | 0x2 | 0x1: /* cc != 1 */ ++ cond = TCG_COND_NE; ++ c->u.s32.b = tcg_const_i32(1); ++ break; ++ case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */ ++ cond = TCG_COND_EQ; ++ c->g1 = false; ++ c->u.s32.a = tcg_temp_new_i32(); ++ c->u.s32.b = tcg_const_i32(0); ++ tcg_gen_andi_i32(c->u.s32.a, cc_op, 1); ++ break; ++ case 0x8 | 0x4: /* cc < 2 */ ++ cond = TCG_COND_LTU; ++ c->u.s32.b = tcg_const_i32(2); ++ break; ++ case 0x8: /* cc == 0 */ ++ cond = TCG_COND_EQ; ++ c->u.s32.b = tcg_const_i32(0); ++ break; ++ case 0x4 | 0x2 | 0x1: /* cc != 0 */ ++ cond = TCG_COND_NE; ++ c->u.s32.b = tcg_const_i32(0); ++ break; ++ case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */ ++ cond = TCG_COND_NE; ++ c->g1 = false; ++ c->u.s32.a = tcg_temp_new_i32(); ++ c->u.s32.b = tcg_const_i32(0); ++ tcg_gen_andi_i32(c->u.s32.a, cc_op, 1); ++ break; ++ case 0x4: /* cc == 1 */ ++ cond = TCG_COND_EQ; ++ c->u.s32.b = tcg_const_i32(1); ++ break; ++ case 0x2 | 0x1: /* cc > 1 */ ++ cond = TCG_COND_GTU; ++ c->u.s32.b = tcg_const_i32(1); ++ break; ++ case 0x2: /* cc == 2 */ ++ cond = TCG_COND_EQ; ++ c->u.s32.b = tcg_const_i32(2); ++ break; ++ case 0x1: /* cc == 3 */ ++ cond = TCG_COND_EQ; ++ c->u.s32.b = tcg_const_i32(3); ++ break; ++ default: ++ /* CC is masked by something else: (8 >> cc) & mask. 
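The branch is taken iff the mask bit of weight (8 >> cc) is set; e.g. mask = 0x6 branches on cc 1 or cc 2. 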
*/ ++ cond = TCG_COND_NE; ++ c->g1 = false; ++ c->u.s32.a = tcg_const_i32(8); ++ c->u.s32.b = tcg_const_i32(0); ++ tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op); ++ tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask); ++ break; ++ } ++ break; ++ ++ default: ++ abort(); ++ } ++ c->cond = cond; ++} ++ ++static void free_compare(DisasCompare *c) ++{ ++ if (!c->g1) { ++ if (c->is_64) { ++ tcg_temp_free_i64(c->u.s64.a); ++ } else { ++ tcg_temp_free_i32(c->u.s32.a); ++ } ++ } ++ if (!c->g2) { ++ if (c->is_64) { ++ tcg_temp_free_i64(c->u.s64.b); ++ } else { ++ tcg_temp_free_i32(c->u.s32.b); ++ } ++ } ++} ++ ++/* ====================================================================== */ ++/* Define the insn format enumeration. */ ++#define F0(N) FMT_##N, ++#define F1(N, X1) F0(N) ++#define F2(N, X1, X2) F0(N) ++#define F3(N, X1, X2, X3) F0(N) ++#define F4(N, X1, X2, X3, X4) F0(N) ++#define F5(N, X1, X2, X3, X4, X5) F0(N) ++#define F6(N, X1, X2, X3, X4, X5, X6) F0(N) ++ ++typedef enum { ++#include "insn-format.def" ++} DisasFormat; ++ ++#undef F0 ++#undef F1 ++#undef F2 ++#undef F3 ++#undef F4 ++#undef F5 ++#undef F6 ++ ++/* This is the way fields are to be accessed out of DisasFields. */ ++#define have_field(S, F) have_field1((S), FLD_O_##F) ++#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F) ++ ++static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c) ++{ ++ return (s->fields.presentO >> c) & 1; ++} ++ ++static int get_field1(const DisasContext *s, enum DisasFieldIndexO o, ++ enum DisasFieldIndexC c) ++{ ++ assert(have_field1(s, o)); ++ return s->fields.c[c]; ++} ++ ++/* Describe the layout of each field in each format. */ ++typedef struct DisasField { ++ unsigned int beg:8; ++ unsigned int size:8; ++ unsigned int type:2; ++ unsigned int indexC:6; ++ enum DisasFieldIndexO indexO:8; ++} DisasField; ++ ++typedef struct DisasFormatInfo { ++ DisasField op[NUM_C_FIELD]; ++} DisasFormatInfo; ++ ++#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N } ++#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N } ++#define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N } ++#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ ++ { BD, 12, 0, FLD_C_d##N, FLD_O_d##N } ++#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ ++ { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \ ++ { 20, 12, 0, FLD_C_d##N, FLD_O_d##N } ++#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ ++ { 20, 20, 2, FLD_C_d##N, FLD_O_d##N } ++#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ ++ { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \ ++ { 20, 20, 2, FLD_C_d##N, FLD_O_d##N } ++#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N } ++#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N } ++ ++#define F0(N) { { } }, ++#define F1(N, X1) { { X1 } }, ++#define F2(N, X1, X2) { { X1, X2 } }, ++#define F3(N, X1, X2, X3) { { X1, X2, X3 } }, ++#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } }, ++#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } }, ++#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } }, ++ ++static const DisasFormatInfo format_info[] = { ++#include "insn-format.def" ++}; ++ ++#undef F0 ++#undef F1 ++#undef F2 ++#undef F3 ++#undef F4 ++#undef F5 ++#undef F6 ++#undef R ++#undef M ++#undef V ++#undef BD ++#undef BXD ++#undef BDL ++#undef BXDL ++#undef I ++#undef L ++ ++/* Generally, we'll extract operands into these structures, operate upon ++ them, and store them back. See the "in1", "in2", "prep", "wout" sets ++ of routines below for more details. 
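++ E.g. op_add() below merely computes o->out = o->in1 + o->in2; loading the ++ inputs and storing the result is done by that machinery.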
*/ ++typedef struct { ++ bool g_out, g_out2, g_in1, g_in2; ++ TCGv_i64 out, out2, in1, in2; ++ TCGv_i64 addr1; ++} DisasOps; ++ ++/* Instructions can place constraints on their operands, raising specification ++ exceptions if they are violated. To make this easy to automate, each "in1", ++ "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one ++ of the following, or 0. To make this easy to document, we'll put the ++ SPEC_<name> defines next to <name>. */ ++ ++#define SPEC_r1_even 1 ++#define SPEC_r2_even 2 ++#define SPEC_r3_even 4 ++#define SPEC_r1_f128 8 ++#define SPEC_r2_f128 16 ++ ++/* Return values from translate_one, indicating the state of the TB. */ ++ ++/* We are not using a goto_tb (for whatever reason), but have updated ++ the PC (for whatever reason), so there's no need to do it again on ++ exiting the TB. */ ++#define DISAS_PC_UPDATED DISAS_TARGET_0 ++ ++/* We have emitted one or more goto_tb. No fixup required. */ ++#define DISAS_GOTO_TB DISAS_TARGET_1 ++ ++/* We have updated the PC and CC values. */ ++#define DISAS_PC_CC_UPDATED DISAS_TARGET_2 ++ ++/* We are exiting the TB, but have neither emitted a goto_tb, nor ++ updated the PC for the next instruction to be executed. */ ++#define DISAS_PC_STALE DISAS_TARGET_3 ++ ++/* We are exiting the TB to the main loop. */ ++#define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4 ++ ++ ++/* Instruction flags */ ++#define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */ ++#define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */ ++#define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */ ++#define IF_BFP 0x0008 /* binary floating point instruction */ ++#define IF_DFP 0x0010 /* decimal floating point instruction */ ++#define IF_PRIV 0x0020 /* privileged instruction */ ++#define IF_VEC 0x0040 /* vector instruction */ ++#define IF_IO 0x0080 /* input/output instruction */ ++ ++struct DisasInsn { ++ unsigned opc:16; ++ unsigned flags:16; ++ DisasFormat fmt:8; ++ unsigned fac:8; ++ unsigned spec:8; ++ ++ const char *name; ++ ++ /* Pre-process arguments before HELP_OP. */ ++ void (*help_in1)(DisasContext *, DisasOps *); ++ void (*help_in2)(DisasContext *, DisasOps *); ++ void (*help_prep)(DisasContext *, DisasOps *); ++ ++ /* ++ * Post-process output after HELP_OP. ++ * Note that these are not called if HELP_OP returns DISAS_NORETURN. ++ */ ++ void (*help_wout)(DisasContext *, DisasOps *); ++ void (*help_cout)(DisasContext *, DisasOps *); ++ ++ /* Implement the operation itself. */ ++ DisasJumpType (*help_op)(DisasContext *, DisasOps *); ++ ++ uint64_t data; ++}; ++ ++/* ====================================================================== */ ++/* Miscellaneous helpers, used by several operations. 
*/ ++ ++static void help_l2_shift(DisasContext *s, DisasOps *o, int mask) ++{ ++ int b2 = get_field(s, b2); ++ int d2 = get_field(s, d2); ++ ++ if (b2 == 0) { ++ o->in2 = tcg_const_i64(d2 & mask); ++ } else { ++ o->in2 = get_address(s, 0, b2, d2); ++ tcg_gen_andi_i64(o->in2, o->in2, mask); ++ } ++} ++ ++static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest) ++{ ++ if (dest == s->pc_tmp) { ++ per_branch(s, true); ++ return DISAS_NEXT; ++ } ++ if (use_goto_tb(s, dest)) { ++ update_cc_op(s); ++ per_breaking_event(s); ++ tcg_gen_goto_tb(0); ++ tcg_gen_movi_i64(psw_addr, dest); ++ tcg_gen_exit_tb(s->base.tb, 0); ++ return DISAS_GOTO_TB; ++ } else { ++ tcg_gen_movi_i64(psw_addr, dest); ++ per_branch(s, false); ++ return DISAS_PC_UPDATED; ++ } ++} ++ ++static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, ++ bool is_imm, int imm, TCGv_i64 cdest) ++{ ++ DisasJumpType ret; ++ uint64_t dest = s->base.pc_next + 2 * imm; ++ TCGLabel *lab; ++ ++ /* Take care of the special cases first. */ ++ if (c->cond == TCG_COND_NEVER) { ++ ret = DISAS_NEXT; ++ goto egress; ++ } ++ if (is_imm) { ++ if (dest == s->pc_tmp) { ++ /* Branch to next. */ ++ per_branch(s, true); ++ ret = DISAS_NEXT; ++ goto egress; ++ } ++ if (c->cond == TCG_COND_ALWAYS) { ++ ret = help_goto_direct(s, dest); ++ goto egress; ++ } ++ } else { ++ if (!cdest) { ++ /* E.g. bcr %r0 -> no branch. */ ++ ret = DISAS_NEXT; ++ goto egress; ++ } ++ if (c->cond == TCG_COND_ALWAYS) { ++ tcg_gen_mov_i64(psw_addr, cdest); ++ per_branch(s, false); ++ ret = DISAS_PC_UPDATED; ++ goto egress; ++ } ++ } ++ ++ if (use_goto_tb(s, s->pc_tmp)) { ++ if (is_imm && use_goto_tb(s, dest)) { ++ /* Both exits can use goto_tb. */ ++ update_cc_op(s); ++ ++ lab = gen_new_label(); ++ if (c->is_64) { ++ tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab); ++ } else { ++ tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab); ++ } ++ ++ /* Branch not taken. */ ++ tcg_gen_goto_tb(0); ++ tcg_gen_movi_i64(psw_addr, s->pc_tmp); ++ tcg_gen_exit_tb(s->base.tb, 0); ++ ++ /* Branch taken. */ ++ gen_set_label(lab); ++ per_breaking_event(s); ++ tcg_gen_goto_tb(1); ++ tcg_gen_movi_i64(psw_addr, dest); ++ tcg_gen_exit_tb(s->base.tb, 1); ++ ++ ret = DISAS_GOTO_TB; ++ } else { ++ /* Fallthru can use goto_tb, but taken branch cannot. */ ++ /* Store taken branch destination before the brcond. This ++ avoids having to allocate a new local temp to hold it. ++ We'll overwrite this in the not taken case anyway. */ ++ if (!is_imm) { ++ tcg_gen_mov_i64(psw_addr, cdest); ++ } ++ ++ lab = gen_new_label(); ++ if (c->is_64) { ++ tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab); ++ } else { ++ tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab); ++ } ++ ++ /* Branch not taken. */ ++ update_cc_op(s); ++ tcg_gen_goto_tb(0); ++ tcg_gen_movi_i64(psw_addr, s->pc_tmp); ++ tcg_gen_exit_tb(s->base.tb, 0); ++ ++ gen_set_label(lab); ++ if (is_imm) { ++ tcg_gen_movi_i64(psw_addr, dest); ++ } ++ per_breaking_event(s); ++ ret = DISAS_PC_UPDATED; ++ } ++ } else { ++ /* Fallthru cannot use goto_tb. This by itself is vanishingly rare. ++ Most commonly we're single-stepping or some other condition that ++ disables all use of goto_tb. Just update the PC and exit. 
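++ The movcond below then selects between the taken destination and ++ s->pc_tmp in a single step, with no goto_tb involved.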
*/ ++ ++ TCGv_i64 next = tcg_const_i64(s->pc_tmp); ++ if (is_imm) { ++ cdest = tcg_const_i64(dest); ++ } ++ ++ if (c->is_64) { ++ tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b, ++ cdest, next); ++ per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b); ++ } else { ++ TCGv_i32 t0 = tcg_temp_new_i32(); ++ TCGv_i64 t1 = tcg_temp_new_i64(); ++ TCGv_i64 z = tcg_const_i64(0); ++ tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b); ++ tcg_gen_extu_i32_i64(t1, t0); ++ tcg_temp_free_i32(t0); ++ tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next); ++ per_branch_cond(s, TCG_COND_NE, t1, z); ++ tcg_temp_free_i64(t1); ++ tcg_temp_free_i64(z); ++ } ++ ++ if (is_imm) { ++ tcg_temp_free_i64(cdest); ++ } ++ tcg_temp_free_i64(next); ++ ++ ret = DISAS_PC_UPDATED; ++ } ++ ++ egress: ++ free_compare(c); ++ return ret; ++} ++ ++/* ====================================================================== */ ++/* The operations. These perform the bulk of the work for any insn, ++ usually after the operands have been loaded and output initialized. */ ++ ++static DisasJumpType op_abs(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_abs_i64(o->out, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_absf32(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_absf64(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_absf128(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull); ++ tcg_gen_mov_i64(o->out2, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_add(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_add_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_addu64(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_movi_i64(cc_src, 0); ++ tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src); ++ return DISAS_NEXT; ++} ++ ++/* Compute carry into cc_src. */ ++static void compute_carry(DisasContext *s) ++{ ++ switch (s->cc_op) { ++ case CC_OP_ADDU: ++ /* The carry value is already in cc_src (1,0). */ ++ break; ++ case CC_OP_SUBU: ++ tcg_gen_addi_i64(cc_src, cc_src, 1); ++ break; ++ default: ++ gen_op_calc_cc(s); ++ /* fall through */ ++ case CC_OP_STATIC: ++ /* The carry flag is the msb of CC; compute into cc_src. */ ++ tcg_gen_extu_i32_i64(cc_src, cc_op); ++ tcg_gen_shri_i64(cc_src, cc_src, 1); ++ break; ++ } ++} ++ ++static DisasJumpType op_addc32(DisasContext *s, DisasOps *o) ++{ ++ compute_carry(s); ++ tcg_gen_add_i64(o->out, o->in1, o->in2); ++ tcg_gen_add_i64(o->out, o->out, cc_src); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_addc64(DisasContext *s, DisasOps *o) ++{ ++ compute_carry(s); ++ ++ TCGv_i64 zero = tcg_const_i64(0); ++ tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero); ++ tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero); ++ tcg_temp_free_i64(zero); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_asi(DisasContext *s, DisasOps *o) ++{ ++ bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45); ++ ++ o->in1 = tcg_temp_new_i64(); ++ if (non_atomic) { ++ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data); ++ } else { ++ /* Perform the atomic addition in memory. */ ++ tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s), ++ s->insn->data); ++ } ++ ++ /* Recompute also for atomic case: needed for setting CC. 
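The atomic fetch-add above returned the old memory value in o->in1, so the sum must be re-derived here. 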
*/ ++ tcg_gen_add_i64(o->out, o->in1, o->in2); ++ ++ if (non_atomic) { ++ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o) ++{ ++ bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45); ++ ++ o->in1 = tcg_temp_new_i64(); ++ if (non_atomic) { ++ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data); ++ } else { ++ /* Perform the atomic addition in memory. */ ++ tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s), ++ s->insn->data); ++ } ++ ++ /* Recompute also for atomic case: needed for setting CC. */ ++ tcg_gen_movi_i64(cc_src, 0); ++ tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src); ++ ++ if (non_atomic) { ++ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_aeb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_aeb(o->out, cpu_env, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_adb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_adb(o->out, cpu_env, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_axb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2); ++ return_low128(o->out2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_and(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_and_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_andi(DisasContext *s, DisasOps *o) ++{ ++ int shift = s->insn->data & 0xff; ++ int size = s->insn->data >> 8; ++ uint64_t mask = ((1ull << size) - 1) << shift; ++ ++ assert(!o->g_in2); ++ tcg_gen_shli_i64(o->in2, o->in2, shift); ++ tcg_gen_ori_i64(o->in2, o->in2, ~mask); ++ tcg_gen_and_i64(o->out, o->in1, o->in2); ++ ++ /* Produce the CC from only the bits manipulated. */ ++ tcg_gen_andi_i64(cc_dst, o->out, mask); ++ set_cc_nz_u64(s, cc_dst); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ni(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ ++ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { ++ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data); ++ } else { ++ /* Perform the atomic operation in memory. */ ++ tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s), ++ s->insn->data); ++ } ++ ++ /* Recompute also for atomic case: needed for setting CC. 
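++ As with ASI above, in1 holds the pre-operation memory value
++ in both the interlocked and the plain load/store case.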
*/ ++ tcg_gen_and_i64(o->out, o->in1, o->in2); ++ ++ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { ++ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_bas(DisasContext *s, DisasOps *o) ++{ ++ pc_to_link_info(o->out, s, s->pc_tmp); ++ if (o->in2) { ++ tcg_gen_mov_i64(psw_addr, o->in2); ++ per_branch(s, false); ++ return DISAS_PC_UPDATED; ++ } else { ++ return DISAS_NEXT; ++ } ++} ++ ++static void save_link_info(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 t; ++ ++ if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) { ++ pc_to_link_info(o->out, s, s->pc_tmp); ++ return; ++ } ++ gen_op_calc_cc(s); ++ tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull); ++ tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp); ++ t = tcg_temp_new_i64(); ++ tcg_gen_shri_i64(t, psw_mask, 16); ++ tcg_gen_andi_i64(t, t, 0x0f000000); ++ tcg_gen_or_i64(o->out, o->out, t); ++ tcg_gen_extu_i32_i64(t, cc_op); ++ tcg_gen_shli_i64(t, t, 28); ++ tcg_gen_or_i64(o->out, o->out, t); ++ tcg_temp_free_i64(t); ++} ++ ++static DisasJumpType op_bal(DisasContext *s, DisasOps *o) ++{ ++ save_link_info(s, o); ++ if (o->in2) { ++ tcg_gen_mov_i64(psw_addr, o->in2); ++ per_branch(s, false); ++ return DISAS_PC_UPDATED; ++ } else { ++ return DISAS_NEXT; ++ } ++} ++ ++static DisasJumpType op_basi(DisasContext *s, DisasOps *o) ++{ ++ pc_to_link_info(o->out, s, s->pc_tmp); ++ return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2)); ++} ++ ++static DisasJumpType op_bc(DisasContext *s, DisasOps *o) ++{ ++ int m1 = get_field(s, m1); ++ bool is_imm = have_field(s, i2); ++ int imm = is_imm ? get_field(s, i2) : 0; ++ DisasCompare c; ++ ++ /* BCR with R2 = 0 causes no branching */ ++ if (have_field(s, r2) && get_field(s, r2) == 0) { ++ if (m1 == 14) { ++ /* Perform serialization */ ++ /* FIXME: check for fast-BCR-serialization facility */ ++ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); ++ } ++ if (m1 == 15) { ++ /* Perform serialization */ ++ /* FIXME: perform checkpoint-synchronisation */ ++ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); ++ } ++ return DISAS_NEXT; ++ } ++ ++ disas_jcc(s, &c, m1); ++ return help_branch(s, &c, is_imm, imm, o->in2); ++} ++ ++static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ bool is_imm = have_field(s, i2); ++ int imm = is_imm ? get_field(s, i2) : 0; ++ DisasCompare c; ++ TCGv_i64 t; ++ ++ c.cond = TCG_COND_NE; ++ c.is_64 = false; ++ c.g1 = false; ++ c.g2 = false; ++ ++ t = tcg_temp_new_i64(); ++ tcg_gen_subi_i64(t, regs[r1], 1); ++ store_reg32_i64(r1, t); ++ c.u.s32.a = tcg_temp_new_i32(); ++ c.u.s32.b = tcg_const_i32(0); ++ tcg_gen_extrl_i64_i32(c.u.s32.a, t); ++ tcg_temp_free_i64(t); ++ ++ return help_branch(s, &c, is_imm, imm, o->in2); ++} ++ ++static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int imm = get_field(s, i2); ++ DisasCompare c; ++ TCGv_i64 t; ++ ++ c.cond = TCG_COND_NE; ++ c.is_64 = false; ++ c.g1 = false; ++ c.g2 = false; ++ ++ t = tcg_temp_new_i64(); ++ tcg_gen_shri_i64(t, regs[r1], 32); ++ tcg_gen_subi_i64(t, t, 1); ++ store_reg32h_i64(r1, t); ++ c.u.s32.a = tcg_temp_new_i32(); ++ c.u.s32.b = tcg_const_i32(0); ++ tcg_gen_extrl_i64_i32(c.u.s32.a, t); ++ tcg_temp_free_i64(t); ++ ++ return help_branch(s, &c, 1, imm, o->in2); ++} ++ ++static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ bool is_imm = have_field(s, i2); ++ int imm = is_imm ? 
get_field(s, i2) : 0; ++ DisasCompare c; ++ ++ c.cond = TCG_COND_NE; ++ c.is_64 = true; ++ c.g1 = true; ++ c.g2 = false; ++ ++ tcg_gen_subi_i64(regs[r1], regs[r1], 1); ++ c.u.s64.a = regs[r1]; ++ c.u.s64.b = tcg_const_i64(0); ++ ++ return help_branch(s, &c, is_imm, imm, o->in2); ++} ++ ++static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ bool is_imm = have_field(s, i2); ++ int imm = is_imm ? get_field(s, i2) : 0; ++ DisasCompare c; ++ TCGv_i64 t; ++ ++ c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT); ++ c.is_64 = false; ++ c.g1 = false; ++ c.g2 = false; ++ ++ t = tcg_temp_new_i64(); ++ tcg_gen_add_i64(t, regs[r1], regs[r3]); ++ c.u.s32.a = tcg_temp_new_i32(); ++ c.u.s32.b = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(c.u.s32.a, t); ++ tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]); ++ store_reg32_i64(r1, t); ++ tcg_temp_free_i64(t); ++ ++ return help_branch(s, &c, is_imm, imm, o->in2); ++} ++ ++static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ bool is_imm = have_field(s, i2); ++ int imm = is_imm ? get_field(s, i2) : 0; ++ DisasCompare c; ++ ++ c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT); ++ c.is_64 = true; ++ ++ if (r1 == (r3 | 1)) { ++ c.u.s64.b = load_reg(r3 | 1); ++ c.g2 = false; ++ } else { ++ c.u.s64.b = regs[r3 | 1]; ++ c.g2 = true; ++ } ++ ++ tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]); ++ c.u.s64.a = regs[r1]; ++ c.g1 = true; ++ ++ return help_branch(s, &c, is_imm, imm, o->in2); ++} ++ ++static DisasJumpType op_cj(DisasContext *s, DisasOps *o) ++{ ++ int imm, m3 = get_field(s, m3); ++ bool is_imm; ++ DisasCompare c; ++ ++ c.cond = ltgt_cond[m3]; ++ if (s->insn->data) { ++ c.cond = tcg_unsigned_cond(c.cond); ++ } ++ c.is_64 = c.g1 = c.g2 = true; ++ c.u.s64.a = o->in1; ++ c.u.s64.b = o->in2; ++ ++ is_imm = have_field(s, i4); ++ if (is_imm) { ++ imm = get_field(s, i4); ++ } else { ++ imm = 0; ++ o->out = get_address(s, 0, get_field(s, b4), ++ get_field(s, d4)); ++ } ++ ++ return help_branch(s, &c, is_imm, imm, o->out); ++} ++ ++static DisasJumpType op_ceb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cdb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cxb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe, ++ bool m4_with_fpe) ++{ ++ const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT); ++ uint8_t m3 = get_field(s, m3); ++ uint8_t m4 = get_field(s, m4); ++ ++ /* m3 field was introduced with FPE */ ++ if (!fpe && m3_with_fpe) { ++ m3 = 0; ++ } ++ /* m4 field was introduced with FPE */ ++ if (!fpe && m4_with_fpe) { ++ m4 = 0; ++ } ++ ++ /* Check for valid rounding modes. Mode 3 was introduced later. 
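++ Modes 0, 1 and 4-7 are accepted unconditionally; mode 2 is
++ always invalid, and mode 3 additionally requires the
++ floating-point extension facility.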
*/ ++ if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return NULL; ++ } ++ ++ return tcg_const_i32(deposit32(m3, 4, 4, m4)); ++} ++ ++static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_cfeb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_cfdb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_cgeb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_cgdb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, false); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_clfeb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, false); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_clfdb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, false); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, false); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_clgeb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, false); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_clgdb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = 
fpinst_extract_m34(s, false, false); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cegb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, true, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_cegb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, true, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_cdgb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, true, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_cxgb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ return_low128(o->out2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_celgb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, false); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_celgb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, false); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_cdlgb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, false); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_cxlgb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ return_low128(o->out2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cksm(DisasContext *s, DisasOps *o) ++{ ++ int r2 = get_field(s, r2); ++ TCGv_i64 len = tcg_temp_new_i64(); ++ ++ gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]); ++ set_cc_static(s); ++ return_low128(o->out); ++ ++ tcg_gen_add_i64(regs[r2], regs[r2], len); ++ tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len); ++ tcg_temp_free_i64(len); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clc(DisasContext *s, DisasOps *o) ++{ ++ int l = get_field(s, l1); ++ TCGv_i32 vl; ++ ++ switch (l + 1) { ++ case 1: ++ tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s)); ++ tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s)); ++ break; ++ case 2: ++ tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s)); ++ tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s)); ++ break; ++ case 4: ++ tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s)); ++ tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s)); ++ break; ++ case 8: ++ tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s)); ++ tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s)); ++ break; ++ default: ++ vl = tcg_const_i32(l); ++ gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2); ++ tcg_temp_free_i32(vl); ++ set_cc_static(s); ++ return DISAS_NEXT; ++ } ++ gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clcl(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r2 = get_field(s, r2); ++ TCGv_i32 t1, t2; ++ ++ /* r1 and r2 must be even. 
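++ CLCL addresses its operands through the register pairs
++ r1/r1+1 and r2/r2+1, hence the specification exception for
++ odd register numbers.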
*/ ++ if (r1 & 1 || r2 & 1) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ t1 = tcg_const_i32(r1); ++ t2 = tcg_const_i32(r2); ++ gen_helper_clcl(cc_op, cpu_env, t1, t2); ++ tcg_temp_free_i32(t1); ++ tcg_temp_free_i32(t2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clcle(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ TCGv_i32 t1, t3; ++ ++ /* r1 and r3 must be even. */ ++ if (r1 & 1 || r3 & 1) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ t1 = tcg_const_i32(r1); ++ t3 = tcg_const_i32(r3); ++ gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3); ++ tcg_temp_free_i32(t1); ++ tcg_temp_free_i32(t3); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clclu(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ TCGv_i32 t1, t3; ++ ++ /* r1 and r3 must be even. */ ++ if (r1 & 1 || r3 & 1) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ t1 = tcg_const_i32(r1); ++ t3 = tcg_const_i32(r3); ++ gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3); ++ tcg_temp_free_i32(t1); ++ tcg_temp_free_i32(t3); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clm(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m3 = tcg_const_i32(get_field(s, m3)); ++ TCGv_i32 t1 = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(t1, o->in1); ++ gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2); ++ set_cc_static(s); ++ tcg_temp_free_i32(t1); ++ tcg_temp_free_i32(m3); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_clst(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2); ++ set_cc_static(s); ++ return_low128(o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cps(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 t = tcg_temp_new_i64(); ++ tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull); ++ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull); ++ tcg_gen_or_i64(o->out, o->out, t); ++ tcg_temp_free_i64(t); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cs(DisasContext *s, DisasOps *o) ++{ ++ int d2 = get_field(s, d2); ++ int b2 = get_field(s, b2); ++ TCGv_i64 addr, cc; ++ ++ /* Note that in1 = R3 (new value) and ++ in2 = (zero-extended) R1 (expected value). */ ++ ++ addr = get_address(s, 0, b2, d2); ++ tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1, ++ get_mem_index(s), s->insn->data | MO_ALIGN); ++ tcg_temp_free_i64(addr); ++ ++ /* Are the memory and expected values (un)equal? Note that this setcond ++ produces the output CC value, thus the NE sense of the test. */ ++ cc = tcg_temp_new_i64(); ++ tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out); ++ tcg_gen_extrl_i64_i32(cc_op, cc); ++ tcg_temp_free_i64(cc); ++ set_cc_static(s); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ int d2 = get_field(s, d2); ++ int b2 = get_field(s, b2); ++ DisasJumpType ret = DISAS_NEXT; ++ TCGv_i64 addr; ++ TCGv_i32 t_r1, t_r3; ++ ++ /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. 
*/ ++ addr = get_address(s, 0, b2, d2); ++ t_r1 = tcg_const_i32(r1); ++ t_r3 = tcg_const_i32(r3); ++ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { ++ gen_helper_cdsg(cpu_env, addr, t_r1, t_r3); ++ } else if (HAVE_CMPXCHG128) { ++ gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3); ++ } else { ++ gen_helper_exit_atomic(cpu_env); ++ ret = DISAS_NORETURN; ++ } ++ tcg_temp_free_i64(addr); ++ tcg_temp_free_i32(t_r1); ++ tcg_temp_free_i32(t_r3); ++ ++ set_cc_static(s); ++ return ret; ++} ++ ++static DisasJumpType op_csst(DisasContext *s, DisasOps *o) ++{ ++ int r3 = get_field(s, r3); ++ TCGv_i32 t_r3 = tcg_const_i32(r3); ++ ++ if (tb_cflags(s->base.tb) & CF_PARALLEL) { ++ gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2); ++ } else { ++ gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2); ++ } ++ tcg_temp_free_i32(t_r3); ++ ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_csp(DisasContext *s, DisasOps *o) ++{ ++ MemOp mop = s->insn->data; ++ TCGv_i64 addr, old, cc; ++ TCGLabel *lab = gen_new_label(); ++ ++ /* Note that in1 = R1 (zero-extended expected value), ++ out = R1 (original reg), out2 = R1+1 (new value). */ ++ ++ addr = tcg_temp_new_i64(); ++ old = tcg_temp_new_i64(); ++ tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE)); ++ tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2, ++ get_mem_index(s), mop | MO_ALIGN); ++ tcg_temp_free_i64(addr); ++ ++ /* Are the memory and expected values (un)equal? */ ++ cc = tcg_temp_new_i64(); ++ tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old); ++ tcg_gen_extrl_i64_i32(cc_op, cc); ++ ++ /* Write back the output now, so that it happens before the ++ following branch, so that we don't need local temps. */ ++ if ((mop & MO_SIZE) == MO_32) { ++ tcg_gen_deposit_i64(o->out, o->out, old, 0, 32); ++ } else { ++ tcg_gen_mov_i64(o->out, old); ++ } ++ tcg_temp_free_i64(old); ++ ++ /* If the comparison was equal, and the LSB of R2 was set, ++ then we need to flush the TLB (for all cpus). */ ++ tcg_gen_xori_i64(cc, cc, 1); ++ tcg_gen_and_i64(cc, cc, o->in2); ++ tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab); ++ tcg_temp_free_i64(cc); ++ ++ gen_helper_purge(cpu_env); ++ gen_set_label(lab); ++ ++ return DISAS_NEXT; ++} ++#endif ++ ++static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 t1 = tcg_temp_new_i64(); ++ TCGv_i32 t2 = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(t2, o->in1); ++ gen_helper_cvd(t1, t2); ++ tcg_temp_free_i32(t2); ++ tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s)); ++ tcg_temp_free_i64(t1); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ct(DisasContext *s, DisasOps *o) ++{ ++ int m3 = get_field(s, m3); ++ TCGLabel *lab = gen_new_label(); ++ TCGCond c; ++ ++ c = tcg_invert_cond(ltgt_cond[m3]); ++ if (s->insn->data) { ++ c = tcg_unsigned_cond(c); ++ } ++ tcg_gen_brcond_i64(c, o->in1, o->in2, lab); ++ ++ /* Trap. */ ++ gen_trap(s); ++ ++ gen_set_label(lab); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o) ++{ ++ int m3 = get_field(s, m3); ++ int r1 = get_field(s, r1); ++ int r2 = get_field(s, r2); ++ TCGv_i32 tr1, tr2, chk; ++ ++ /* R1 and R2 must both be even. 
*/ ++ if ((r1 | r2) & 1) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ if (!s390_has_feat(S390_FEAT_ETF3_ENH)) { ++ m3 = 0; ++ } ++ ++ tr1 = tcg_const_i32(r1); ++ tr2 = tcg_const_i32(r2); ++ chk = tcg_const_i32(m3); ++ ++ switch (s->insn->data) { ++ case 12: ++ gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk); ++ break; ++ case 14: ++ gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk); ++ break; ++ case 21: ++ gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk); ++ break; ++ case 24: ++ gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk); ++ break; ++ case 41: ++ gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk); ++ break; ++ case 42: ++ gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ ++ tcg_temp_free_i32(tr1); ++ tcg_temp_free_i32(tr2); ++ tcg_temp_free_i32(chk); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_diag(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); ++ TCGv_i32 func_code = tcg_const_i32(get_field(s, i2)); ++ ++ gen_helper_diag(cpu_env, r1, r3, func_code); ++ ++ tcg_temp_free_i32(func_code); ++ tcg_temp_free_i32(r3); ++ tcg_temp_free_i32(r1); ++ return DISAS_NEXT; ++} ++#endif ++ ++static DisasJumpType op_divs32(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2); ++ return_low128(o->out); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_divu32(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2); ++ return_low128(o->out); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_divs64(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2); ++ return_low128(o->out); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_divu64(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2); ++ return_low128(o->out); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_deb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_deb(o->out, cpu_env, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ddb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_ddb(o->out, cpu_env, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_dxb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2); ++ return_low128(o->out2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ear(DisasContext *s, DisasOps *o) ++{ ++ int r2 = get_field(s, r2); ++ tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2])); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ecag(DisasContext *s, DisasOps *o) ++{ ++ /* No cache information provided. */ ++ tcg_gen_movi_i64(o->out, -1); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_efpc(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_epsw(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r2 = get_field(s, r2); ++ TCGv_i64 t = tcg_temp_new_i64(); ++ ++ /* Note the "subsequently" in the PoO, which implies a defined result ++ if r1 == r2. Thus we cannot defer these writes to an output hook. 
*/ ++ tcg_gen_shri_i64(t, psw_mask, 32); ++ store_reg32_i64(r1, t); ++ if (r2 != 0) { ++ store_reg32_i64(r2, psw_mask); ++ } ++ ++ tcg_temp_free_i64(t); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ex(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ TCGv_i32 ilen; ++ TCGv_i64 v1; ++ ++ /* Nested EXECUTE is not allowed. */ ++ if (unlikely(s->ex_value)) { ++ gen_program_exception(s, PGM_EXECUTE); ++ return DISAS_NORETURN; ++ } ++ ++ update_psw_addr(s); ++ update_cc_op(s); ++ ++ if (r1 == 0) { ++ v1 = tcg_const_i64(0); ++ } else { ++ v1 = regs[r1]; ++ } ++ ++ ilen = tcg_const_i32(s->ilen); ++ gen_helper_ex(cpu_env, ilen, v1, o->in2); ++ tcg_temp_free_i32(ilen); ++ ++ if (r1 == 0) { ++ tcg_temp_free_i64(v1); ++ } ++ ++ return DISAS_PC_CC_UPDATED; ++} ++ ++static DisasJumpType op_fieb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_fieb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_fidb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_fidb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_fixb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, false, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34); ++ return_low128(o->out2); ++ tcg_temp_free_i32(m34); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_flogr(DisasContext *s, DisasOps *o) ++{ ++ /* We'll use the original input for cc computation, since we get to ++ compare that against 0, which ought to be better than comparing ++ the real output against 64. It also lets cc_dst be a convenient ++ temporary during our computation. */ ++ gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2); ++ ++ /* R1 = IN ? CLZ(IN) : 64. */ ++ tcg_gen_clzi_i64(o->out, o->in2, 64); ++ ++ /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this ++ value by 64, which is undefined. But since the shift is 64 iff the ++ input is zero, we still get the correct result after and'ing. */ ++ tcg_gen_movi_i64(o->out2, 0x8000000000000000ull); ++ tcg_gen_shr_i64(o->out2, o->out2, o->out); ++ tcg_gen_andc_i64(o->out2, cc_dst, o->out2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_icm(DisasContext *s, DisasOps *o) ++{ ++ int m3 = get_field(s, m3); ++ int pos, len, base = s->insn->data; ++ TCGv_i64 tmp = tcg_temp_new_i64(); ++ uint64_t ccm; ++ ++ switch (m3) { ++ case 0xf: ++ /* Effectively a 32-bit load. */ ++ tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s)); ++ len = 32; ++ goto one_insert; ++ ++ case 0xc: ++ case 0x6: ++ case 0x3: ++ /* Effectively a 16-bit load. */ ++ tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s)); ++ len = 16; ++ goto one_insert; ++ ++ case 0x8: ++ case 0x4: ++ case 0x2: ++ case 0x1: ++ /* Effectively an 8-bit load. */ ++ tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s)); ++ len = 8; ++ goto one_insert; ++ ++ one_insert: ++ pos = base + ctz32(m3) * 8; ++ tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len); ++ ccm = ((1ull << len) - 1) << pos; ++ break; ++ ++ default: ++ /* This is going to be a sequence of loads and inserts. 
*/ ++ pos = base + 32 - 8; ++ ccm = 0; ++ while (m3) { ++ if (m3 & 0x8) { ++ tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s)); ++ tcg_gen_addi_i64(o->in2, o->in2, 1); ++ tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8); ++ ccm |= 0xff << pos; ++ } ++ m3 = (m3 << 1) & 0xf; ++ pos -= 8; ++ } ++ break; ++ } ++ ++ tcg_gen_movi_i64(tmp, ccm); ++ gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out); ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_insi(DisasContext *s, DisasOps *o) ++{ ++ int shift = s->insn->data & 0xff; ++ int size = s->insn->data >> 8; ++ tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ipm(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 t1, t2; ++ ++ gen_op_calc_cc(s); ++ t1 = tcg_temp_new_i64(); ++ tcg_gen_extract_i64(t1, psw_mask, 40, 4); ++ t2 = tcg_temp_new_i64(); ++ tcg_gen_extu_i32_i64(t2, cc_op); ++ tcg_gen_deposit_i64(t1, t1, t2, 4, 60); ++ tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8); ++ tcg_temp_free_i64(t1); ++ tcg_temp_free_i64(t2); ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_idte(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m4; ++ ++ if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) { ++ m4 = tcg_const_i32(get_field(s, m4)); ++ } else { ++ m4 = tcg_const_i32(0); ++ } ++ gen_helper_idte(cpu_env, o->in1, o->in2, m4); ++ tcg_temp_free_i32(m4); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ipte(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m4; ++ ++ if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) { ++ m4 = tcg_const_i32(get_field(s, m4)); ++ } else { ++ m4 = tcg_const_i32(0); ++ } ++ gen_helper_ipte(cpu_env, o->in1, o->in2, m4); ++ tcg_temp_free_i32(m4); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_iske(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_iske(o->out, cpu_env, o->in2); ++ return DISAS_NEXT; ++} ++#endif ++ ++static DisasJumpType op_msa(DisasContext *s, DisasOps *o) ++{ ++ int r1 = have_field(s, r1) ? get_field(s, r1) : 0; ++ int r2 = have_field(s, r2) ? get_field(s, r2) : 0; ++ int r3 = have_field(s, r3) ? 
get_field(s, r3) : 0; ++ TCGv_i32 t_r1, t_r2, t_r3, type; ++ ++ switch (s->insn->data) { ++ case S390_FEAT_TYPE_KMA: ++ if (r3 == r1 || r3 == r2) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ /* FALL THROUGH */ ++ case S390_FEAT_TYPE_KMCTR: ++ if (r3 & 1 || !r3) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ /* FALL THROUGH */ ++ case S390_FEAT_TYPE_PPNO: ++ case S390_FEAT_TYPE_KMF: ++ case S390_FEAT_TYPE_KMC: ++ case S390_FEAT_TYPE_KMO: ++ case S390_FEAT_TYPE_KM: ++ if (r1 & 1 || !r1) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ /* FALL THROUGH */ ++ case S390_FEAT_TYPE_KMAC: ++ case S390_FEAT_TYPE_KIMD: ++ case S390_FEAT_TYPE_KLMD: ++ if (r2 & 1 || !r2) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ /* FALL THROUGH */ ++ case S390_FEAT_TYPE_PCKMO: ++ case S390_FEAT_TYPE_PCC: ++ break; ++ default: ++ g_assert_not_reached(); ++ }; ++ ++ t_r1 = tcg_const_i32(r1); ++ t_r2 = tcg_const_i32(r2); ++ t_r3 = tcg_const_i32(r3); ++ type = tcg_const_i32(s->insn->data); ++ gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type); ++ set_cc_static(s); ++ tcg_temp_free_i32(t_r1); ++ tcg_temp_free_i32(t_r2); ++ tcg_temp_free_i32(t_r3); ++ tcg_temp_free_i32(type); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_keb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_keb(cc_op, cpu_env, o->in1, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_kdb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_kxb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_laa(DisasContext *s, DisasOps *o) ++{ ++ /* The real output is indeed the original value in memory; ++ recompute the addition for the computation of CC. */ ++ tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s), ++ s->insn->data | MO_ALIGN); ++ /* However, we need to recompute the addition for setting CC. */ ++ tcg_gen_add_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lan(DisasContext *s, DisasOps *o) ++{ ++ /* The real output is indeed the original value in memory; ++ recompute the addition for the computation of CC. */ ++ tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s), ++ s->insn->data | MO_ALIGN); ++ /* However, we need to recompute the operation for setting CC. */ ++ tcg_gen_and_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lao(DisasContext *s, DisasOps *o) ++{ ++ /* The real output is indeed the original value in memory; ++ recompute the addition for the computation of CC. */ ++ tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s), ++ s->insn->data | MO_ALIGN); ++ /* However, we need to recompute the operation for setting CC. */ ++ tcg_gen_or_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lax(DisasContext *s, DisasOps *o) ++{ ++ /* The real output is indeed the original value in memory; ++ recompute the addition for the computation of CC. */ ++ tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s), ++ s->insn->data | MO_ALIGN); ++ /* However, we need to recompute the operation for setting CC. 
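++ The atomic fetch above left the old memory value in in2, so
++ XORing it with in1 once more reproduces the stored result.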
*/ ++ tcg_gen_xor_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_ldeb(o->out, cpu_env, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ledb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, true, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_ledb(o->out, cpu_env, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, true, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lexb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 m34 = fpinst_extract_m34(s, true, true); ++ ++ if (!m34) { ++ return DISAS_NORETURN; ++ } ++ gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34); ++ tcg_temp_free_i32(m34); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_lxdb(o->out, cpu_env, o->in2); ++ return_low128(o->out2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_lxeb(o->out, cpu_env, o->in2); ++ return_low128(o->out2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lde(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_shli_i64(o->out, o->in2, 32); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_llgt(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ld64(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lat(DisasContext *s, DisasOps *o) ++{ ++ TCGLabel *lab = gen_new_label(); ++ store_reg32_i64(get_field(s, r1), o->in2); ++ /* The value is stored even in case of trap. */ ++ tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab); ++ gen_trap(s); ++ gen_set_label(lab); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lgat(DisasContext *s, DisasOps *o) ++{ ++ TCGLabel *lab = gen_new_label(); ++ tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s)); ++ /* The value is stored even in case of trap. 
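++ Load-and-trap only raises the trap when the value just
++ loaded turns out to be zero.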
*/ ++ tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab); ++ gen_trap(s); ++ gen_set_label(lab); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o) ++{ ++ TCGLabel *lab = gen_new_label(); ++ store_reg32h_i64(get_field(s, r1), o->in2); ++ /* The value is stored even in case of trap. */ ++ tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab); ++ gen_trap(s); ++ gen_set_label(lab); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o) ++{ ++ TCGLabel *lab = gen_new_label(); ++ tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s)); ++ /* The value is stored even in case of trap. */ ++ tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab); ++ gen_trap(s); ++ gen_set_label(lab); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o) ++{ ++ TCGLabel *lab = gen_new_label(); ++ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff); ++ /* The value is stored even in case of trap. */ ++ tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab); ++ gen_trap(s); ++ gen_set_label(lab); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_loc(DisasContext *s, DisasOps *o) ++{ ++ DisasCompare c; ++ ++ disas_jcc(s, &c, get_field(s, m3)); ++ ++ if (c.is_64) { ++ tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b, ++ o->in2, o->in1); ++ free_compare(&c); ++ } else { ++ TCGv_i32 t32 = tcg_temp_new_i32(); ++ TCGv_i64 t, z; ++ ++ tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b); ++ free_compare(&c); ++ ++ t = tcg_temp_new_i64(); ++ tcg_gen_extu_i32_i64(t, t32); ++ tcg_temp_free_i32(t32); ++ ++ z = tcg_const_i64(0); ++ tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1); ++ tcg_temp_free_i64(t); ++ tcg_temp_free_i64(z); ++ } ++ ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_lctl(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); ++ gen_helper_lctl(cpu_env, r1, o->in2, r3); ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r3); ++ /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ ++ return DISAS_PC_STALE_NOCHAIN; ++} ++ ++static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); ++ gen_helper_lctlg(cpu_env, r1, o->in2, r3); ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r3); ++ /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ ++ return DISAS_PC_STALE_NOCHAIN; ++} ++ ++static DisasJumpType op_lra(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_lra(o->out, cpu_env, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lpp(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 t1, t2; ++ ++ per_breaking_event(s); ++ ++ t1 = tcg_temp_new_i64(); ++ t2 = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), ++ MO_TEUL | MO_ALIGN_8); ++ tcg_gen_addi_i64(o->in2, o->in2, 4); ++ tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s)); ++ /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. 
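++ The short-format mask occupies the high word of the 64-bit
++ mask; t2 already holds the new instruction address.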
*/ ++ tcg_gen_shli_i64(t1, t1, 32); ++ gen_helper_load_psw(cpu_env, t1, t2); ++ tcg_temp_free_i64(t1); ++ tcg_temp_free_i64(t2); ++ return DISAS_NORETURN; ++} ++ ++static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 t1, t2; ++ ++ per_breaking_event(s); ++ ++ t1 = tcg_temp_new_i64(); ++ t2 = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), ++ MO_TEQ | MO_ALIGN_8); ++ tcg_gen_addi_i64(o->in2, o->in2, 8); ++ tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s)); ++ gen_helper_load_psw(cpu_env, t1, t2); ++ tcg_temp_free_i64(t1); ++ tcg_temp_free_i64(t2); ++ return DISAS_NORETURN; ++} ++#endif ++ ++static DisasJumpType op_lam(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); ++ gen_helper_lam(cpu_env, r1, o->in2, r3); ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r3); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ TCGv_i64 t1, t2; ++ ++ /* Only one register to read. */ ++ t1 = tcg_temp_new_i64(); ++ if (unlikely(r1 == r3)) { ++ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); ++ store_reg32_i64(r1, t1); ++ tcg_temp_free(t1); ++ return DISAS_NEXT; ++ } ++ ++ /* First load the values of the first and last registers to trigger ++ possible page faults. */ ++ t2 = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); ++ tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15)); ++ tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s)); ++ store_reg32_i64(r1, t1); ++ store_reg32_i64(r3, t2); ++ ++ /* Only two registers to read. */ ++ if (((r1 + 1) & 15) == r3) { ++ tcg_temp_free(t2); ++ tcg_temp_free(t1); ++ return DISAS_NEXT; ++ } ++ ++ /* Then load the remaining registers. Page fault can't occur. */ ++ r3 = (r3 - 1) & 15; ++ tcg_gen_movi_i64(t2, 4); ++ while (r1 != r3) { ++ r1 = (r1 + 1) & 15; ++ tcg_gen_add_i64(o->in2, o->in2, t2); ++ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); ++ store_reg32_i64(r1, t1); ++ } ++ tcg_temp_free(t2); ++ tcg_temp_free(t1); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ TCGv_i64 t1, t2; ++ ++ /* Only one register to read. */ ++ t1 = tcg_temp_new_i64(); ++ if (unlikely(r1 == r3)) { ++ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); ++ store_reg32h_i64(r1, t1); ++ tcg_temp_free(t1); ++ return DISAS_NEXT; ++ } ++ ++ /* First load the values of the first and last registers to trigger ++ possible page faults. */ ++ t2 = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); ++ tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15)); ++ tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s)); ++ store_reg32h_i64(r1, t1); ++ store_reg32h_i64(r3, t2); ++ ++ /* Only two registers to read. */ ++ if (((r1 + 1) & 15) == r3) { ++ tcg_temp_free(t2); ++ tcg_temp_free(t1); ++ return DISAS_NEXT; ++ } ++ ++ /* Then load the remaining registers. Page fault can't occur. 
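++ The first and last loads above already touched every page
++ the operand can span, so these accesses cannot fault.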
*/ ++ r3 = (r3 - 1) & 15; ++ tcg_gen_movi_i64(t2, 4); ++ while (r1 != r3) { ++ r1 = (r1 + 1) & 15; ++ tcg_gen_add_i64(o->in2, o->in2, t2); ++ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); ++ store_reg32h_i64(r1, t1); ++ } ++ tcg_temp_free(t2); ++ tcg_temp_free(t1); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ TCGv_i64 t1, t2; ++ ++ /* Only one register to read. */ ++ if (unlikely(r1 == r3)) { ++ tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++ } ++ ++ /* First load the values of the first and last registers to trigger ++ possible page faults. */ ++ t1 = tcg_temp_new_i64(); ++ t2 = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s)); ++ tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15)); ++ tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s)); ++ tcg_gen_mov_i64(regs[r1], t1); ++ tcg_temp_free(t2); ++ ++ /* Only two registers to read. */ ++ if (((r1 + 1) & 15) == r3) { ++ tcg_temp_free(t1); ++ return DISAS_NEXT; ++ } ++ ++ /* Then load the remaining registers. Page fault can't occur. */ ++ r3 = (r3 - 1) & 15; ++ tcg_gen_movi_i64(t1, 8); ++ while (r1 != r3) { ++ r1 = (r1 + 1) & 15; ++ tcg_gen_add_i64(o->in2, o->in2, t1); ++ tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s)); ++ } ++ tcg_temp_free(t1); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 a1, a2; ++ MemOp mop = s->insn->data; ++ ++ /* In a parallel context, stop the world and single step. */ ++ if (tb_cflags(s->base.tb) & CF_PARALLEL) { ++ update_psw_addr(s); ++ update_cc_op(s); ++ gen_exception(EXCP_ATOMIC); ++ return DISAS_NORETURN; ++ } ++ ++ /* In a serial context, perform the two loads ... */ ++ a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1)); ++ a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2)); ++ tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN); ++ tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN); ++ tcg_temp_free_i64(a1); ++ tcg_temp_free_i64(a2); ++ ++ /* ... and indicate that we performed them while interlocked. 
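++ CC 0 reports that the two fetches appeared interlocked; the
++ parallel case was already diverted to EXCP_ATOMIC above.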
*/ ++ gen_op_movi_cc(s, 0); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lpq(DisasContext *s, DisasOps *o) ++{ ++ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { ++ gen_helper_lpq(o->out, cpu_env, o->in2); ++ } else if (HAVE_ATOMIC128) { ++ gen_helper_lpq_parallel(o->out, cpu_env, o->in2); ++ } else { ++ gen_helper_exit_atomic(cpu_env); ++ return DISAS_NORETURN; ++ } ++ return_low128(o->out2); ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_lura(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data); ++ return DISAS_NEXT; ++} ++#endif ++ ++static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_andi_i64(o->out, o->in2, -256); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o) ++{ ++ const int64_t block_size = (1ull << (get_field(s, m3) + 6)); ++ ++ if (get_field(s, m3) > 6) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tcg_gen_ori_i64(o->addr1, o->addr1, -block_size); ++ tcg_gen_neg_i64(o->addr1, o->addr1); ++ tcg_gen_movi_i64(o->out, 16); ++ tcg_gen_umin_i64(o->out, o->out, o->addr1); ++ gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mc(DisasContext *s, DisasOps *o) ++{ ++#if !defined(CONFIG_USER_ONLY) ++ TCGv_i32 i2; ++#endif ++ const uint16_t monitor_class = get_field(s, i2); ++ ++ if (monitor_class & 0xff00) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++#if !defined(CONFIG_USER_ONLY) ++ i2 = tcg_const_i32(monitor_class); ++ gen_helper_monitor_call(cpu_env, o->addr1, i2); ++ tcg_temp_free_i32(i2); ++#endif ++ /* Defaults to a NOP. */ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mov2(DisasContext *s, DisasOps *o) ++{ ++ o->out = o->in2; ++ o->g_out = o->g_in2; ++ o->in2 = NULL; ++ o->g_in2 = false; ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o) ++{ ++ int b2 = get_field(s, b2); ++ TCGv ar1 = tcg_temp_new_i64(); ++ ++ o->out = o->in2; ++ o->g_out = o->g_in2; ++ o->in2 = NULL; ++ o->g_in2 = false; ++ ++ switch (s->base.tb->flags & FLAG_MASK_ASC) { ++ case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT: ++ tcg_gen_movi_i64(ar1, 0); ++ break; ++ case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT: ++ tcg_gen_movi_i64(ar1, 1); ++ break; ++ case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT: ++ if (b2) { ++ tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2])); ++ } else { ++ tcg_gen_movi_i64(ar1, 0); ++ } ++ break; ++ case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT: ++ tcg_gen_movi_i64(ar1, 2); ++ break; ++ } ++ ++ tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1])); ++ tcg_temp_free_i64(ar1); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_movx(DisasContext *s, DisasOps *o) ++{ ++ o->out = o->in1; ++ o->out2 = o->in2; ++ o->g_out = o->g_in1; ++ o->g_out2 = o->g_in2; ++ o->in1 = NULL; ++ o->in2 = NULL; ++ o->g_in1 = o->g_in2 = false; ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mvc(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_mvc(cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_mvcin(cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o) ++{ ++ 
int r1 = get_field(s, r1); ++ int r2 = get_field(s, r2); ++ TCGv_i32 t1, t2; ++ ++ /* r1 and r2 must be even. */ ++ if (r1 & 1 || r2 & 1) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ t1 = tcg_const_i32(r1); ++ t2 = tcg_const_i32(r2); ++ gen_helper_mvcl(cc_op, cpu_env, t1, t2); ++ tcg_temp_free_i32(t1); ++ tcg_temp_free_i32(t2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ TCGv_i32 t1, t3; ++ ++ /* r1 and r3 must be even. */ ++ if (r1 & 1 || r3 & 1) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ t1 = tcg_const_i32(r1); ++ t3 = tcg_const_i32(r3); ++ gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3); ++ tcg_temp_free_i32(t1); ++ tcg_temp_free_i32(t3); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ TCGv_i32 t1, t3; ++ ++ /* r1 and r3 must be even. */ ++ if (r1 & 1 || r3 & 1) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ t1 = tcg_const_i32(r1); ++ t3 = tcg_const_i32(r3); ++ gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3); ++ tcg_temp_free_i32(t1); ++ tcg_temp_free_i32(t3); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o) ++{ ++ int r3 = get_field(s, r3); ++ gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, l1); ++ gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, l1); ++ gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++#endif ++ ++static DisasJumpType op_mvn(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_mvn(cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mvo(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_mvo(cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 t1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 t2 = tcg_const_i32(get_field(s, r2)); ++ ++ gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2); ++ tcg_temp_free_i32(t1); ++ tcg_temp_free_i32(t2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mvst(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 t1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 t2 = tcg_const_i32(get_field(s, r2)); ++ ++ gen_helper_mvst(cc_op, cpu_env, t1, t2); ++ tcg_temp_free_i32(t1); ++ tcg_temp_free_i32(t2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mvz(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_mvz(cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mul(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_mul_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mul128(DisasContext 
*s, DisasOps *o) ++{ ++ tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_muls128(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_meeb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_meeb(o->out, cpu_env, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mdb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_mdb(o->out, cpu_env, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mxb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2); ++ return_low128(o->out2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2); ++ return_low128(o->out2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_maeb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 r3 = load_freg32_i64(get_field(s, r3)); ++ gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3); ++ tcg_temp_free_i64(r3); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_madb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 r3 = load_freg(get_field(s, r3)); ++ gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3); ++ tcg_temp_free_i64(r3); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mseb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 r3 = load_freg32_i64(get_field(s, r3)); ++ gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3); ++ tcg_temp_free_i64(r3); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_msdb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 r3 = load_freg(get_field(s, r3)); ++ gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3); ++ tcg_temp_free_i64(r3); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_nabs(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 z, n; ++ z = tcg_const_i64(0); ++ n = tcg_temp_new_i64(); ++ tcg_gen_neg_i64(n, o->in2); ++ tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2); ++ tcg_temp_free_i64(n); ++ tcg_temp_free_i64(z); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull); ++ tcg_gen_mov_i64(o->out2, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_nc(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_neg(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_neg_i64(o->out, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_negf32(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_negf64(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_negf128(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_xori_i64(o->out, 
o->in1, 0x8000000000000000ull); ++ tcg_gen_mov_i64(o->out2, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_oc(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_or(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_or_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ori(DisasContext *s, DisasOps *o) ++{ ++ int shift = s->insn->data & 0xff; ++ int size = s->insn->data >> 8; ++ uint64_t mask = ((1ull << size) - 1) << shift; ++ ++ assert(!o->g_in2); ++ tcg_gen_shli_i64(o->in2, o->in2, shift); ++ tcg_gen_or_i64(o->out, o->in1, o->in2); ++ ++ /* Produce the CC from only the bits manipulated. */ ++ tcg_gen_andi_i64(cc_dst, o->out, mask); ++ set_cc_nz_u64(s, cc_dst); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_oi(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ ++ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { ++ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data); ++ } else { ++ /* Perform the atomic operation in memory. */ ++ tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s), ++ s->insn->data); ++ } ++ ++ /* Recompute also for atomic case: needed for setting CC. */ ++ tcg_gen_or_i64(o->out, o->in1, o->in2); ++ ++ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { ++ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_pack(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_pack(cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_pka(DisasContext *s, DisasOps *o) ++{ ++ int l2 = get_field(s, l2) + 1; ++ TCGv_i32 l; ++ ++ /* The length must not exceed 32 bytes. */ ++ if (l2 > 32) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ l = tcg_const_i32(l2); ++ gen_helper_pka(cpu_env, o->addr1, o->in2, l); ++ tcg_temp_free_i32(l); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_pku(DisasContext *s, DisasOps *o) ++{ ++ int l2 = get_field(s, l2) + 1; ++ TCGv_i32 l; ++ ++ /* The length must be even and should not exceed 64 bytes. */ ++ if ((l2 & 1) || (l2 > 64)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ l = tcg_const_i32(l2); ++ gen_helper_pku(cpu_env, o->addr1, o->in2, l); ++ tcg_temp_free_i32(l); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_popcnt(o->out, o->in2); ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_ptlb(cpu_env); ++ return DISAS_NEXT; ++} ++#endif ++ ++static DisasJumpType op_risbg(DisasContext *s, DisasOps *o) ++{ ++ int i3 = get_field(s, i3); ++ int i4 = get_field(s, i4); ++ int i5 = get_field(s, i5); ++ int do_zero = i4 & 0x80; ++ uint64_t mask, imask, pmask; ++ int pos, len, rot; ++ ++ /* Adjust the arguments for the specific insn. 
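++ For RISBG/RISBGN the I3/I4 bit numbers use all 64 positions; for ++ RISBHG/RISBLG they are reduced modulo 32 and PMASK below restricts ++ the insertion to the high or low word of R1.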
*/ ++ switch (s->fields.op2) { ++ case 0x55: /* risbg */ ++ case 0x59: /* risbgn */ ++ i3 &= 63; ++ i4 &= 63; ++ pmask = ~0; ++ break; ++ case 0x5d: /* risbhg */ ++ i3 &= 31; ++ i4 &= 31; ++ pmask = 0xffffffff00000000ull; ++ break; ++ case 0x51: /* risblg */ ++ i3 = (i3 & 31) + 32; ++ i4 = (i4 & 31) + 32; ++ pmask = 0x00000000ffffffffull; ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ ++ /* MASK is the set of bits to be inserted from R2. */ ++ if (i3 <= i4) { ++ /* [0...i3---i4...63] */ ++ mask = (-1ull >> i3) & (-1ull << (63 - i4)); ++ } else { ++ /* [0---i4...i3---63] */ ++ mask = (-1ull >> i3) | (-1ull << (63 - i4)); ++ } ++ /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */ ++ mask &= pmask; ++ ++ /* IMASK is the set of bits to be kept from R1. In the case of the high/low ++ insns, we need to keep the other half of the register. */ ++ imask = ~mask | ~pmask; ++ if (do_zero) { ++ imask = ~pmask; ++ } ++ ++ len = i4 - i3 + 1; ++ pos = 63 - i4; ++ rot = i5 & 63; ++ ++ /* In some cases we can implement this with extract. */ ++ if (imask == 0 && pos == 0 && len > 0 && len <= rot) { ++ tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len); ++ return DISAS_NEXT; ++ } ++ ++ /* In some cases we can implement this with deposit. */ ++ if (len > 0 && (imask == 0 || ~mask == imask)) { ++ /* Note that we rotate the bits to be inserted to the lsb, not to ++ the position as described in the PoO. */ ++ rot = (rot - pos) & 63; ++ } else { ++ pos = -1; ++ } ++ ++ /* Rotate the input as necessary. */ ++ tcg_gen_rotli_i64(o->in2, o->in2, rot); ++ ++ /* Insert the selected bits into the output. */ ++ if (pos >= 0) { ++ if (imask == 0) { ++ tcg_gen_deposit_z_i64(o->out, o->in2, pos, len); ++ } else { ++ tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len); ++ } ++ } else if (imask == 0) { ++ tcg_gen_andi_i64(o->out, o->in2, mask); ++ } else { ++ tcg_gen_andi_i64(o->in2, o->in2, mask); ++ tcg_gen_andi_i64(o->out, o->out, imask); ++ tcg_gen_or_i64(o->out, o->out, o->in2); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o) ++{ ++ int i3 = get_field(s, i3); ++ int i4 = get_field(s, i4); ++ int i5 = get_field(s, i5); ++ uint64_t mask; ++ ++ /* If this is a test-only form, arrange to discard the result. */ ++ if (i3 & 0x80) { ++ o->out = tcg_temp_new_i64(); ++ o->g_out = false; ++ } ++ ++ i3 &= 63; ++ i4 &= 63; ++ i5 &= 63; ++ ++ /* MASK is the set of bits to be operated on from R2. ++ Take care for I3/I4 wraparound. */ ++ mask = ~0ull >> i3; ++ if (i3 <= i4) { ++ mask ^= ~0ull >> i4 >> 1; ++ } else { ++ mask |= ~(~0ull >> i4 >> 1); ++ } ++ ++ /* Rotate the input as necessary. */ ++ tcg_gen_rotli_i64(o->in2, o->in2, i5); ++ ++ /* Operate. */ ++ switch (s->fields.op2) { ++ case 0x54: /* AND */ ++ tcg_gen_ori_i64(o->in2, o->in2, ~mask); ++ tcg_gen_and_i64(o->out, o->out, o->in2); ++ break; ++ case 0x56: /* OR */ ++ tcg_gen_andi_i64(o->in2, o->in2, mask); ++ tcg_gen_or_i64(o->out, o->out, o->in2); ++ break; ++ case 0x57: /* XOR */ ++ tcg_gen_andi_i64(o->in2, o->in2, mask); ++ tcg_gen_xor_i64(o->out, o->out, o->in2); ++ break; ++ default: ++ abort(); ++ } ++ ++ /* Set the CC. 
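++ Only the bits selected by MASK participate: CC is 0 if they are all ++ zero and 1 otherwise.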
*/ ++ tcg_gen_andi_i64(cc_dst, o->out, mask); ++ set_cc_nz_u64(s, cc_dst); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_rev16(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_rev32(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_rev64(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_bswap64_i64(o->out, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_rll32(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 t1 = tcg_temp_new_i32(); ++ TCGv_i32 t2 = tcg_temp_new_i32(); ++ TCGv_i32 to = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(t1, o->in1); ++ tcg_gen_extrl_i64_i32(t2, o->in2); ++ tcg_gen_rotl_i32(to, t1, t2); ++ tcg_gen_extu_i32_i64(o->out, to); ++ tcg_temp_free_i32(t1); ++ tcg_temp_free_i32(t2); ++ tcg_temp_free_i32(to); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_rll64(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_rotl_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_rrbe(cc_op, cpu_env, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sacf(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sacf(cpu_env, o->in2); ++ /* Addressing mode has changed, so end the block. */ ++ return DISAS_PC_STALE; ++} ++#endif ++ ++static DisasJumpType op_sam(DisasContext *s, DisasOps *o) ++{ ++ int sam = s->insn->data; ++ TCGv_i64 tsam; ++ uint64_t mask; ++ ++ switch (sam) { ++ case 0: ++ mask = 0xffffff; ++ break; ++ case 1: ++ mask = 0x7fffffff; ++ break; ++ default: ++ mask = -1; ++ break; ++ } ++ ++ /* Bizarre but true, we check the address of the current insn for the ++ specification exception, not the next to be executed. Thus the PoO ++ documents that Bad Things Happen two bytes before the end. */ ++ if (s->base.pc_next & ~mask) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ s->pc_tmp &= mask; ++ ++ tsam = tcg_const_i64(sam); ++ tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2); ++ tcg_temp_free_i64(tsam); ++ ++ /* Always exit the TB, since we (may have) changed execution mode. 
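++ The next TB must be looked up with the new addressing-mode bits of ++ psw_mask in effect.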
*/ ++ return DISAS_PC_STALE; ++} ++ ++static DisasJumpType op_sar(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1])); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_seb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_seb(o->out, cpu_env, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sdb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sdb(o->out, cpu_env, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sxb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2); ++ return_low128(o->out2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sqeb(o->out, cpu_env, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sqdb(o->out, cpu_env, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2); ++ return_low128(o->out2); ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_servc(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_servc(cc_op, cpu_env, o->in2, o->in1); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sigp(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); ++ gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3); ++ set_cc_static(s); ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r3); ++ return DISAS_NEXT; ++} ++#endif ++ ++static DisasJumpType op_soc(DisasContext *s, DisasOps *o) ++{ ++ DisasCompare c; ++ TCGv_i64 a, h; ++ TCGLabel *lab; ++ int r1; ++ ++ disas_jcc(s, &c, get_field(s, m3)); ++ ++ /* We want to store when the condition is fulfilled, so branch ++ out when it's not */ ++ c.cond = tcg_invert_cond(c.cond); ++ ++ lab = gen_new_label(); ++ if (c.is_64) { ++ tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab); ++ } else { ++ tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab); ++ } ++ free_compare(&c); ++ ++ r1 = get_field(s, r1); ++ a = get_address(s, 0, get_field(s, b2), get_field(s, d2)); ++ switch (s->insn->data) { ++ case 1: /* STOCG */ ++ tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s)); ++ break; ++ case 0: /* STOC */ ++ tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s)); ++ break; ++ case 2: /* STOCFH */ ++ h = tcg_temp_new_i64(); ++ tcg_gen_shri_i64(h, regs[r1], 32); ++ tcg_gen_qemu_st32(h, a, get_mem_index(s)); ++ tcg_temp_free_i64(h); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ tcg_temp_free_i64(a); ++ ++ gen_set_label(lab); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sla(DisasContext *s, DisasOps *o) ++{ ++ uint64_t sign = 1ull << s->insn->data; ++ enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64; ++ gen_op_update2_cc_i64(s, cco, o->in1, o->in2); ++ tcg_gen_shl_i64(o->out, o->in1, o->in2); ++ /* The arithmetic left shift is curious in that it does not affect ++ the sign bit. Copy that over from the source unchanged. 
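++ In effect: out = ((in1 << shift) & ~sign) | (in1 & sign).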
*/ ++ tcg_gen_andi_i64(o->out, o->out, ~sign); ++ tcg_gen_andi_i64(o->in1, o->in1, sign); ++ tcg_gen_or_i64(o->out, o->out, o->in1); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sll(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_shl_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sra(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_sar_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_srl(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_shr_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sfpc(cpu_env, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sfas(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sfas(cpu_env, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_srnm(DisasContext *s, DisasOps *o) ++{ ++ /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */ ++ tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull); ++ gen_helper_srnm(cpu_env, o->addr1); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o) ++{ ++ /* Bits 0-55 are ignored. */ ++ tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull); ++ gen_helper_srnm(cpu_env, o->addr1); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 tmp = tcg_temp_new_i64(); ++ ++ /* Bits other than 61-63 are ignored. */ ++ tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull); ++ ++ /* No need to call a helper, we don't implement dfp */ ++ tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc)); ++ tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3); ++ tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc)); ++ ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_spm(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_extrl_i64_i32(cc_op, o->in1); ++ tcg_gen_extract_i32(cc_op, cc_op, 28, 2); ++ set_cc_static(s); ++ ++ tcg_gen_shri_i64(o->in1, o->in1, 24); ++ tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ectg(DisasContext *s, DisasOps *o) ++{ ++ int b1 = get_field(s, b1); ++ int d1 = get_field(s, d1); ++ int b2 = get_field(s, b2); ++ int d2 = get_field(s, d2); ++ int r3 = get_field(s, r3); ++ TCGv_i64 tmp = tcg_temp_new_i64(); ++ ++ /* fetch all operands first */ ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_addi_i64(o->in1, regs[b1], d1); ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_addi_i64(o->in2, regs[b2], d2); ++ o->addr1 = tcg_temp_new_i64(); ++ gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0); ++ ++ /* load the third operand into r3 before modifying anything */ ++ tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s)); ++ ++ /* subtract CPU timer from first operand and store in GR0 */ ++ gen_helper_stpt(tmp, cpu_env); ++ tcg_gen_sub_i64(regs[0], o->in1, tmp); ++ ++ /* store second operand in GR1 */ ++ tcg_gen_mov_i64(regs[1], o->in2); ++ ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_spka(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_shri_i64(o->in2, o->in2, 4); ++ tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sske(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sske(cpu_env, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ssm(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
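++ /* The deposit is equivalent to: ++ psw_mask = (psw_mask & 0x00ffffffffffffffull) | ((in2 & 0xff) << 56); ++ i.e. PSW bits 0-7, the system mask, are replaced by the byte ++ loaded from the second operand. */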
++ /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ ++ return DISAS_PC_STALE_NOCHAIN; ++} ++ ++static DisasJumpType op_stap(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id)); ++ return DISAS_NEXT; ++} ++#endif ++ ++static DisasJumpType op_stck(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_stck(o->out, cpu_env); ++ /* ??? We don't implement clock states. */ ++ gen_op_movi_cc(s, 0); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stcke(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 c1 = tcg_temp_new_i64(); ++ TCGv_i64 c2 = tcg_temp_new_i64(); ++ TCGv_i64 todpr = tcg_temp_new_i64(); ++ gen_helper_stck(c1, cpu_env); ++ /* 16-bit value stored in a uint32_t (only valid bits set) */ ++ tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr)); ++ /* Shift the 64-bit value into its place as a zero-extended ++ 104-bit value. Note that "bit positions 64-103 are always ++ non-zero so that they compare differently to STCK"; we set ++ the least significant bit to 1. */ ++ tcg_gen_shli_i64(c2, c1, 56); ++ tcg_gen_shri_i64(c1, c1, 8); ++ tcg_gen_ori_i64(c2, c2, 0x10000); ++ tcg_gen_or_i64(c2, c2, todpr); ++ tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s)); ++ tcg_gen_addi_i64(o->in2, o->in2, 8); ++ tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s)); ++ tcg_temp_free_i64(c1); ++ tcg_temp_free_i64(c2); ++ tcg_temp_free_i64(todpr); ++ /* ??? We don't implement clock states. */ ++ gen_op_movi_cc(s, 0); ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_sck(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); ++ gen_helper_sck(cc_op, cpu_env, o->in1); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sckc(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sckc(cpu_env, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sckpf(cpu_env, regs[0]); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stckc(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_stckc(o->out, cpu_env); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stctg(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); ++ gen_helper_stctg(cpu_env, r1, o->in2, r3); ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r3); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stctl(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); ++ gen_helper_stctl(cpu_env, r1, o->in2, r3); ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r3); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stidp(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_spt(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_spt(cpu_env, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stfl(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_stfl(cpu_env); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stpt(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_stpt(o->out, cpu_env); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stsi(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_spx(DisasContext *s, DisasOps *o) ++{ ++
gen_helper_spx(cpu_env, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_xsch(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_xsch(cpu_env, regs[1]); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_csch(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_csch(cpu_env, regs[1]); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_hsch(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_hsch(cpu_env, regs[1]); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_msch(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_msch(cpu_env, regs[1], o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_rchp(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_rchp(cpu_env, regs[1]); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_rsch(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_rsch(cpu_env, regs[1]); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sal(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sal(cpu_env, regs[1]); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_schm(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_schm(cpu_env, regs[1], regs[2], o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_siga(DisasContext *s, DisasOps *o) ++{ ++ /* From KVM code: Not provided, set CC = 3 for subchannel not operational */ ++ gen_op_movi_cc(s, 3); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stcps(DisasContext *s, DisasOps *o) ++{ ++ /* The instruction is suppressed if not provided. */ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ssch(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_ssch(cpu_env, regs[1], o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stsch(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_stsch(cpu_env, regs[1], o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_stcrw(cpu_env, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_tpi(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_tpi(cc_op, cpu_env, o->addr1); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_tsch(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_tsch(cpu_env, regs[1], o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_chsc(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_chsc(cpu_env, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stpx(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa)); ++ tcg_gen_andi_i64(o->out, o->out, 0x7fffe000); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) ++{ ++ uint64_t i2 = get_field(s, i2); ++ TCGv_i64 t; ++ ++ /* It is important to do what the instruction name says: STORE THEN. ++ If we let the output hook perform the store then if we fault and ++ restart, we'll have the wrong SYSTEM MASK in place. */ ++ t = tcg_temp_new_i64(); ++ tcg_gen_shri_i64(t, psw_mask, 56); ++ tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s)); ++ tcg_temp_free_i64(t); ++ ++ if (s->fields.op == 0xac) { ++ tcg_gen_andi_i64(psw_mask, psw_mask, ++ (i2 << 56) | 0x00ffffffffffffffull); ++ } else { ++ tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56); ++ } ++ ++ /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. 
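++ Newly enabled system-mask bits may make a pending interrupt ++ deliverable.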
*/ ++ return DISAS_PC_STALE_NOCHAIN; ++} ++ ++static DisasJumpType op_stura(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data); ++ ++ if (s->base.tb->flags & FLAG_MASK_PER) { ++ update_psw_addr(s); ++ gen_helper_per_store_real(cpu_env); ++ } ++ return DISAS_NEXT; ++} ++#endif ++ ++static DisasJumpType op_stfle(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_stfle(cc_op, cpu_env, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_st8(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_st16(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_st32(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_st64(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stam(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r3 = tcg_const_i32(get_field(s, r3)); ++ gen_helper_stam(cpu_env, r1, o->in2, r3); ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r3); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stcm(DisasContext *s, DisasOps *o) ++{ ++ int m3 = get_field(s, m3); ++ int pos, base = s->insn->data; ++ TCGv_i64 tmp = tcg_temp_new_i64(); ++ ++ pos = base + ctz32(m3) * 8; ++ switch (m3) { ++ case 0xf: ++ /* Effectively a 32-bit store. */ ++ tcg_gen_shri_i64(tmp, o->in1, pos); ++ tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s)); ++ break; ++ ++ case 0xc: ++ case 0x6: ++ case 0x3: ++ /* Effectively a 16-bit store. */ ++ tcg_gen_shri_i64(tmp, o->in1, pos); ++ tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s)); ++ break; ++ ++ case 0x8: ++ case 0x4: ++ case 0x2: ++ case 0x1: ++ /* Effectively an 8-bit store. */ ++ tcg_gen_shri_i64(tmp, o->in1, pos); ++ tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s)); ++ break; ++ ++ default: ++ /* This is going to be a sequence of shifts and stores. 
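++ Each byte selected by the mask is stored, from the most significant ++ selected byte down, advancing the address by one after each store.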
*/ ++ pos = base + 32 - 8; ++ while (m3) { ++ if (m3 & 0x8) { ++ tcg_gen_shri_i64(tmp, o->in1, pos); ++ tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s)); ++ tcg_gen_addi_i64(o->in2, o->in2, 1); ++ } ++ m3 = (m3 << 1) & 0xf; ++ pos -= 8; ++ } ++ break; ++ } ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stm(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ int size = s->insn->data; ++ TCGv_i64 tsize = tcg_const_i64(size); ++ ++ while (1) { ++ if (size == 8) { ++ tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s)); ++ } else { ++ tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s)); ++ } ++ if (r1 == r3) { ++ break; ++ } ++ tcg_gen_add_i64(o->in2, o->in2, tsize); ++ r1 = (r1 + 1) & 15; ++ } ++ ++ tcg_temp_free_i64(tsize); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ int r3 = get_field(s, r3); ++ TCGv_i64 t = tcg_temp_new_i64(); ++ TCGv_i64 t4 = tcg_const_i64(4); ++ TCGv_i64 t32 = tcg_const_i64(32); ++ ++ while (1) { ++ tcg_gen_shl_i64(t, regs[r1], t32); ++ tcg_gen_qemu_st32(t, o->in2, get_mem_index(s)); ++ if (r1 == r3) { ++ break; ++ } ++ tcg_gen_add_i64(o->in2, o->in2, t4); ++ r1 = (r1 + 1) & 15; ++ } ++ ++ tcg_temp_free_i64(t); ++ tcg_temp_free_i64(t4); ++ tcg_temp_free_i64(t32); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stpq(DisasContext *s, DisasOps *o) ++{ ++ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { ++ gen_helper_stpq(cpu_env, o->in2, o->out2, o->out); ++ } else if (HAVE_ATOMIC128) { ++ gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out); ++ } else { ++ gen_helper_exit_atomic(cpu_env); ++ return DISAS_NORETURN; ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_srst(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); ++ ++ gen_helper_srst(cpu_env, r1, r2); ++ ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_srstu(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); ++ ++ gen_helper_srstu(cpu_env, r1, r2); ++ ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sub(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_sub_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_subu64(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_movi_i64(cc_src, 0); ++ tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src); ++ return DISAS_NEXT; ++} ++ ++/* Compute borrow (0, -1) into cc_src. */ ++static void compute_borrow(DisasContext *s) ++{ ++ switch (s->cc_op) { ++ case CC_OP_SUBU: ++ /* The borrow value is already in cc_src (0,-1). */ ++ break; ++ default: ++ gen_op_calc_cc(s); ++ /* fall through */ ++ case CC_OP_STATIC: ++ /* The carry flag is the msb of CC; compute into cc_src. */ ++ tcg_gen_extu_i32_i64(cc_src, cc_op); ++ tcg_gen_shri_i64(cc_src, cc_src, 1); ++ /* fall through */ ++ case CC_OP_ADDU: ++ /* Convert carry (1,0) to borrow (0,-1). */ ++ tcg_gen_subi_i64(cc_src, cc_src, 1); ++ break; ++ } ++} ++ ++static DisasJumpType op_subb32(DisasContext *s, DisasOps *o) ++{ ++ compute_borrow(s); ++ ++ /* Borrow is {0, -1}, so add to subtract. 
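++ That is, out = in1 + borrow - in2.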
*/ ++ tcg_gen_add_i64(o->out, o->in1, cc_src); ++ tcg_gen_sub_i64(o->out, o->out, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_subb64(DisasContext *s, DisasOps *o) ++{ ++ compute_borrow(s); ++ ++ /* ++ * Borrow is {0, -1}, so add to subtract; replicate the ++ * borrow input to produce 128-bit -1 for the addition. ++ */ ++ TCGv_i64 zero = tcg_const_i64(0); ++ tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src); ++ tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero); ++ tcg_temp_free_i64(zero); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_svc(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 t; ++ ++ update_psw_addr(s); ++ update_cc_op(s); ++ ++ t = tcg_const_i32(get_field(s, i1) & 0xff); ++ tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code)); ++ tcg_temp_free_i32(t); ++ ++ t = tcg_const_i32(s->ilen); ++ tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen)); ++ tcg_temp_free_i32(t); ++ ++ gen_exception(EXCP_SVC); ++ return DISAS_NORETURN; ++} ++ ++static DisasJumpType op_tam(DisasContext *s, DisasOps *o) ++{ ++ int cc = 0; ++ ++ cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0; ++ cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0; ++ gen_op_movi_cc(s, cc); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_tceb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++ ++static DisasJumpType op_testblock(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_testblock(cc_op, cpu_env, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_tprot(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++#endif ++ ++static DisasJumpType op_tp(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1); ++ gen_helper_tp(cc_op, cpu_env, o->addr1, l1); ++ tcg_temp_free_i32(l1); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_tr(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_tr(cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_tre(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2); ++ return_low128(o->out2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_trt(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_trtr(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_trXX(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); ++ TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3); 
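++ /* The low two opcode bits distinguish TRTT/TRTO/TROT/TROO and are ++ passed to the helper as "sizes"; they also select the width of the ++ test character taken from r0 below: 16 bits when (opc & 3) == 0, ++ else 8 bits. */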
++ TCGv_i32 tst = tcg_temp_new_i32(); ++ int m3 = get_field(s, m3); ++ ++ if (!s390_has_feat(S390_FEAT_ETF2_ENH)) { ++ m3 = 0; ++ } ++ if (m3 & 1) { ++ tcg_gen_movi_i32(tst, -1); ++ } else { ++ tcg_gen_extrl_i64_i32(tst, regs[0]); ++ if (s->insn->opc & 3) { ++ tcg_gen_ext8u_i32(tst, tst); ++ } else { ++ tcg_gen_ext16u_i32(tst, tst); ++ } ++ } ++ gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes); ++ ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r2); ++ tcg_temp_free_i32(sizes); ++ tcg_temp_free_i32(tst); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_ts(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 t1 = tcg_const_i32(0xff); ++ tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB); ++ tcg_gen_extract_i32(cc_op, t1, 7, 1); ++ tcg_temp_free_i32(t1); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_unpk(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 l = tcg_const_i32(get_field(s, l1)); ++ gen_helper_unpk(cpu_env, l, o->addr1, o->in2); ++ tcg_temp_free_i32(l); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_unpka(DisasContext *s, DisasOps *o) ++{ ++ int l1 = get_field(s, l1) + 1; ++ TCGv_i32 l; ++ ++ /* The length must not exceed 32 bytes. */ ++ if (l1 > 32) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ l = tcg_const_i32(l1); ++ gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2); ++ tcg_temp_free_i32(l); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_unpku(DisasContext *s, DisasOps *o) ++{ ++ int l1 = get_field(s, l1) + 1; ++ TCGv_i32 l; ++ ++ /* The length must be even and should not exceed 64 bytes. */ ++ if ((l1 & 1) || (l1 > 64)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ l = tcg_const_i32(l1); ++ gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2); ++ tcg_temp_free_i32(l); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++ ++static DisasJumpType op_xc(DisasContext *s, DisasOps *o) ++{ ++ int d1 = get_field(s, d1); ++ int d2 = get_field(s, d2); ++ int b1 = get_field(s, b1); ++ int b2 = get_field(s, b2); ++ int l = get_field(s, l1); ++ TCGv_i32 t32; ++ ++ o->addr1 = get_address(s, 0, b1, d1); ++ ++ /* If the addresses are identical, this is a store/memset of zero. */ ++ if (b1 == b2 && d1 == d2 && (l + 1) <= 32) { ++ o->in2 = tcg_const_i64(0); ++ ++ l++; ++ while (l >= 8) { ++ tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s)); ++ l -= 8; ++ if (l > 0) { ++ tcg_gen_addi_i64(o->addr1, o->addr1, 8); ++ } ++ } ++ if (l >= 4) { ++ tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s)); ++ l -= 4; ++ if (l > 0) { ++ tcg_gen_addi_i64(o->addr1, o->addr1, 4); ++ } ++ } ++ if (l >= 2) { ++ tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s)); ++ l -= 2; ++ if (l > 0) { ++ tcg_gen_addi_i64(o->addr1, o->addr1, 2); ++ } ++ } ++ if (l) { ++ tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s)); ++ } ++ gen_op_movi_cc(s, 0); ++ return DISAS_NEXT; ++ } ++ ++ /* But in general we'll defer to a helper. 
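++ The helper performs the left-to-right byte-by-byte operation, which ++ also yields the architecturally required result when the operands ++ overlap.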
*/ ++ o->in2 = get_address(s, 0, b2, d2); ++ t32 = tcg_const_i32(l); ++ gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2); ++ tcg_temp_free_i32(t32); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_xor(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_xor_i64(o->out, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_xori(DisasContext *s, DisasOps *o) ++{ ++ int shift = s->insn->data & 0xff; ++ int size = s->insn->data >> 8; ++ uint64_t mask = ((1ull << size) - 1) << shift; ++ ++ assert(!o->g_in2); ++ tcg_gen_shli_i64(o->in2, o->in2, shift); ++ tcg_gen_xor_i64(o->out, o->in1, o->in2); ++ ++ /* Produce the CC from only the bits manipulated. */ ++ tcg_gen_andi_i64(cc_dst, o->out, mask); ++ set_cc_nz_u64(s, cc_dst); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_xi(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ ++ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { ++ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data); ++ } else { ++ /* Perform the atomic operation in memory. */ ++ tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s), ++ s->insn->data); ++ } ++ ++ /* Recompute also for atomic case: needed for setting CC. */ ++ tcg_gen_xor_i64(o->out, o->in1, o->in2); ++ ++ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { ++ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_zero(DisasContext *s, DisasOps *o) ++{ ++ o->out = tcg_const_i64(0); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_zero2(DisasContext *s, DisasOps *o) ++{ ++ o->out = tcg_const_i64(0); ++ o->out2 = o->out; ++ o->g_out2 = true; ++ return DISAS_NEXT; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static DisasJumpType op_clp(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); ++ ++ gen_helper_clp(cpu_env, r2); ++ tcg_temp_free_i32(r2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); ++ ++ gen_helper_pcilg(cpu_env, r1, r2); ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); ++ ++ gen_helper_pcistg(cpu_env, r1, r2); ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 ar = tcg_const_i32(get_field(s, b2)); ++ ++ gen_helper_stpcifc(cpu_env, r1, o->addr1, ar); ++ tcg_temp_free_i32(ar); ++ tcg_temp_free_i32(r1); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_sic(DisasContext *s, DisasOps *o) ++{ ++ gen_helper_sic(cpu_env, o->in1, o->in2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); ++ ++ gen_helper_rpcit(cpu_env, r1, r2); ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r2); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 r3 = 
tcg_const_i32(get_field(s, r3)); ++ TCGv_i32 ar = tcg_const_i32(get_field(s, b2)); ++ ++ gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar); ++ tcg_temp_free_i32(ar); ++ tcg_temp_free_i32(r1); ++ tcg_temp_free_i32(r3); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); ++ TCGv_i32 ar = tcg_const_i32(get_field(s, b2)); ++ ++ gen_helper_mpcifc(cpu_env, r1, o->addr1, ar); ++ tcg_temp_free_i32(ar); ++ tcg_temp_free_i32(r1); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++#endif ++ ++#include "translate_vx.c.inc" ++ ++/* ====================================================================== */ ++/* The "Cc OUTput" generators. Given the generated output (and in some cases ++ the original inputs), update the various cc data structures in order to ++ be able to compute the new condition code. */ ++ ++static void cout_abs32(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out); ++} ++ ++static void cout_abs64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out); ++} ++ ++static void cout_adds32(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out); ++} ++ ++static void cout_adds64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out); ++} ++ ++static void cout_addu32(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_shri_i64(cc_src, o->out, 32); ++ tcg_gen_ext32u_i64(cc_dst, o->out); ++ gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst); ++} ++ ++static void cout_addu64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out); ++} ++ ++static void cout_cmps32(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2); ++} ++ ++static void cout_cmps64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2); ++} ++ ++static void cout_cmpu32(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2); ++} ++ ++static void cout_cmpu64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2); ++} ++ ++static void cout_f32(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out); ++} ++ ++static void cout_f64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out); ++} ++ ++static void cout_f128(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2); ++} ++ ++static void cout_nabs32(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out); ++} ++ ++static void cout_nabs64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out); ++} ++ ++static void cout_neg32(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out); ++} ++ ++static void cout_neg64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out); ++} ++ ++static void cout_nz32(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_ext32u_i64(cc_dst, o->out); ++ gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst); ++} ++ ++static void cout_nz64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_NZ, o->out); ++} ++ ++static void cout_s32(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out); ++} ++ ++static void cout_s64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, 
CC_OP_LTGT0_64, o->out); ++} ++ ++static void cout_subs32(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out); ++} ++ ++static void cout_subs64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out); ++} ++ ++static void cout_subu32(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_sari_i64(cc_src, o->out, 32); ++ tcg_gen_ext32u_i64(cc_dst, o->out); ++ gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst); ++} ++ ++static void cout_subu64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out); ++} ++ ++static void cout_tm32(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2); ++} ++ ++static void cout_tm64(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2); ++} ++ ++static void cout_muls32(DisasContext *s, DisasOps *o) ++{ ++ gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out); ++} ++ ++static void cout_muls64(DisasContext *s, DisasOps *o) ++{ ++ /* out contains "high" part, out2 contains "low" part of 128 bit result */ ++ gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2); ++} ++ ++/* ====================================================================== */ ++/* The "PREParation" generators. These initialize the DisasOps.OUT fields ++ with the TCG register to which we will write. Used in combination with ++ the "wout" generators, in some cases we need a new temporary, and in ++ some cases we can write to a TCG global. */ ++ ++static void prep_new(DisasContext *s, DisasOps *o) ++{ ++ o->out = tcg_temp_new_i64(); ++} ++#define SPEC_prep_new 0 ++ ++static void prep_new_P(DisasContext *s, DisasOps *o) ++{ ++ o->out = tcg_temp_new_i64(); ++ o->out2 = tcg_temp_new_i64(); ++} ++#define SPEC_prep_new_P 0 ++ ++static void prep_r1(DisasContext *s, DisasOps *o) ++{ ++ o->out = regs[get_field(s, r1)]; ++ o->g_out = true; ++} ++#define SPEC_prep_r1 0 ++ ++static void prep_r1_P(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ o->out = regs[r1]; ++ o->out2 = regs[r1 + 1]; ++ o->g_out = o->g_out2 = true; ++} ++#define SPEC_prep_r1_P SPEC_r1_even ++ ++/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */ ++static void prep_x1(DisasContext *s, DisasOps *o) ++{ ++ o->out = load_freg(get_field(s, r1)); ++ o->out2 = load_freg(get_field(s, r1) + 2); ++} ++#define SPEC_prep_x1 SPEC_r1_f128 ++ ++/* ====================================================================== */ ++/* The "Write OUTput" generators. These generally perform some non-trivial ++ copy of data to TCG globals, or to main memory. The trivial cases are ++ generally handled by having a "prep" generator install the TCG global ++ as the destination of the operation. 
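++ Each generator carries a matching SPEC_wout_* value encoding any ++ register-pairing constraints it imposes.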
*/ ++ ++static void wout_r1(DisasContext *s, DisasOps *o) ++{ ++ store_reg(get_field(s, r1), o->out); ++} ++#define SPEC_wout_r1 0 ++ ++static void wout_out2_r1(DisasContext *s, DisasOps *o) ++{ ++ store_reg(get_field(s, r1), o->out2); ++} ++#define SPEC_wout_out2_r1 0 ++ ++static void wout_r1_8(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8); ++} ++#define SPEC_wout_r1_8 0 ++ ++static void wout_r1_16(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16); ++} ++#define SPEC_wout_r1_16 0 ++ ++static void wout_r1_32(DisasContext *s, DisasOps *o) ++{ ++ store_reg32_i64(get_field(s, r1), o->out); ++} ++#define SPEC_wout_r1_32 0 ++ ++static void wout_r1_32h(DisasContext *s, DisasOps *o) ++{ ++ store_reg32h_i64(get_field(s, r1), o->out); ++} ++#define SPEC_wout_r1_32h 0 ++ ++static void wout_r1_P32(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ store_reg32_i64(r1, o->out); ++ store_reg32_i64(r1 + 1, o->out2); ++} ++#define SPEC_wout_r1_P32 SPEC_r1_even ++ ++static void wout_r1_D32(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ store_reg32_i64(r1 + 1, o->out); ++ tcg_gen_shri_i64(o->out, o->out, 32); ++ store_reg32_i64(r1, o->out); ++} ++#define SPEC_wout_r1_D32 SPEC_r1_even ++ ++static void wout_r3_P32(DisasContext *s, DisasOps *o) ++{ ++ int r3 = get_field(s, r3); ++ store_reg32_i64(r3, o->out); ++ store_reg32_i64(r3 + 1, o->out2); ++} ++#define SPEC_wout_r3_P32 SPEC_r3_even ++ ++static void wout_r3_P64(DisasContext *s, DisasOps *o) ++{ ++ int r3 = get_field(s, r3); ++ store_reg(r3, o->out); ++ store_reg(r3 + 1, o->out2); ++} ++#define SPEC_wout_r3_P64 SPEC_r3_even ++ ++static void wout_e1(DisasContext *s, DisasOps *o) ++{ ++ store_freg32_i64(get_field(s, r1), o->out); ++} ++#define SPEC_wout_e1 0 ++ ++static void wout_f1(DisasContext *s, DisasOps *o) ++{ ++ store_freg(get_field(s, r1), o->out); ++} ++#define SPEC_wout_f1 0 ++ ++static void wout_x1(DisasContext *s, DisasOps *o) ++{ ++ int f1 = get_field(s, r1); ++ store_freg(f1, o->out); ++ store_freg(f1 + 2, o->out2); ++} ++#define SPEC_wout_x1 SPEC_r1_f128 ++ ++static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o) ++{ ++ if (get_field(s, r1) != get_field(s, r2)) { ++ store_reg32_i64(get_field(s, r1), o->out); ++ } ++} ++#define SPEC_wout_cond_r1r2_32 0 ++ ++static void wout_cond_e1e2(DisasContext *s, DisasOps *o) ++{ ++ if (get_field(s, r1) != get_field(s, r2)) { ++ store_freg32_i64(get_field(s, r1), o->out); ++ } ++} ++#define SPEC_wout_cond_e1e2 0 ++ ++static void wout_m1_8(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s)); ++} ++#define SPEC_wout_m1_8 0 ++ ++static void wout_m1_16(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s)); ++} ++#define SPEC_wout_m1_16 0 ++ ++#ifndef CONFIG_USER_ONLY ++static void wout_m1_16a(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN); ++} ++#define SPEC_wout_m1_16a 0 ++#endif ++ ++static void wout_m1_32(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s)); ++} ++#define SPEC_wout_m1_32 0 ++ ++#ifndef CONFIG_USER_ONLY ++static void wout_m1_32a(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN); ++} ++#define SPEC_wout_m1_32a 0 ++#endif ++ ++static void wout_m1_64(DisasContext *s, 
DisasOps *o) ++{ ++ tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s)); ++} ++#define SPEC_wout_m1_64 0 ++ ++#ifndef CONFIG_USER_ONLY ++static void wout_m1_64a(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); ++} ++#define SPEC_wout_m1_64a 0 ++#endif ++ ++static void wout_m2_32(DisasContext *s, DisasOps *o) ++{ ++ tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s)); ++} ++#define SPEC_wout_m2_32 0 ++ ++static void wout_in2_r1(DisasContext *s, DisasOps *o) ++{ ++ store_reg(get_field(s, r1), o->in2); ++} ++#define SPEC_wout_in2_r1 0 ++ ++static void wout_in2_r1_32(DisasContext *s, DisasOps *o) ++{ ++ store_reg32_i64(get_field(s, r1), o->in2); ++} ++#define SPEC_wout_in2_r1_32 0 ++ ++/* ====================================================================== */ ++/* The "INput 1" generators. These load the first operand to an insn. */ ++ ++static void in1_r1(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = load_reg(get_field(s, r1)); ++} ++#define SPEC_in1_r1 0 ++ ++static void in1_r1_o(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = regs[get_field(s, r1)]; ++ o->g_in1 = true; ++} ++#define SPEC_in1_r1_o 0 ++ ++static void in1_r1_32s(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]); ++} ++#define SPEC_in1_r1_32s 0 ++ ++static void in1_r1_32u(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]); ++} ++#define SPEC_in1_r1_32u 0 ++ ++static void in1_r1_sr32(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32); ++} ++#define SPEC_in1_r1_sr32 0 ++ ++static void in1_r1p1(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = load_reg(get_field(s, r1) + 1); ++} ++#define SPEC_in1_r1p1 SPEC_r1_even ++ ++static void in1_r1p1_o(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = regs[get_field(s, r1) + 1]; ++ o->g_in1 = true; ++} ++#define SPEC_in1_r1p1_o SPEC_r1_even ++ ++static void in1_r1p1_32s(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]); ++} ++#define SPEC_in1_r1p1_32s SPEC_r1_even ++ ++static void in1_r1p1_32u(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]); ++} ++#define SPEC_in1_r1p1_32u SPEC_r1_even ++ ++static void in1_r1_D32(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]); ++} ++#define SPEC_in1_r1_D32 SPEC_r1_even ++ ++static void in1_r2(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = load_reg(get_field(s, r2)); ++} ++#define SPEC_in1_r2 0 ++ ++static void in1_r2_sr32(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32); ++} ++#define SPEC_in1_r2_sr32 0 ++ ++static void in1_r2_32u(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]); ++} ++#define SPEC_in1_r2_32u 0 ++ ++static void in1_r3(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = load_reg(get_field(s, r3)); ++} ++#define SPEC_in1_r3 0 ++ ++static void in1_r3_o(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = regs[get_field(s, r3)]; ++ o->g_in1 = true; ++} ++#define SPEC_in1_r3_o 0 ++ ++static void in1_r3_32s(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_ext32s_i64(o->in1, 
regs[get_field(s, r3)]); ++} ++#define SPEC_in1_r3_32s 0 ++ ++static void in1_r3_32u(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]); ++} ++#define SPEC_in1_r3_32u 0 ++ ++static void in1_r3_D32(DisasContext *s, DisasOps *o) ++{ ++ int r3 = get_field(s, r3); ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]); ++} ++#define SPEC_in1_r3_D32 SPEC_r3_even ++ ++static void in1_e1(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = load_freg32_i64(get_field(s, r1)); ++} ++#define SPEC_in1_e1 0 ++ ++static void in1_f1(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = load_freg(get_field(s, r1)); ++} ++#define SPEC_in1_f1 0 ++ ++/* Load the high double word of an extended (128-bit) format FP number */ ++static void in1_x2h(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = load_freg(get_field(s, r2)); ++} ++#define SPEC_in1_x2h SPEC_r2_f128 ++ ++static void in1_f3(DisasContext *s, DisasOps *o) ++{ ++ o->in1 = load_freg(get_field(s, r3)); ++} ++#define SPEC_in1_f3 0 ++ ++static void in1_la1(DisasContext *s, DisasOps *o) ++{ ++ o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1)); ++} ++#define SPEC_in1_la1 0 ++ ++static void in1_la2(DisasContext *s, DisasOps *o) ++{ ++ int x2 = have_field(s, x2) ? get_field(s, x2) : 0; ++ o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); ++} ++#define SPEC_in1_la2 0 ++ ++static void in1_m1_8u(DisasContext *s, DisasOps *o) ++{ ++ in1_la1(s, o); ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s)); ++} ++#define SPEC_in1_m1_8u 0 ++ ++static void in1_m1_16s(DisasContext *s, DisasOps *o) ++{ ++ in1_la1(s, o); ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s)); ++} ++#define SPEC_in1_m1_16s 0 ++ ++static void in1_m1_16u(DisasContext *s, DisasOps *o) ++{ ++ in1_la1(s, o); ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s)); ++} ++#define SPEC_in1_m1_16u 0 ++ ++static void in1_m1_32s(DisasContext *s, DisasOps *o) ++{ ++ in1_la1(s, o); ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s)); ++} ++#define SPEC_in1_m1_32s 0 ++ ++static void in1_m1_32u(DisasContext *s, DisasOps *o) ++{ ++ in1_la1(s, o); ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s)); ++} ++#define SPEC_in1_m1_32u 0 ++ ++static void in1_m1_64(DisasContext *s, DisasOps *o) ++{ ++ in1_la1(s, o); ++ o->in1 = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s)); ++} ++#define SPEC_in1_m1_64 0 ++ ++/* ====================================================================== */ ++/* The "INput 2" generators. These load the second operand to an insn. 
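++ The name suffix encodes the source field and any extension, shift, ++ or address computation applied.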
*/ ++ ++static void in2_r1_o(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = regs[get_field(s, r1)]; ++ o->g_in2 = true; ++} ++#define SPEC_in2_r1_o 0 ++ ++static void in2_r1_16u(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]); ++} ++#define SPEC_in2_r1_16u 0 ++ ++static void in2_r1_32u(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]); ++} ++#define SPEC_in2_r1_32u 0 ++ ++static void in2_r1_D32(DisasContext *s, DisasOps *o) ++{ ++ int r1 = get_field(s, r1); ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]); ++} ++#define SPEC_in2_r1_D32 SPEC_r1_even ++ ++static void in2_r2(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = load_reg(get_field(s, r2)); ++} ++#define SPEC_in2_r2 0 ++ ++static void in2_r2_o(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = regs[get_field(s, r2)]; ++ o->g_in2 = true; ++} ++#define SPEC_in2_r2_o 0 ++ ++static void in2_r2_nz(DisasContext *s, DisasOps *o) ++{ ++ int r2 = get_field(s, r2); ++ if (r2 != 0) { ++ o->in2 = load_reg(r2); ++ } ++} ++#define SPEC_in2_r2_nz 0 ++ ++static void in2_r2_8s(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]); ++} ++#define SPEC_in2_r2_8s 0 ++ ++static void in2_r2_8u(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]); ++} ++#define SPEC_in2_r2_8u 0 ++ ++static void in2_r2_16s(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]); ++} ++#define SPEC_in2_r2_16s 0 ++ ++static void in2_r2_16u(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]); ++} ++#define SPEC_in2_r2_16u 0 ++ ++static void in2_r3(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = load_reg(get_field(s, r3)); ++} ++#define SPEC_in2_r3 0 ++ ++static void in2_r3_sr32(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32); ++} ++#define SPEC_in2_r3_sr32 0 ++ ++static void in2_r3_32u(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]); ++} ++#define SPEC_in2_r3_32u 0 ++ ++static void in2_r2_32s(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]); ++} ++#define SPEC_in2_r2_32s 0 ++ ++static void in2_r2_32u(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]); ++} ++#define SPEC_in2_r2_32u 0 ++ ++static void in2_r2_sr32(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_temp_new_i64(); ++ tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32); ++} ++#define SPEC_in2_r2_sr32 0 ++ ++static void in2_e2(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = load_freg32_i64(get_field(s, r2)); ++} ++#define SPEC_in2_e2 0 ++ ++static void in2_f2(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = load_freg(get_field(s, r2)); ++} ++#define SPEC_in2_f2 0 ++ ++/* Load the low double word of an extended (128-bit) format FP number */ ++static void in2_x2l(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = load_freg(get_field(s, r2) + 2); ++} ++#define SPEC_in2_x2l SPEC_r2_f128 ++ ++static void in2_ra2(DisasContext *s, DisasOps *o) ++{ ++ int r2 = get_field(s, r2); ++ ++ /* Note: *don't* treat !r2 as 0, use the reg 
value. */ ++ o->in2 = tcg_temp_new_i64(); ++ gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0); ++} ++#define SPEC_in2_ra2 0 ++ ++static void in2_a2(DisasContext *s, DisasOps *o) ++{ ++ int x2 = have_field(s, x2) ? get_field(s, x2) : 0; ++ o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); ++} ++#define SPEC_in2_a2 0 ++ ++static void in2_ri2(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2); ++} ++#define SPEC_in2_ri2 0 ++ ++static void in2_sh32(DisasContext *s, DisasOps *o) ++{ ++ help_l2_shift(s, o, 31); ++} ++#define SPEC_in2_sh32 0 ++ ++static void in2_sh64(DisasContext *s, DisasOps *o) ++{ ++ help_l2_shift(s, o, 63); ++} ++#define SPEC_in2_sh64 0 ++ ++static void in2_m2_8u(DisasContext *s, DisasOps *o) ++{ ++ in2_a2(s, o); ++ tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s)); ++} ++#define SPEC_in2_m2_8u 0 ++ ++static void in2_m2_16s(DisasContext *s, DisasOps *o) ++{ ++ in2_a2(s, o); ++ tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s)); ++} ++#define SPEC_in2_m2_16s 0 ++ ++static void in2_m2_16u(DisasContext *s, DisasOps *o) ++{ ++ in2_a2(s, o); ++ tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); ++} ++#define SPEC_in2_m2_16u 0 ++ ++static void in2_m2_32s(DisasContext *s, DisasOps *o) ++{ ++ in2_a2(s, o); ++ tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); ++} ++#define SPEC_in2_m2_32s 0 ++ ++static void in2_m2_32u(DisasContext *s, DisasOps *o) ++{ ++ in2_a2(s, o); ++ tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); ++} ++#define SPEC_in2_m2_32u 0 ++ ++#ifndef CONFIG_USER_ONLY ++static void in2_m2_32ua(DisasContext *s, DisasOps *o) ++{ ++ in2_a2(s, o); ++ tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN); ++} ++#define SPEC_in2_m2_32ua 0 ++#endif ++ ++static void in2_m2_64(DisasContext *s, DisasOps *o) ++{ ++ in2_a2(s, o); ++ tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); ++} ++#define SPEC_in2_m2_64 0 ++ ++static void in2_m2_64w(DisasContext *s, DisasOps *o) ++{ ++ in2_a2(s, o); ++ tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); ++ gen_addi_and_wrap_i64(s, o->in2, o->in2, 0); ++} ++#define SPEC_in2_m2_64w 0 ++ ++#ifndef CONFIG_USER_ONLY ++static void in2_m2_64a(DisasContext *s, DisasOps *o) ++{ ++ in2_a2(s, o); ++ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN); ++} ++#define SPEC_in2_m2_64a 0 ++#endif ++ ++static void in2_mri2_16u(DisasContext *s, DisasOps *o) ++{ ++ in2_ri2(s, o); ++ tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); ++} ++#define SPEC_in2_mri2_16u 0 ++ ++static void in2_mri2_32s(DisasContext *s, DisasOps *o) ++{ ++ in2_ri2(s, o); ++ tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); ++} ++#define SPEC_in2_mri2_32s 0 ++ ++static void in2_mri2_32u(DisasContext *s, DisasOps *o) ++{ ++ in2_ri2(s, o); ++ tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); ++} ++#define SPEC_in2_mri2_32u 0 ++ ++static void in2_mri2_64(DisasContext *s, DisasOps *o) ++{ ++ in2_ri2(s, o); ++ tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); ++} ++#define SPEC_in2_mri2_64 0 ++ ++static void in2_i2(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_const_i64(get_field(s, i2)); ++} ++#define SPEC_in2_i2 0 ++ ++static void in2_i2_8u(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_const_i64((uint8_t)get_field(s, i2)); ++} ++#define SPEC_in2_i2_8u 0 ++ ++static void in2_i2_16u(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_const_i64((uint16_t)get_field(s, i2)); ++} ++#define SPEC_in2_i2_16u 0 ++ ++static void in2_i2_32u(DisasContext 
*s, DisasOps *o) ++{ ++ o->in2 = tcg_const_i64((uint32_t)get_field(s, i2)); ++} ++#define SPEC_in2_i2_32u 0 ++ ++static void in2_i2_16u_shl(DisasContext *s, DisasOps *o) ++{ ++ uint64_t i2 = (uint16_t)get_field(s, i2); ++ o->in2 = tcg_const_i64(i2 << s->insn->data); ++} ++#define SPEC_in2_i2_16u_shl 0 ++ ++static void in2_i2_32u_shl(DisasContext *s, DisasOps *o) ++{ ++ uint64_t i2 = (uint32_t)get_field(s, i2); ++ o->in2 = tcg_const_i64(i2 << s->insn->data); ++} ++#define SPEC_in2_i2_32u_shl 0 ++ ++#ifndef CONFIG_USER_ONLY ++static void in2_insn(DisasContext *s, DisasOps *o) ++{ ++ o->in2 = tcg_const_i64(s->fields.raw_insn); ++} ++#define SPEC_in2_insn 0 ++#endif ++ ++/* ====================================================================== */ ++ ++/* Find opc within the table of insns. This is formulated as a switch ++ statement so that (1) we get compile-time notice of cut-paste errors ++ for duplicated opcodes, and (2) the compiler generates the binary ++ search tree, rather than us having to post-process the table. */ ++ ++#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \ ++ E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0) ++ ++#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \ ++ E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0) ++ ++#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \ ++ E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL) ++ ++#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM, ++ ++enum DisasInsnEnum { ++#include "insn-data.def" ++}; ++ ++#undef E ++#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \ ++ .opc = OPC, \ ++ .flags = FL, \ ++ .fmt = FMT_##FT, \ ++ .fac = FAC_##FC, \ ++ .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \ ++ .name = #NM, \ ++ .help_in1 = in1_##I1, \ ++ .help_in2 = in2_##I2, \ ++ .help_prep = prep_##P, \ ++ .help_wout = wout_##W, \ ++ .help_cout = cout_##CC, \ ++ .help_op = op_##OP, \ ++ .data = D \ ++ }, ++ ++/* Allow 0 to be used for NULL in the table below. */ ++#define in1_0 NULL ++#define in2_0 NULL ++#define prep_0 NULL ++#define wout_0 NULL ++#define cout_0 NULL ++#define op_0 NULL ++ ++#define SPEC_in1_0 0 ++#define SPEC_in2_0 0 ++#define SPEC_prep_0 0 ++#define SPEC_wout_0 0 ++ ++/* Give smaller names to the various facilities. 
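++ Each FAC_* alias below names the S390_FEAT_* bit that must be present ++ for the instruction at runtime; the E() macro above records it as the ++ .fac member of the table entry.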
*/ ++#define FAC_Z S390_FEAT_ZARCH ++#define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE ++#define FAC_DFP S390_FEAT_DFP ++#define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */ ++#define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */ ++#define FAC_EE S390_FEAT_EXECUTE_EXT ++#define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE ++#define FAC_FPE S390_FEAT_FLOATING_POINT_EXT ++#define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */ ++#define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */ ++#define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT ++#define FAC_HFP_MA S390_FEAT_HFP_MADDSUB ++#define FAC_HW S390_FEAT_STFLE_45 /* high-word */ ++#define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */ ++#define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */ ++#define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */ ++#define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */ ++#define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */ ++#define FAC_LD S390_FEAT_LONG_DISPLACEMENT ++#define FAC_PC S390_FEAT_STFLE_45 /* population count */ ++#define FAC_SCF S390_FEAT_STORE_CLOCK_FAST ++#define FAC_SFLE S390_FEAT_STFLE ++#define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */ ++#define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC ++#define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */ ++#define FAC_DAT_ENH S390_FEAT_DAT_ENH ++#define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2 ++#define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */ ++#define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */ ++#define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */ ++#define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3 ++#define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */ ++#define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */ ++#define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */ ++#define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */ ++#define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */ ++#define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME ++#define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */ ++#define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION ++#define FAC_V S390_FEAT_VECTOR /* vector facility */ ++#define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */ ++#define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */ ++ ++static const DisasInsn insn_info[] = { ++#include "insn-data.def" ++}; ++ ++#undef E ++#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \ ++ case OPC: return &insn_info[insn_ ## NM]; ++ ++static const DisasInsn *lookup_opc(uint16_t opc) ++{ ++ switch (opc) { ++#include "insn-data.def" ++ default: ++ return NULL; ++ } ++} ++ ++#undef F ++#undef E ++#undef D ++#undef C ++ ++/* Extract a field from the insn. The INSN should be left-aligned in ++ the uint64_t so that we can more easily utilize the big-bit-endian ++ definitions we extract from the Principles of Operation. */ ++ ++static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn) ++{ ++ uint32_t r, m; ++ ++ if (f->size == 0) { ++ return; ++ } ++ ++ /* Zero extract the field from the insn. */ ++ r = (insn << f->beg) >> (64 - f->size); ++ ++ /* Sign-extend, or un-swap the field as necessary. 
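++ As a worked example of the signed case: for a 4-bit field, m is 1 << 3; ++ with r = 0b1010, (r ^ m) - m = 2 - 8 = -6, i.e. 0b1010 read as a ++ two's-complement nibble. The 20-bit case reassembles dl (bits 19..8 of ++ r) and dh (bits 7..0), the sign-extended dh supplying the top bits.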
*/ ++ switch (f->type) { ++ case 0: /* unsigned */ ++ break; ++ case 1: /* signed */ ++ assert(f->size <= 32); ++ m = 1u << (f->size - 1); ++ r = (r ^ m) - m; ++ break; ++ case 2: /* dl+dh split, signed 20 bit. */ ++ r = ((int8_t)r << 12) | (r >> 8); ++ break; ++ case 3: /* MSB stored in RXB */ ++ g_assert(f->size == 4); ++ switch (f->beg) { ++ case 8: ++ r |= extract64(insn, 63 - 36, 1) << 4; ++ break; ++ case 12: ++ r |= extract64(insn, 63 - 37, 1) << 4; ++ break; ++ case 16: ++ r |= extract64(insn, 63 - 38, 1) << 4; ++ break; ++ case 32: ++ r |= extract64(insn, 63 - 39, 1) << 4; ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ break; ++ default: ++ abort(); ++ } ++ ++ /* ++ * Validate that the "compressed" encoding we selected above is valid. ++ * I.e. we haven't made two different original fields overlap. ++ */ ++ assert(((o->presentC >> f->indexC) & 1) == 0); ++ o->presentC |= 1 << f->indexC; ++ o->presentO |= 1 << f->indexO; ++ ++ o->c[f->indexC] = r; ++} ++ ++/* Lookup the insn at the current PC, extracting the operands into O and ++ returning the info struct for the insn. Returns NULL for invalid insn. */ ++ ++static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s) ++{ ++ uint64_t insn, pc = s->base.pc_next; ++ int op, op2, ilen; ++ const DisasInsn *info; ++ ++ if (unlikely(s->ex_value)) { ++ /* Drop the EX data now, so that it's clear on exception paths. */ ++ TCGv_i64 zero = tcg_const_i64(0); ++ tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value)); ++ tcg_temp_free_i64(zero); ++ ++ /* Extract the values saved by EXECUTE. */ ++ insn = s->ex_value & 0xffffffffffff0000ull; ++ ilen = s->ex_value & 0xf; ++ op = insn >> 56; ++ } else { ++ insn = ld_code2(env, pc); ++ op = (insn >> 8) & 0xff; ++ ilen = get_ilen(op); ++ switch (ilen) { ++ case 2: ++ insn = insn << 48; ++ break; ++ case 4: ++ insn = ld_code4(env, pc) << 32; ++ break; ++ case 6: ++ insn = (insn << 48) | (ld_code4(env, pc + 2) << 16); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ } ++ s->pc_tmp = s->base.pc_next + ilen; ++ s->ilen = ilen; ++ ++ /* We can't actually determine the insn format until we've looked up ++ the full insn opcode. Which we can't do without locating the ++ secondary opcode. Assume by default that OP2 is at bit 40; for ++ those smaller insns that don't actually have a secondary opcode ++ this will correctly result in OP2 = 0. */ ++ switch (op) { ++ case 0x01: /* E */ ++ case 0x80: /* S */ ++ case 0x82: /* S */ ++ case 0x93: /* S */ ++ case 0xb2: /* S, RRF, RRE, IE */ ++ case 0xb3: /* RRE, RRD, RRF */ ++ case 0xb9: /* RRE, RRF */ ++ case 0xe5: /* SSE, SIL */ ++ op2 = (insn << 8) >> 56; ++ break; ++ case 0xa5: /* RI */ ++ case 0xa7: /* RI */ ++ case 0xc0: /* RIL */ ++ case 0xc2: /* RIL */ ++ case 0xc4: /* RIL */ ++ case 0xc6: /* RIL */ ++ case 0xc8: /* SSF */ ++ case 0xcc: /* RIL */ ++ op2 = (insn << 12) >> 60; ++ break; ++ case 0xc5: /* MII */ ++ case 0xc7: /* SMI */ ++ case 0xd0 ... 0xdf: /* SS */ ++ case 0xe1: /* SS */ ++ case 0xe2: /* SS */ ++ case 0xe8: /* SS */ ++ case 0xe9: /* SS */ ++ case 0xea: /* SS */ ++ case 0xee ... 0xf3: /* SS */ ++ case 0xf8 ... 0xfd: /* SS */ ++ op2 = 0; ++ break; ++ default: ++ op2 = (insn << 40) >> 56; ++ break; ++ } ++ ++ memset(&s->fields, 0, sizeof(s->fields)); ++ s->fields.raw_insn = insn; ++ s->fields.op = op; ++ s->fields.op2 = op2; ++ ++ /* Lookup the instruction. */ ++ info = lookup_opc(op << 8 | op2); ++ s->insn = info; ++ ++ /* If we found it, extract the operands. 
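++ All NUM_C_FIELD descriptors of the format are applied; unused slots ++ have size 0 and are skipped by extract_field().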
*/ ++ if (info != NULL) { ++ DisasFormat fmt = info->fmt; ++ int i; ++ ++ for (i = 0; i < NUM_C_FIELD; ++i) { ++ extract_field(&s->fields, &format_info[fmt].op[i], insn); ++ } ++ } ++ return info; ++} ++ ++static bool is_afp_reg(int reg) ++{ ++ return reg % 2 || reg > 6; ++} ++ ++static bool is_fp_pair(int reg) ++{ ++ /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */ ++ return !(reg & 0x2); ++} ++ ++static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) ++{ ++ const DisasInsn *insn; ++ DisasJumpType ret = DISAS_NEXT; ++ DisasOps o = {}; ++ bool icount = false; ++ ++ /* Search for the insn in the table. */ ++ insn = extract_insn(env, s); ++ ++ /* Emit insn_start now that we know the ILEN. */ ++ tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen); ++ ++ /* Not found means unimplemented/illegal opcode. */ ++ if (insn == NULL) { ++ qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n", ++ s->fields.op, s->fields.op2); ++ gen_illegal_opcode(s); ++ ret = DISAS_NORETURN; ++ goto out; ++ } ++ ++#ifndef CONFIG_USER_ONLY ++ if (s->base.tb->flags & FLAG_MASK_PER) { ++ TCGv_i64 addr = tcg_const_i64(s->base.pc_next); ++ gen_helper_per_ifetch(cpu_env, addr); ++ tcg_temp_free_i64(addr); ++ } ++#endif ++ ++ /* process flags */ ++ if (insn->flags) { ++ /* privileged instruction */ ++ if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) { ++ gen_program_exception(s, PGM_PRIVILEGED); ++ ret = DISAS_NORETURN; ++ goto out; ++ } ++ ++ /* if AFP is not enabled, instructions and registers are forbidden */ ++ if (!(s->base.tb->flags & FLAG_MASK_AFP)) { ++ uint8_t dxc = 0; ++ ++ if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) { ++ dxc = 1; ++ } ++ if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) { ++ dxc = 1; ++ } ++ if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) { ++ dxc = 1; ++ } ++ if (insn->flags & IF_BFP) { ++ dxc = 2; ++ } ++ if (insn->flags & IF_DFP) { ++ dxc = 3; ++ } ++ if (insn->flags & IF_VEC) { ++ dxc = 0xfe; ++ } ++ if (dxc) { ++ gen_data_exception(dxc); ++ ret = DISAS_NORETURN; ++ goto out; ++ } ++ } ++ ++ /* if vector instructions not enabled, executing them is forbidden */ ++ if (insn->flags & IF_VEC) { ++ if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) { ++ gen_data_exception(0xfe); ++ ret = DISAS_NORETURN; ++ goto out; ++ } ++ } ++ ++ /* input/output is the special case for icount mode */ ++ if (unlikely(insn->flags & IF_IO)) { ++ icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT; ++ if (icount) { ++ gen_io_start(); ++ } ++ } ++ } ++ ++ /* Check for insn specification exceptions. */ ++ if (insn->spec) { ++ if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) || ++ (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) || ++ (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) || ++ (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) || ++ (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ ret = DISAS_NORETURN; ++ goto out; ++ } ++ } ++ ++ /* Implement the instruction. */ ++ if (insn->help_in1) { ++ insn->help_in1(s, &o); ++ } ++ if (insn->help_in2) { ++ insn->help_in2(s, &o); ++ } ++ if (insn->help_prep) { ++ insn->help_prep(s, &o); ++ } ++ if (insn->help_op) { ++ ret = insn->help_op(s, &o); ++ } ++ if (ret != DISAS_NORETURN) { ++ if (insn->help_wout) { ++ insn->help_wout(s, &o); ++ } ++ if (insn->help_cout) { ++ insn->help_cout(s, &o); ++ } ++ } ++ ++ /* Free any temporaries created by the helpers. 
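++ Operands flagged as g_* alias globals (e.g. in2_r1_o points in2 ++ straight at regs[r1]) and must not be freed.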
*/ ++ if (o.out && !o.g_out) { ++ tcg_temp_free_i64(o.out); ++ } ++ if (o.out2 && !o.g_out2) { ++ tcg_temp_free_i64(o.out2); ++ } ++ if (o.in1 && !o.g_in1) { ++ tcg_temp_free_i64(o.in1); ++ } ++ if (o.in2 && !o.g_in2) { ++ tcg_temp_free_i64(o.in2); ++ } ++ if (o.addr1) { ++ tcg_temp_free_i64(o.addr1); ++ } ++ ++ /* io should be the last instruction in tb when icount is enabled */ ++ if (unlikely(icount && ret == DISAS_NEXT)) { ++ ret = DISAS_PC_STALE; ++ } ++ ++#ifndef CONFIG_USER_ONLY ++ if (s->base.tb->flags & FLAG_MASK_PER) { ++ /* An exception might be triggered, save PSW if not already done. */ ++ if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) { ++ tcg_gen_movi_i64(psw_addr, s->pc_tmp); ++ } ++ ++ /* Call the helper to check for a possible PER exception. */ ++ gen_helper_per_check_exception(cpu_env); ++ } ++#endif ++ ++out: ++ /* Advance to the next instruction. */ ++ s->base.pc_next = s->pc_tmp; ++ return ret; ++} ++ ++static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ++{ ++ DisasContext *dc = container_of(dcbase, DisasContext, base); ++ ++ /* 31-bit mode */ ++ if (!(dc->base.tb->flags & FLAG_MASK_64)) { ++ dc->base.pc_first &= 0x7fffffff; ++ dc->base.pc_next = dc->base.pc_first; ++ } ++ ++ dc->cc_op = CC_OP_DYNAMIC; ++ dc->ex_value = dc->base.tb->cs_base; ++ dc->do_debug = dc->base.singlestep_enabled; ++} ++ ++static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs) ++{ ++} ++ ++static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) ++{ ++} ++ ++static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs, ++ const CPUBreakpoint *bp) ++{ ++ DisasContext *dc = container_of(dcbase, DisasContext, base); ++ ++ /* ++ * Emit an insn_start to accompany the breakpoint exception. ++ * The ILEN value is a dummy, since this does not result in ++ * an s390x exception, but an internal qemu exception which ++ * brings us back to interact with the gdbstub. ++ */ ++ tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2); ++ ++ dc->base.is_jmp = DISAS_PC_STALE; ++ dc->do_debug = true; ++ /* The address covered by the breakpoint must be included in ++ [tb->pc, tb->pc + tb->size) in order for it to be ++ properly cleared -- thus we increment the PC here so that ++ the logic setting tb->size does the right thing. */ ++ dc->base.pc_next += 2; ++ return true; ++} ++ ++static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) ++{ ++ CPUS390XState *env = cs->env_ptr; ++ DisasContext *dc = container_of(dcbase, DisasContext, base); ++ ++ dc->base.is_jmp = translate_one(env, dc); ++ if (dc->base.is_jmp == DISAS_NEXT) { ++ uint64_t page_start; ++ ++ page_start = dc->base.pc_first & TARGET_PAGE_MASK; ++ if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) { ++ dc->base.is_jmp = DISAS_TOO_MANY; ++ } ++ } ++} ++ ++static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) ++{ ++ DisasContext *dc = container_of(dcbase, DisasContext, base); ++ ++ switch (dc->base.is_jmp) { ++ case DISAS_GOTO_TB: ++ case DISAS_NORETURN: ++ break; ++ case DISAS_TOO_MANY: ++ case DISAS_PC_STALE: ++ case DISAS_PC_STALE_NOCHAIN: ++ update_psw_addr(dc); ++ /* FALLTHRU */ ++ case DISAS_PC_UPDATED: ++ /* Next TB starts off with CC_OP_DYNAMIC, so make sure the ++ cc op type is in env */ ++ update_cc_op(dc); ++ /* FALLTHRU */ ++ case DISAS_PC_CC_UPDATED: ++ /* Exit the TB, either by raising a debug exception or by return. 
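++ With PER or DISAS_PC_STALE_NOCHAIN the TB must not be chained, so we ++ leave via a plain exit_tb; otherwise lookup-and-goto-ptr may be used.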
*/ ++ if (dc->do_debug) { ++ gen_exception(EXCP_DEBUG); ++ } else if ((dc->base.tb->flags & FLAG_MASK_PER) || ++ dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) { ++ tcg_gen_exit_tb(NULL, 0); ++ } else { ++ tcg_gen_lookup_and_goto_ptr(); ++ } ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) ++{ ++ DisasContext *dc = container_of(dcbase, DisasContext, base); ++ ++ if (unlikely(dc->ex_value)) { ++ /* ??? Unfortunately log_target_disas can't use host memory. */ ++ qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value); ++ } else { ++ qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first)); ++ log_target_disas(cs, dc->base.pc_first, dc->base.tb->size); ++ } ++} ++ ++static const TranslatorOps s390x_tr_ops = { ++ .init_disas_context = s390x_tr_init_disas_context, ++ .tb_start = s390x_tr_tb_start, ++ .insn_start = s390x_tr_insn_start, ++ .breakpoint_check = s390x_tr_breakpoint_check, ++ .translate_insn = s390x_tr_translate_insn, ++ .tb_stop = s390x_tr_tb_stop, ++ .disas_log = s390x_tr_disas_log, ++}; ++ ++void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) ++{ ++ DisasContext dc; ++ ++ translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns); ++} ++ ++void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, ++ target_ulong *data) ++{ ++ int cc_op = data[1]; ++ ++ env->psw.addr = data[0]; ++ ++ /* Update the CC opcode if it is not already up-to-date. */ ++ if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) { ++ env->cc_op = cc_op; ++ } ++ ++ /* Record ILEN. */ ++ env->int_pgm_ilen = data[2]; ++} +diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc +new file mode 100644 +index 0000000000..0afa46e463 +--- /dev/null ++++ b/target/s390x/tcg/translate_vx.c.inc +@@ -0,0 +1,3109 @@ ++/* ++ * QEMU TCG support -- s390x vector instruction translation functions ++ * ++ * Copyright (C) 2019 Red Hat Inc ++ * ++ * Authors: ++ * David Hildenbrand ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. ++ */ ++ ++/* ++ * For most instructions that use the same element size for reads and ++ * writes, we can use real gvec vector expansion, which potentially uses ++ * real host vector instructions. As they only work up to 64 bit elements, ++ * 128 bit elements (vector is a single element) have to be handled ++ * differently. Operations that are too complicated to encode via TCG ops ++ * are handled via gvec ool (out-of-line) handlers. ++ * ++ * As soon as instructions use different element sizes for reads and writes ++ * or access elements "out of their element scope" we expand them manually ++ * in fancy loops, as gvec expansion does not deal with actual element ++ * numbers and also does not support access to other elements. ++ * ++ * 128 bit elements: ++ * As we only have i32/i64, such elements have to be loaded into two ++ * i64 values and can then be processed e.g. by tcg_gen_add2_i64. ++ * ++ * Sizes: ++ * On s390x, the operand size (oprsz) and the maximum size (maxsz) are ++ * always 16 (128 bit). What gvec code calls "vece", s390x calls "es", ++ * a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only ++ * 128 bit element size has to be treated in a special way (MO_64 + 1). ++ * We will use ES_* instead of MO_* for this reason in this file. 
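++ * As a quick map: ES_8 means 16 one-byte elements per register, ES_16 ++ * means 8, ES_32 means 4, ES_64 means 2 and ES_128 treats the whole ++ * register as a single element (cf. NUM_VEC_ELEMENTS() below).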
++ * ++ * CC handling: ++ * As gvec ool-helpers can currently not return values (besides via ++ * pointers like vectors or cpu_env), whenever we have to set the CC and ++ * can't conclude the value from the result vector, we will directly ++ * set it in "env->cc_op" and mark it as static via set_cc_static()". ++ * Whenever this is done, the helper writes globals (cc_op). ++ */ ++ ++#define NUM_VEC_ELEMENT_BYTES(es) (1 << (es)) ++#define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es)) ++#define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE) ++ ++#define ES_8 MO_8 ++#define ES_16 MO_16 ++#define ES_32 MO_32 ++#define ES_64 MO_64 ++#define ES_128 4 ++ ++/* Floating-Point Format */ ++#define FPF_SHORT 2 ++#define FPF_LONG 3 ++#define FPF_EXT 4 ++ ++static inline bool valid_vec_element(uint8_t enr, MemOp es) ++{ ++ return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1)); ++} ++ ++static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr, ++ MemOp memop) ++{ ++ const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); ++ ++ switch (memop) { ++ case ES_8: ++ tcg_gen_ld8u_i64(dst, cpu_env, offs); ++ break; ++ case ES_16: ++ tcg_gen_ld16u_i64(dst, cpu_env, offs); ++ break; ++ case ES_32: ++ tcg_gen_ld32u_i64(dst, cpu_env, offs); ++ break; ++ case ES_8 | MO_SIGN: ++ tcg_gen_ld8s_i64(dst, cpu_env, offs); ++ break; ++ case ES_16 | MO_SIGN: ++ tcg_gen_ld16s_i64(dst, cpu_env, offs); ++ break; ++ case ES_32 | MO_SIGN: ++ tcg_gen_ld32s_i64(dst, cpu_env, offs); ++ break; ++ case ES_64: ++ case ES_64 | MO_SIGN: ++ tcg_gen_ld_i64(dst, cpu_env, offs); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr, ++ MemOp memop) ++{ ++ const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); ++ ++ switch (memop) { ++ case ES_8: ++ tcg_gen_ld8u_i32(dst, cpu_env, offs); ++ break; ++ case ES_16: ++ tcg_gen_ld16u_i32(dst, cpu_env, offs); ++ break; ++ case ES_8 | MO_SIGN: ++ tcg_gen_ld8s_i32(dst, cpu_env, offs); ++ break; ++ case ES_16 | MO_SIGN: ++ tcg_gen_ld16s_i32(dst, cpu_env, offs); ++ break; ++ case ES_32: ++ case ES_32 | MO_SIGN: ++ tcg_gen_ld_i32(dst, cpu_env, offs); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr, ++ MemOp memop) ++{ ++ const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); ++ ++ switch (memop) { ++ case ES_8: ++ tcg_gen_st8_i64(src, cpu_env, offs); ++ break; ++ case ES_16: ++ tcg_gen_st16_i64(src, cpu_env, offs); ++ break; ++ case ES_32: ++ tcg_gen_st32_i64(src, cpu_env, offs); ++ break; ++ case ES_64: ++ tcg_gen_st_i64(src, cpu_env, offs); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr, ++ MemOp memop) ++{ ++ const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); ++ ++ switch (memop) { ++ case ES_8: ++ tcg_gen_st8_i32(src, cpu_env, offs); ++ break; ++ case ES_16: ++ tcg_gen_st16_i32(src, cpu_env, offs); ++ break; ++ case ES_32: ++ tcg_gen_st_i32(src, cpu_env, offs); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr, ++ uint8_t es) ++{ ++ TCGv_i64 tmp = tcg_temp_new_i64(); ++ ++ /* mask off invalid parts from the element nr */ ++ tcg_gen_andi_i64(tmp, enr, NUM_VEC_ELEMENTS(es) - 1); ++ ++ /* convert it to an element offset relative to cpu_env (vec_reg_offset() */ ++ tcg_gen_shli_i64(tmp, tmp, es); ++#ifndef 
HOST_WORDS_BIGENDIAN ++ tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es)); ++#endif ++ tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg)); ++ ++ /* generate the final ptr by adding cpu_env */ ++ tcg_gen_trunc_i64_ptr(ptr, tmp); ++ tcg_gen_add_ptr(ptr, ptr, cpu_env); ++ ++ tcg_temp_free_i64(tmp); ++} ++ ++#define gen_gvec_2(v1, v2, gen) \ ++ tcg_gen_gvec_2(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ 16, 16, gen) ++#define gen_gvec_2s(v1, v2, c, gen) \ ++ tcg_gen_gvec_2s(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ 16, 16, c, gen) ++#define gen_gvec_2_ool(v1, v2, data, fn) \ ++ tcg_gen_gvec_2_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ 16, 16, data, fn) ++#define gen_gvec_2i_ool(v1, v2, c, data, fn) \ ++ tcg_gen_gvec_2i_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ c, 16, 16, data, fn) ++#define gen_gvec_2_ptr(v1, v2, ptr, data, fn) \ ++ tcg_gen_gvec_2_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ ptr, 16, 16, data, fn) ++#define gen_gvec_3(v1, v2, v3, gen) \ ++ tcg_gen_gvec_3(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ vec_full_reg_offset(v3), 16, 16, gen) ++#define gen_gvec_3_ool(v1, v2, v3, data, fn) \ ++ tcg_gen_gvec_3_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ vec_full_reg_offset(v3), 16, 16, data, fn) ++#define gen_gvec_3_ptr(v1, v2, v3, ptr, data, fn) \ ++ tcg_gen_gvec_3_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ vec_full_reg_offset(v3), ptr, 16, 16, data, fn) ++#define gen_gvec_3i(v1, v2, v3, c, gen) \ ++ tcg_gen_gvec_3i(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ vec_full_reg_offset(v3), 16, 16, c, gen) ++#define gen_gvec_4(v1, v2, v3, v4, gen) \ ++ tcg_gen_gvec_4(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ ++ 16, 16, gen) ++#define gen_gvec_4_ool(v1, v2, v3, v4, data, fn) \ ++ tcg_gen_gvec_4_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ ++ 16, 16, data, fn) ++#define gen_gvec_4_ptr(v1, v2, v3, v4, ptr, data, fn) \ ++ tcg_gen_gvec_4_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ ++ ptr, 16, 16, data, fn) ++#define gen_gvec_dup_i64(es, v1, c) \ ++ tcg_gen_gvec_dup_i64(es, vec_full_reg_offset(v1), 16, 16, c) ++#define gen_gvec_mov(v1, v2) \ ++ tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \ ++ 16) ++#define gen_gvec_dup_imm(es, v1, c) \ ++ tcg_gen_gvec_dup_imm(es, vec_full_reg_offset(v1), 16, 16, c); ++#define gen_gvec_fn_2(fn, es, v1, v2) \ ++ tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ 16, 16) ++#define gen_gvec_fn_2i(fn, es, v1, v2, c) \ ++ tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ c, 16, 16) ++#define gen_gvec_fn_2s(fn, es, v1, v2, s) \ ++ tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ s, 16, 16) ++#define gen_gvec_fn_3(fn, es, v1, v2, v3) \ ++ tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ vec_full_reg_offset(v3), 16, 16) ++#define gen_gvec_fn_4(fn, es, v1, v2, v3, v4) \ ++ tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ ++ vec_full_reg_offset(v3), vec_full_reg_offset(v4), 16, 16) ++ ++/* ++ * Helper to carry out a 128 bit vector computation using 2 i64 values per ++ * vector. 
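++ * By convention element 0 is the leftmost (most significant) doubleword ++ * and element 1 the rightmost one, so the "h" temporaries below hold ++ * element 0 and the "l" temporaries element 1.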
++ */ ++typedef void (*gen_gvec128_3_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, ++ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh); ++static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a, ++ uint8_t b) ++{ ++ TCGv_i64 dh = tcg_temp_new_i64(); ++ TCGv_i64 dl = tcg_temp_new_i64(); ++ TCGv_i64 ah = tcg_temp_new_i64(); ++ TCGv_i64 al = tcg_temp_new_i64(); ++ TCGv_i64 bh = tcg_temp_new_i64(); ++ TCGv_i64 bl = tcg_temp_new_i64(); ++ ++ read_vec_element_i64(ah, a, 0, ES_64); ++ read_vec_element_i64(al, a, 1, ES_64); ++ read_vec_element_i64(bh, b, 0, ES_64); ++ read_vec_element_i64(bl, b, 1, ES_64); ++ fn(dl, dh, al, ah, bl, bh); ++ write_vec_element_i64(dh, d, 0, ES_64); ++ write_vec_element_i64(dl, d, 1, ES_64); ++ ++ tcg_temp_free_i64(dh); ++ tcg_temp_free_i64(dl); ++ tcg_temp_free_i64(ah); ++ tcg_temp_free_i64(al); ++ tcg_temp_free_i64(bh); ++ tcg_temp_free_i64(bl); ++} ++ ++typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, ++ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh, ++ TCGv_i64 cl, TCGv_i64 ch); ++static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a, ++ uint8_t b, uint8_t c) ++{ ++ TCGv_i64 dh = tcg_temp_new_i64(); ++ TCGv_i64 dl = tcg_temp_new_i64(); ++ TCGv_i64 ah = tcg_temp_new_i64(); ++ TCGv_i64 al = tcg_temp_new_i64(); ++ TCGv_i64 bh = tcg_temp_new_i64(); ++ TCGv_i64 bl = tcg_temp_new_i64(); ++ TCGv_i64 ch = tcg_temp_new_i64(); ++ TCGv_i64 cl = tcg_temp_new_i64(); ++ ++ read_vec_element_i64(ah, a, 0, ES_64); ++ read_vec_element_i64(al, a, 1, ES_64); ++ read_vec_element_i64(bh, b, 0, ES_64); ++ read_vec_element_i64(bl, b, 1, ES_64); ++ read_vec_element_i64(ch, c, 0, ES_64); ++ read_vec_element_i64(cl, c, 1, ES_64); ++ fn(dl, dh, al, ah, bl, bh, cl, ch); ++ write_vec_element_i64(dh, d, 0, ES_64); ++ write_vec_element_i64(dl, d, 1, ES_64); ++ ++ tcg_temp_free_i64(dh); ++ tcg_temp_free_i64(dl); ++ tcg_temp_free_i64(ah); ++ tcg_temp_free_i64(al); ++ tcg_temp_free_i64(bh); ++ tcg_temp_free_i64(bl); ++ tcg_temp_free_i64(ch); ++ tcg_temp_free_i64(cl); ++} ++ ++static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, ++ uint64_t b) ++{ ++ TCGv_i64 bl = tcg_const_i64(b); ++ TCGv_i64 bh = tcg_const_i64(0); ++ ++ tcg_gen_add2_i64(dl, dh, al, ah, bl, bh); ++ tcg_temp_free_i64(bl); ++ tcg_temp_free_i64(bh); ++} ++ ++static DisasJumpType op_vbperm(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), get_field(s, v3), 0, ++ gen_helper_gvec_vbperm); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vge(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = s->insn->data; ++ const uint8_t enr = get_field(s, m3); ++ TCGv_i64 tmp; ++ ++ if (!valid_vec_element(enr, es)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tmp = tcg_temp_new_i64(); ++ read_vec_element_i64(tmp, get_field(s, v2), enr, es); ++ tcg_gen_add_i64(o->addr1, o->addr1, tmp); ++ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0); ++ ++ tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); ++ write_vec_element_i64(tmp, get_field(s, v1), enr, es); ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static uint64_t generate_byte_mask(uint8_t mask) ++{ ++ uint64_t r = 0; ++ int i; ++ ++ for (i = 0; i < 8; i++) { ++ if ((mask >> i) & 1) { ++ r |= 0xffull << (i * 8); ++ } ++ } ++ return r; ++} ++ ++static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o) ++{ ++ const uint16_t i2 = get_field(s, i2); ++ ++ if (i2 == (i2 & 0xff) * 0x0101) { ++ /* ++ * Masks for both 
64 bit elements of the vector are the same. ++ * Trust tcg to produce a good constant loading. ++ */ ++ gen_gvec_dup_imm(ES_64, get_field(s, v1), ++ generate_byte_mask(i2 & 0xff)); ++ } else { ++ TCGv_i64 t = tcg_temp_new_i64(); ++ ++ tcg_gen_movi_i64(t, generate_byte_mask(i2 >> 8)); ++ write_vec_element_i64(t, get_field(s, v1), 0, ES_64); ++ tcg_gen_movi_i64(t, generate_byte_mask(i2)); ++ write_vec_element_i64(t, get_field(s, v1), 1, ES_64); ++ tcg_temp_free_i64(t); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vgm(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ const uint8_t bits = NUM_VEC_ELEMENT_BITS(es); ++ const uint8_t i2 = get_field(s, i2) & (bits - 1); ++ const uint8_t i3 = get_field(s, i3) & (bits - 1); ++ uint64_t mask = 0; ++ int i; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ /* generate the mask - take care of wrapping */ ++ for (i = i2; ; i = (i + 1) % bits) { ++ mask |= 1ull << (bits - i - 1); ++ if (i == i3) { ++ break; ++ } ++ } ++ ++ gen_gvec_dup_imm(es, get_field(s, v1), mask); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vl(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 t0 = tcg_temp_new_i64(); ++ TCGv_i64 t1 = tcg_temp_new_i64(); ++ ++ tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ); ++ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); ++ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ); ++ write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); ++ write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); ++ tcg_temp_free(t0); ++ tcg_temp_free(t1); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vlr(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_mov(get_field(s, v1), get_field(s, v2)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m3); ++ TCGv_i64 tmp; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tmp = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); ++ gen_gvec_dup_i64(es, get_field(s, v1), tmp); ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vle(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = s->insn->data; ++ const uint8_t enr = get_field(s, m3); ++ TCGv_i64 tmp; ++ ++ if (!valid_vec_element(enr, es)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tmp = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); ++ write_vec_element_i64(tmp, get_field(s, v1), enr, es); ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vlei(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = s->insn->data; ++ const uint8_t enr = get_field(s, m3); ++ TCGv_i64 tmp; ++ ++ if (!valid_vec_element(enr, es)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tmp = tcg_const_i64((int16_t)get_field(s, i2)); ++ write_vec_element_i64(tmp, get_field(s, v1), enr, es); ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ TCGv_ptr ptr; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ /* fast path if we don't need the register content */ ++ if (!get_field(s, b2)) { ++ uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1); 
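++ /* d2 alone is a constant element number here (masked to the valid ++ range above), so the element can be read directly instead of ++ going through get_vec_element_ptr_i64(). */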
++ ++ read_vec_element_i64(o->out, get_field(s, v3), enr, es); ++ return DISAS_NEXT; ++ } ++ ++ ptr = tcg_temp_new_ptr(); ++ get_vec_element_ptr_i64(ptr, get_field(s, v3), o->addr1, es); ++ switch (es) { ++ case ES_8: ++ tcg_gen_ld8u_i64(o->out, ptr, 0); ++ break; ++ case ES_16: ++ tcg_gen_ld16u_i64(o->out, ptr, 0); ++ break; ++ case ES_32: ++ tcg_gen_ld32u_i64(o->out, ptr, 0); ++ break; ++ case ES_64: ++ tcg_gen_ld_i64(o->out, ptr, 0); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ tcg_temp_free_ptr(ptr); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vllez(DisasContext *s, DisasOps *o) ++{ ++ uint8_t es = get_field(s, m3); ++ uint8_t enr; ++ TCGv_i64 t; ++ ++ switch (es) { ++ /* rightmost sub-element of leftmost doubleword */ ++ case ES_8: ++ enr = 7; ++ break; ++ case ES_16: ++ enr = 3; ++ break; ++ case ES_32: ++ enr = 1; ++ break; ++ case ES_64: ++ enr = 0; ++ break; ++ /* leftmost sub-element of leftmost doubleword */ ++ case 6: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ es = ES_32; ++ enr = 0; ++ break; ++ } ++ /* fallthrough */ ++ default: ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ t = tcg_temp_new_i64(); ++ tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es); ++ gen_gvec_dup_imm(es, get_field(s, v1), 0); ++ write_vec_element_i64(t, get_field(s, v1), enr, es); ++ tcg_temp_free_i64(t); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vlm(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t v3 = get_field(s, v3); ++ uint8_t v1 = get_field(s, v1); ++ TCGv_i64 t0, t1; ++ ++ if (v3 < v1 || (v3 - v1 + 1) > 16) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ /* ++ * Check for possible access exceptions by trying to load the last ++ * element. The first element will be checked first next. 
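++ * A fault on that probe is thus raised before any register of the ++ * range has been modified.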
++ */ ++ t0 = tcg_temp_new_i64(); ++ t1 = tcg_temp_new_i64(); ++ gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8); ++ tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ); ++ ++ for (;; v1++) { ++ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ); ++ write_vec_element_i64(t1, v1, 0, ES_64); ++ if (v1 == v3) { ++ break; ++ } ++ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); ++ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ); ++ write_vec_element_i64(t1, v1, 1, ES_64); ++ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); ++ } ++ ++ /* Store the last element, loaded first */ ++ write_vec_element_i64(t0, v1, 1, ES_64); ++ ++ tcg_temp_free_i64(t0); ++ tcg_temp_free_i64(t1); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o) ++{ ++ const int64_t block_size = (1ull << (get_field(s, m3) + 6)); ++ const int v1_offs = vec_full_reg_offset(get_field(s, v1)); ++ TCGv_ptr a0; ++ TCGv_i64 bytes; ++ ++ if (get_field(s, m3) > 6) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ bytes = tcg_temp_new_i64(); ++ a0 = tcg_temp_new_ptr(); ++ /* calculate the number of bytes until the next block boundary */ ++ tcg_gen_ori_i64(bytes, o->addr1, -block_size); ++ tcg_gen_neg_i64(bytes, bytes); ++ ++ tcg_gen_addi_ptr(a0, cpu_env, v1_offs); ++ gen_helper_vll(cpu_env, a0, o->addr1, bytes); ++ tcg_temp_free_i64(bytes); ++ tcg_temp_free_ptr(a0); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ TCGv_ptr ptr; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ /* fast path if we don't need the register content */ ++ if (!get_field(s, b2)) { ++ uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1); ++ ++ write_vec_element_i64(o->in2, get_field(s, v1), enr, es); ++ return DISAS_NEXT; ++ } ++ ++ ptr = tcg_temp_new_ptr(); ++ get_vec_element_ptr_i64(ptr, get_field(s, v1), o->addr1, es); ++ switch (es) { ++ case ES_8: ++ tcg_gen_st8_i64(o->in2, ptr, 0); ++ break; ++ case ES_16: ++ tcg_gen_st16_i64(o->in2, ptr, 0); ++ break; ++ case ES_32: ++ tcg_gen_st32_i64(o->in2, ptr, 0); ++ break; ++ case ES_64: ++ tcg_gen_st_i64(o->in2, ptr, 0); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ tcg_temp_free_ptr(ptr); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o) ++{ ++ write_vec_element_i64(o->in1, get_field(s, v1), 0, ES_64); ++ write_vec_element_i64(o->in2, get_field(s, v1), 1, ES_64); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vll(DisasContext *s, DisasOps *o) ++{ ++ const int v1_offs = vec_full_reg_offset(get_field(s, v1)); ++ TCGv_ptr a0 = tcg_temp_new_ptr(); ++ ++ /* convert highest index into an actual length */ ++ tcg_gen_addi_i64(o->in2, o->in2, 1); ++ tcg_gen_addi_ptr(a0, cpu_env, v1_offs); ++ gen_helper_vll(cpu_env, a0, o->addr1, o->in2); ++ tcg_temp_free_ptr(a0); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vmr(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t v1 = get_field(s, v1); ++ const uint8_t v2 = get_field(s, v2); ++ const uint8_t v3 = get_field(s, v3); ++ const uint8_t es = get_field(s, m4); ++ int dst_idx, src_idx; ++ TCGv_i64 tmp; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tmp = tcg_temp_new_i64(); ++ if (s->fields.op2 == 0x61) { ++ /* iterate backwards to avoid overwriting data we might need later */ ++ for (dst_idx = 
NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) { ++ src_idx = dst_idx / 2; ++ if (dst_idx % 2 == 0) { ++ read_vec_element_i64(tmp, v2, src_idx, es); ++ } else { ++ read_vec_element_i64(tmp, v3, src_idx, es); ++ } ++ write_vec_element_i64(tmp, v1, dst_idx, es); ++ } ++ } else { ++ /* iterate forward to avoid overwriting data we might need later */ ++ for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(es); dst_idx++) { ++ src_idx = (dst_idx + NUM_VEC_ELEMENTS(es)) / 2; ++ if (dst_idx % 2 == 0) { ++ read_vec_element_i64(tmp, v2, src_idx, es); ++ } else { ++ read_vec_element_i64(tmp, v3, src_idx, es); ++ } ++ write_vec_element_i64(tmp, v1, dst_idx, es); ++ } ++ } ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t v1 = get_field(s, v1); ++ const uint8_t v2 = get_field(s, v2); ++ const uint8_t v3 = get_field(s, v3); ++ const uint8_t es = get_field(s, m4); ++ static gen_helper_gvec_3 * const vpk[3] = { ++ gen_helper_gvec_vpk16, ++ gen_helper_gvec_vpk32, ++ gen_helper_gvec_vpk64, ++ }; ++ static gen_helper_gvec_3 * const vpks[3] = { ++ gen_helper_gvec_vpks16, ++ gen_helper_gvec_vpks32, ++ gen_helper_gvec_vpks64, ++ }; ++ static gen_helper_gvec_3_ptr * const vpks_cc[3] = { ++ gen_helper_gvec_vpks_cc16, ++ gen_helper_gvec_vpks_cc32, ++ gen_helper_gvec_vpks_cc64, ++ }; ++ static gen_helper_gvec_3 * const vpkls[3] = { ++ gen_helper_gvec_vpkls16, ++ gen_helper_gvec_vpkls32, ++ gen_helper_gvec_vpkls64, ++ }; ++ static gen_helper_gvec_3_ptr * const vpkls_cc[3] = { ++ gen_helper_gvec_vpkls_cc16, ++ gen_helper_gvec_vpkls_cc32, ++ gen_helper_gvec_vpkls_cc64, ++ }; ++ ++ if (es == ES_8 || es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ switch (s->fields.op2) { ++ case 0x97: ++ if (get_field(s, m5) & 0x1) { ++ gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpks_cc[es - 1]); ++ set_cc_static(s); ++ } else { ++ gen_gvec_3_ool(v1, v2, v3, 0, vpks[es - 1]); ++ } ++ break; ++ case 0x95: ++ if (get_field(s, m5) & 0x1) { ++ gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpkls_cc[es - 1]); ++ set_cc_static(s); ++ } else { ++ gen_gvec_3_ool(v1, v2, v3, 0, vpkls[es - 1]); ++ } ++ break; ++ case 0x94: ++ /* If sources and destination don't overlap -> fast path */ ++ if (v1 != v2 && v1 != v3) { ++ const uint8_t src_es = get_field(s, m4); ++ const uint8_t dst_es = src_es - 1; ++ TCGv_i64 tmp = tcg_temp_new_i64(); ++ int dst_idx, src_idx; ++ ++ for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) { ++ src_idx = dst_idx; ++ if (src_idx < NUM_VEC_ELEMENTS(src_es)) { ++ read_vec_element_i64(tmp, v2, src_idx, src_es); ++ } else { ++ src_idx -= NUM_VEC_ELEMENTS(src_es); ++ read_vec_element_i64(tmp, v3, src_idx, src_es); ++ } ++ write_vec_element_i64(tmp, v1, dst_idx, dst_es); ++ } ++ tcg_temp_free_i64(tmp); ++ } else { ++ gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]); ++ } ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vperm(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_4_ool(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), get_field(s, v4), ++ 0, gen_helper_gvec_vperm); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t i2 = extract32(get_field(s, m4), 2, 1); ++ const uint8_t i3 = extract32(get_field(s, m4), 0, 1); ++ TCGv_i64 t0 = tcg_temp_new_i64(); ++ TCGv_i64 t1 = tcg_temp_new_i64(); ++ ++ read_vec_element_i64(t0, get_field(s, v2), i2, ES_64); ++ 
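/* bit 2 of m4 selects the doubleword taken from v2, bit 0 the one ++ taken from v3 */ ++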
read_vec_element_i64(t1, get_field(s, v3), i3, ES_64); ++ write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); ++ write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); ++ tcg_temp_free_i64(t0); ++ tcg_temp_free_i64(t1); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vrep(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t enr = get_field(s, i2); ++ const uint8_t es = get_field(s, m4); ++ ++ if (es > ES_64 || !valid_vec_element(enr, es)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tcg_gen_gvec_dup_mem(es, vec_full_reg_offset(get_field(s, v1)), ++ vec_reg_offset(get_field(s, v3), enr, es), ++ 16, 16); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o) ++{ ++ const int64_t data = (int16_t)get_field(s, i2); ++ const uint8_t es = get_field(s, m3); ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_dup_imm(es, get_field(s, v1), data); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vsce(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = s->insn->data; ++ const uint8_t enr = get_field(s, m3); ++ TCGv_i64 tmp; ++ ++ if (!valid_vec_element(enr, es)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tmp = tcg_temp_new_i64(); ++ read_vec_element_i64(tmp, get_field(s, v2), enr, es); ++ tcg_gen_add_i64(o->addr1, o->addr1, tmp); ++ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0); ++ ++ read_vec_element_i64(tmp, get_field(s, v1), enr, es); ++ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vsel(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_fn_4(bitsel, ES_8, get_field(s, v1), ++ get_field(s, v4), get_field(s, v2), ++ get_field(s, v3)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vseg(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m3); ++ int idx1, idx2; ++ TCGv_i64 tmp; ++ ++ switch (es) { ++ case ES_8: ++ idx1 = 7; ++ idx2 = 15; ++ break; ++ case ES_16: ++ idx1 = 3; ++ idx2 = 7; ++ break; ++ case ES_32: ++ idx1 = 1; ++ idx2 = 3; ++ break; ++ default: ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tmp = tcg_temp_new_i64(); ++ read_vec_element_i64(tmp, get_field(s, v2), idx1, es | MO_SIGN); ++ write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); ++ read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN); ++ write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64); ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vst(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 tmp = tcg_const_i64(16); ++ ++ /* Probe write access before actually modifying memory */ ++ gen_helper_probe_write_access(cpu_env, o->addr1, tmp); ++ ++ read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); ++ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); ++ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); ++ read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64); ++ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vste(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = s->insn->data; ++ const uint8_t enr = get_field(s, m3); ++ TCGv_i64 tmp; ++ ++ if (!valid_vec_element(enr, es)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tmp = tcg_temp_new_i64(); ++ read_vec_element_i64(tmp, 
get_field(s, v1), enr, es); ++ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vstm(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t v3 = get_field(s, v3); ++ uint8_t v1 = get_field(s, v1); ++ TCGv_i64 tmp; ++ ++ while (v3 < v1 || (v3 - v1 + 1) > 16) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ /* Probe write access before actually modifying memory */ ++ tmp = tcg_const_i64((v3 - v1 + 1) * 16); ++ gen_helper_probe_write_access(cpu_env, o->addr1, tmp); ++ ++ for (;; v1++) { ++ read_vec_element_i64(tmp, v1, 0, ES_64); ++ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); ++ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); ++ read_vec_element_i64(tmp, v1, 1, ES_64); ++ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); ++ if (v1 == v3) { ++ break; ++ } ++ gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); ++ } ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vstl(DisasContext *s, DisasOps *o) ++{ ++ const int v1_offs = vec_full_reg_offset(get_field(s, v1)); ++ TCGv_ptr a0 = tcg_temp_new_ptr(); ++ ++ /* convert highest index into an actual length */ ++ tcg_gen_addi_i64(o->in2, o->in2, 1); ++ tcg_gen_addi_ptr(a0, cpu_env, v1_offs); ++ gen_helper_vstl(cpu_env, a0, o->addr1, o->in2); ++ tcg_temp_free_ptr(a0); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vup(DisasContext *s, DisasOps *o) ++{ ++ const bool logical = s->fields.op2 == 0xd4 || s->fields.op2 == 0xd5; ++ const uint8_t v1 = get_field(s, v1); ++ const uint8_t v2 = get_field(s, v2); ++ const uint8_t src_es = get_field(s, m3); ++ const uint8_t dst_es = src_es + 1; ++ int dst_idx, src_idx; ++ TCGv_i64 tmp; ++ ++ if (src_es > ES_32) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tmp = tcg_temp_new_i64(); ++ if (s->fields.op2 == 0xd7 || s->fields.op2 == 0xd5) { ++ /* iterate backwards to avoid overwriting data we might need later */ ++ for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) { ++ src_idx = dst_idx; ++ read_vec_element_i64(tmp, v2, src_idx, ++ src_es | (logical ? 0 : MO_SIGN)); ++ write_vec_element_i64(tmp, v1, dst_idx, dst_es); ++ } ++ ++ } else { ++ /* iterate forward to avoid overwriting data we might need later */ ++ for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) { ++ src_idx = dst_idx + NUM_VEC_ELEMENTS(src_es) / 2; ++ read_vec_element_i64(tmp, v2, src_idx, ++ src_es | (logical ? 
0 : MO_SIGN)); ++ write_vec_element_i64(tmp, v1, dst_idx, dst_es); ++ } ++ } ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_va(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ ++ if (es > ES_128) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } else if (es == ES_128) { ++ gen_gvec128_3_i64(tcg_gen_add2_i64, get_field(s, v1), ++ get_field(s, v2), get_field(s, v3)); ++ return DISAS_NEXT; ++ } ++ gen_gvec_fn_3(add, es, get_field(s, v1), get_field(s, v2), ++ get_field(s, v3)); ++ return DISAS_NEXT; ++} ++ ++static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es) ++{ ++ const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1; ++ TCGv_i64 msb_mask = tcg_const_i64(dup_const(es, 1ull << msb_bit_nr)); ++ TCGv_i64 t1 = tcg_temp_new_i64(); ++ TCGv_i64 t2 = tcg_temp_new_i64(); ++ TCGv_i64 t3 = tcg_temp_new_i64(); ++ ++ /* Calculate the carry into the MSB, ignoring the old MSBs */ ++ tcg_gen_andc_i64(t1, a, msb_mask); ++ tcg_gen_andc_i64(t2, b, msb_mask); ++ tcg_gen_add_i64(t1, t1, t2); ++ /* Calculate the MSB without any carry into it */ ++ tcg_gen_xor_i64(t3, a, b); ++ /* Calculate the carry out of the MSB in the MSB bit position */ ++ tcg_gen_and_i64(d, a, b); ++ tcg_gen_and_i64(t1, t1, t3); ++ tcg_gen_or_i64(d, d, t1); ++ /* Isolate and shift the carry into position */ ++ tcg_gen_and_i64(d, d, msb_mask); ++ tcg_gen_shri_i64(d, d, msb_bit_nr); ++ ++ tcg_temp_free_i64(t1); ++ tcg_temp_free_i64(t2); ++ tcg_temp_free_i64(t3); ++} ++ ++static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) ++{ ++ gen_acc(d, a, b, ES_8); ++} ++ ++static void gen_acc16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) ++{ ++ gen_acc(d, a, b, ES_16); ++} ++ ++static void gen_acc_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) ++{ ++ TCGv_i32 t = tcg_temp_new_i32(); ++ ++ tcg_gen_add_i32(t, a, b); ++ tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b); ++ tcg_temp_free_i32(t); ++} ++ ++static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) ++{ ++ TCGv_i64 t = tcg_temp_new_i64(); ++ ++ tcg_gen_add_i64(t, a, b); ++ tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b); ++ tcg_temp_free_i64(t); ++} ++ ++static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, ++ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) ++{ ++ TCGv_i64 th = tcg_temp_new_i64(); ++ TCGv_i64 tl = tcg_temp_new_i64(); ++ TCGv_i64 zero = tcg_const_i64(0); ++ ++ tcg_gen_add2_i64(tl, th, al, zero, bl, zero); ++ tcg_gen_add2_i64(tl, th, th, zero, ah, zero); ++ tcg_gen_add2_i64(tl, dl, tl, th, bh, zero); ++ tcg_gen_mov_i64(dh, zero); ++ ++ tcg_temp_free_i64(th); ++ tcg_temp_free_i64(tl); ++ tcg_temp_free_i64(zero); ++} ++ ++static DisasJumpType op_vacc(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ static const GVecGen3 g[4] = { ++ { .fni8 = gen_acc8_i64, }, ++ { .fni8 = gen_acc16_i64, }, ++ { .fni4 = gen_acc_i32, }, ++ { .fni8 = gen_acc_i64, }, ++ }; ++ ++ if (es > ES_128) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } else if (es == ES_128) { ++ gen_gvec128_3_i64(gen_acc2_i64, get_field(s, v1), ++ get_field(s, v2), get_field(s, v3)); ++ return DISAS_NEXT; ++ } ++ gen_gvec_3(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), &g[es]); ++ return DISAS_NEXT; ++} ++ ++static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, ++ TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) ++{ ++ TCGv_i64 tl = tcg_temp_new_i64(); ++ TCGv_i64 th = tcg_const_i64(0); ++ ++ /* extract the carry only */ ++ 
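/* i.e. bit 0 of the low doubleword; it is then folded into the ++ 128-bit sum by the second add2 below */ ++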
tcg_gen_extract_i64(tl, cl, 0, 1); ++ tcg_gen_add2_i64(dl, dh, al, ah, bl, bh); ++ tcg_gen_add2_i64(dl, dh, dl, dh, tl, th); ++ ++ tcg_temp_free_i64(tl); ++ tcg_temp_free_i64(th); ++} ++ ++static DisasJumpType op_vac(DisasContext *s, DisasOps *o) ++{ ++ if (get_field(s, m5) != ES_128) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec128_4_i64(gen_ac2_i64, get_field(s, v1), ++ get_field(s, v2), get_field(s, v3), ++ get_field(s, v4)); ++ return DISAS_NEXT; ++} ++ ++static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, ++ TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) ++{ ++ TCGv_i64 tl = tcg_temp_new_i64(); ++ TCGv_i64 th = tcg_temp_new_i64(); ++ TCGv_i64 zero = tcg_const_i64(0); ++ ++ tcg_gen_andi_i64(tl, cl, 1); ++ tcg_gen_add2_i64(tl, th, tl, zero, al, zero); ++ tcg_gen_add2_i64(tl, th, tl, th, bl, zero); ++ tcg_gen_add2_i64(tl, th, th, zero, ah, zero); ++ tcg_gen_add2_i64(tl, dl, tl, th, bh, zero); ++ tcg_gen_mov_i64(dh, zero); ++ ++ tcg_temp_free_i64(tl); ++ tcg_temp_free_i64(th); ++ tcg_temp_free_i64(zero); ++} ++ ++static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o) ++{ ++ if (get_field(s, m5) != ES_128) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec128_4_i64(gen_accc2_i64, get_field(s, v1), ++ get_field(s, v2), get_field(s, v3), ++ get_field(s, v4)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vn(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_fn_3(and, ES_8, get_field(s, v1), get_field(s, v2), ++ get_field(s, v3)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vnc(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_fn_3(andc, ES_8, get_field(s, v1), ++ get_field(s, v2), get_field(s, v3)); ++ return DISAS_NEXT; ++} ++ ++static void gen_avg_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) ++{ ++ TCGv_i64 t0 = tcg_temp_new_i64(); ++ TCGv_i64 t1 = tcg_temp_new_i64(); ++ ++ tcg_gen_ext_i32_i64(t0, a); ++ tcg_gen_ext_i32_i64(t1, b); ++ tcg_gen_add_i64(t0, t0, t1); ++ tcg_gen_addi_i64(t0, t0, 1); ++ tcg_gen_shri_i64(t0, t0, 1); ++ tcg_gen_extrl_i64_i32(d, t0); ++ ++ tcg_temp_free(t0); ++ tcg_temp_free(t1); ++} ++ ++static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) ++{ ++ TCGv_i64 dh = tcg_temp_new_i64(); ++ TCGv_i64 ah = tcg_temp_new_i64(); ++ TCGv_i64 bh = tcg_temp_new_i64(); ++ ++ /* extending the sign by one bit is sufficient */ ++ tcg_gen_extract_i64(ah, al, 63, 1); ++ tcg_gen_extract_i64(bh, bl, 63, 1); ++ tcg_gen_add2_i64(dl, dh, al, ah, bl, bh); ++ gen_addi2_i64(dl, dh, dl, dh, 1); ++ tcg_gen_extract2_i64(dl, dl, dh, 1); ++ ++ tcg_temp_free_i64(dh); ++ tcg_temp_free_i64(ah); ++ tcg_temp_free_i64(bh); ++} ++ ++static DisasJumpType op_vavg(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ static const GVecGen3 g[4] = { ++ { .fno = gen_helper_gvec_vavg8, }, ++ { .fno = gen_helper_gvec_vavg16, }, ++ { .fni4 = gen_avg_i32, }, ++ { .fni8 = gen_avg_i64, }, ++ }; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ gen_gvec_3(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), &g[es]); ++ return DISAS_NEXT; ++} ++ ++static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) ++{ ++ TCGv_i64 t0 = tcg_temp_new_i64(); ++ TCGv_i64 t1 = tcg_temp_new_i64(); ++ ++ tcg_gen_extu_i32_i64(t0, a); ++ tcg_gen_extu_i32_i64(t1, b); ++ tcg_gen_add_i64(t0, t0, t1); ++ tcg_gen_addi_i64(t0, t0, 1); ++ tcg_gen_shri_i64(t0, t0, 1); ++ tcg_gen_extrl_i64_i32(d, t0); ++ ++ 
tcg_temp_free(t0); ++ tcg_temp_free(t1); ++} ++ ++static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) ++{ ++ TCGv_i64 dh = tcg_temp_new_i64(); ++ TCGv_i64 zero = tcg_const_i64(0); ++ ++ tcg_gen_add2_i64(dl, dh, al, zero, bl, zero); ++ gen_addi2_i64(dl, dh, dl, dh, 1); ++ tcg_gen_extract2_i64(dl, dl, dh, 1); ++ ++ tcg_temp_free_i64(dh); ++ tcg_temp_free_i64(zero); ++} ++ ++static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ static const GVecGen3 g[4] = { ++ { .fno = gen_helper_gvec_vavgl8, }, ++ { .fno = gen_helper_gvec_vavgl16, }, ++ { .fni4 = gen_avgl_i32, }, ++ { .fni8 = gen_avgl_i64, }, ++ }; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ gen_gvec_3(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), &g[es]); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i32 tmp = tcg_temp_new_i32(); ++ TCGv_i32 sum = tcg_temp_new_i32(); ++ int i; ++ ++ read_vec_element_i32(sum, get_field(s, v3), 1, ES_32); ++ for (i = 0; i < 4; i++) { ++ read_vec_element_i32(tmp, get_field(s, v2), i, ES_32); ++ tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp); ++ } ++ gen_gvec_dup_imm(ES_32, get_field(s, v1), 0); ++ write_vec_element_i32(sum, get_field(s, v1), 1, ES_32); ++ ++ tcg_temp_free_i32(tmp); ++ tcg_temp_free_i32(sum); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vec(DisasContext *s, DisasOps *o) ++{ ++ uint8_t es = get_field(s, m3); ++ const uint8_t enr = NUM_VEC_ELEMENTS(es) / 2 - 1; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ if (s->fields.op2 == 0xdb) { ++ es |= MO_SIGN; ++ } ++ ++ o->in1 = tcg_temp_new_i64(); ++ o->in2 = tcg_temp_new_i64(); ++ read_vec_element_i64(o->in1, get_field(s, v1), enr, es); ++ read_vec_element_i64(o->in2, get_field(s, v2), enr, es); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vc(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ TCGCond cond = s->insn->data; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ tcg_gen_gvec_cmp(cond, es, ++ vec_full_reg_offset(get_field(s, v1)), ++ vec_full_reg_offset(get_field(s, v2)), ++ vec_full_reg_offset(get_field(s, v3)), 16, 16); ++ if (get_field(s, m5) & 0x1) { ++ TCGv_i64 low = tcg_temp_new_i64(); ++ TCGv_i64 high = tcg_temp_new_i64(); ++ ++ read_vec_element_i64(high, get_field(s, v1), 0, ES_64); ++ read_vec_element_i64(low, get_field(s, v1), 1, ES_64); ++ gen_op_update2_cc_i64(s, CC_OP_VC, low, high); ++ ++ tcg_temp_free_i64(low); ++ tcg_temp_free_i64(high); ++ } ++ return DISAS_NEXT; ++} ++ ++static void gen_clz_i32(TCGv_i32 d, TCGv_i32 a) ++{ ++ tcg_gen_clzi_i32(d, a, 32); ++} ++ ++static void gen_clz_i64(TCGv_i64 d, TCGv_i64 a) ++{ ++ tcg_gen_clzi_i64(d, a, 64); ++} ++ ++static DisasJumpType op_vclz(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m3); ++ static const GVecGen2 g[4] = { ++ { .fno = gen_helper_gvec_vclz8, }, ++ { .fno = gen_helper_gvec_vclz16, }, ++ { .fni4 = gen_clz_i32, }, ++ { .fni8 = gen_clz_i64, }, ++ }; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]); ++ return DISAS_NEXT; ++} ++ ++static void gen_ctz_i32(TCGv_i32 d, TCGv_i32 a) ++{ ++ tcg_gen_ctzi_i32(d, a, 32); ++} ++ ++static void gen_ctz_i64(TCGv_i64 d, TCGv_i64 a) ++{ ++ 
tcg_gen_ctzi_i64(d, a, 64); ++} ++ ++static DisasJumpType op_vctz(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m3); ++ static const GVecGen2 g[4] = { ++ { .fno = gen_helper_gvec_vctz8, }, ++ { .fno = gen_helper_gvec_vctz16, }, ++ { .fni4 = gen_ctz_i32, }, ++ { .fni8 = gen_ctz_i64, }, ++ }; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vx(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_fn_3(xor, ES_8, get_field(s, v1), get_field(s, v2), ++ get_field(s, v3)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ static const GVecGen3 g[4] = { ++ { .fno = gen_helper_gvec_vgfm8, }, ++ { .fno = gen_helper_gvec_vgfm16, }, ++ { .fno = gen_helper_gvec_vgfm32, }, ++ { .fno = gen_helper_gvec_vgfm64, }, ++ }; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ gen_gvec_3(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), &g[es]); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m5); ++ static const GVecGen4 g[4] = { ++ { .fno = gen_helper_gvec_vgfma8, }, ++ { .fno = gen_helper_gvec_vgfma16, }, ++ { .fno = gen_helper_gvec_vgfma32, }, ++ { .fno = gen_helper_gvec_vgfma64, }, ++ }; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ gen_gvec_4(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), get_field(s, v4), &g[es]); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vlc(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m3); ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_fn_2(neg, es, get_field(s, v1), get_field(s, v2)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vlp(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m3); ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_fn_2(abs, es, get_field(s, v1), get_field(s, v2)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vmx(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t v1 = get_field(s, v1); ++ const uint8_t v2 = get_field(s, v2); ++ const uint8_t v3 = get_field(s, v3); ++ const uint8_t es = get_field(s, m4); ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ switch (s->fields.op2) { ++ case 0xff: ++ gen_gvec_fn_3(smax, es, v1, v2, v3); ++ break; ++ case 0xfd: ++ gen_gvec_fn_3(umax, es, v1, v2, v3); ++ break; ++ case 0xfe: ++ gen_gvec_fn_3(smin, es, v1, v2, v3); ++ break; ++ case 0xfc: ++ gen_gvec_fn_3(umin, es, v1, v2, v3); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ return DISAS_NEXT; ++} ++ ++static void gen_mal_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) ++{ ++ TCGv_i32 t0 = tcg_temp_new_i32(); ++ ++ tcg_gen_mul_i32(t0, a, b); ++ tcg_gen_add_i32(d, t0, c); ++ ++ tcg_temp_free_i32(t0); ++} ++ ++static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) ++{ ++ TCGv_i64 t0 = tcg_temp_new_i64(); ++ TCGv_i64 t1 = tcg_temp_new_i64(); ++ TCGv_i64 t2 = tcg_temp_new_i64(); ++ ++ tcg_gen_ext_i32_i64(t0, a); ++ tcg_gen_ext_i32_i64(t1, b); ++ tcg_gen_ext_i32_i64(t2, c); ++ tcg_gen_mul_i64(t0, t0, t1); 
++ tcg_gen_add_i64(t0, t0, t2); ++ tcg_gen_extrh_i64_i32(d, t0); ++ ++ tcg_temp_free(t0); ++ tcg_temp_free(t1); ++ tcg_temp_free(t2); ++} ++ ++static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) ++{ ++ TCGv_i64 t0 = tcg_temp_new_i64(); ++ TCGv_i64 t1 = tcg_temp_new_i64(); ++ TCGv_i64 t2 = tcg_temp_new_i64(); ++ ++ tcg_gen_extu_i32_i64(t0, a); ++ tcg_gen_extu_i32_i64(t1, b); ++ tcg_gen_extu_i32_i64(t2, c); ++ tcg_gen_mul_i64(t0, t0, t1); ++ tcg_gen_add_i64(t0, t0, t2); ++ tcg_gen_extrh_i64_i32(d, t0); ++ ++ tcg_temp_free(t0); ++ tcg_temp_free(t1); ++ tcg_temp_free(t2); ++} ++ ++static DisasJumpType op_vma(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m5); ++ static const GVecGen4 g_vmal[3] = { ++ { .fno = gen_helper_gvec_vmal8, }, ++ { .fno = gen_helper_gvec_vmal16, }, ++ { .fni4 = gen_mal_i32, }, ++ }; ++ static const GVecGen4 g_vmah[3] = { ++ { .fno = gen_helper_gvec_vmah8, }, ++ { .fno = gen_helper_gvec_vmah16, }, ++ { .fni4 = gen_mah_i32, }, ++ }; ++ static const GVecGen4 g_vmalh[3] = { ++ { .fno = gen_helper_gvec_vmalh8, }, ++ { .fno = gen_helper_gvec_vmalh16, }, ++ { .fni4 = gen_malh_i32, }, ++ }; ++ static const GVecGen4 g_vmae[3] = { ++ { .fno = gen_helper_gvec_vmae8, }, ++ { .fno = gen_helper_gvec_vmae16, }, ++ { .fno = gen_helper_gvec_vmae32, }, ++ }; ++ static const GVecGen4 g_vmale[3] = { ++ { .fno = gen_helper_gvec_vmale8, }, ++ { .fno = gen_helper_gvec_vmale16, }, ++ { .fno = gen_helper_gvec_vmale32, }, ++ }; ++ static const GVecGen4 g_vmao[3] = { ++ { .fno = gen_helper_gvec_vmao8, }, ++ { .fno = gen_helper_gvec_vmao16, }, ++ { .fno = gen_helper_gvec_vmao32, }, ++ }; ++ static const GVecGen4 g_vmalo[3] = { ++ { .fno = gen_helper_gvec_vmalo8, }, ++ { .fno = gen_helper_gvec_vmalo16, }, ++ { .fno = gen_helper_gvec_vmalo32, }, ++ }; ++ const GVecGen4 *fn; ++ ++ if (es > ES_32) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ switch (s->fields.op2) { ++ case 0xaa: ++ fn = &g_vmal[es]; ++ break; ++ case 0xab: ++ fn = &g_vmah[es]; ++ break; ++ case 0xa9: ++ fn = &g_vmalh[es]; ++ break; ++ case 0xae: ++ fn = &g_vmae[es]; ++ break; ++ case 0xac: ++ fn = &g_vmale[es]; ++ break; ++ case 0xaf: ++ fn = &g_vmao[es]; ++ break; ++ case 0xad: ++ fn = &g_vmalo[es]; ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ ++ gen_gvec_4(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), get_field(s, v4), fn); ++ return DISAS_NEXT; ++} ++ ++static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) ++{ ++ TCGv_i32 t = tcg_temp_new_i32(); ++ ++ tcg_gen_muls2_i32(t, d, a, b); ++ tcg_temp_free_i32(t); ++} ++ ++static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) ++{ ++ TCGv_i32 t = tcg_temp_new_i32(); ++ ++ tcg_gen_mulu2_i32(t, d, a, b); ++ tcg_temp_free_i32(t); ++} ++ ++static DisasJumpType op_vm(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ static const GVecGen3 g_vmh[3] = { ++ { .fno = gen_helper_gvec_vmh8, }, ++ { .fno = gen_helper_gvec_vmh16, }, ++ { .fni4 = gen_mh_i32, }, ++ }; ++ static const GVecGen3 g_vmlh[3] = { ++ { .fno = gen_helper_gvec_vmlh8, }, ++ { .fno = gen_helper_gvec_vmlh16, }, ++ { .fni4 = gen_mlh_i32, }, ++ }; ++ static const GVecGen3 g_vme[3] = { ++ { .fno = gen_helper_gvec_vme8, }, ++ { .fno = gen_helper_gvec_vme16, }, ++ { .fno = gen_helper_gvec_vme32, }, ++ }; ++ static const GVecGen3 g_vmle[3] = { ++ { .fno = gen_helper_gvec_vmle8, }, ++ { .fno = gen_helper_gvec_vmle16, }, ++ { .fno = gen_helper_gvec_vmle32, }, ++ }; ++ static const GVecGen3 g_vmo[3] 
= { ++ { .fno = gen_helper_gvec_vmo8, }, ++ { .fno = gen_helper_gvec_vmo16, }, ++ { .fno = gen_helper_gvec_vmo32, }, ++ }; ++ static const GVecGen3 g_vmlo[3] = { ++ { .fno = gen_helper_gvec_vmlo8, }, ++ { .fno = gen_helper_gvec_vmlo16, }, ++ { .fno = gen_helper_gvec_vmlo32, }, ++ }; ++ const GVecGen3 *fn; ++ ++ if (es > ES_32) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ switch (s->fields.op2) { ++ case 0xa2: ++ gen_gvec_fn_3(mul, es, get_field(s, v1), ++ get_field(s, v2), get_field(s, v3)); ++ return DISAS_NEXT; ++ case 0xa3: ++ fn = &g_vmh[es]; ++ break; ++ case 0xa1: ++ fn = &g_vmlh[es]; ++ break; ++ case 0xa6: ++ fn = &g_vme[es]; ++ break; ++ case 0xa4: ++ fn = &g_vmle[es]; ++ break; ++ case 0xa7: ++ fn = &g_vmo[es]; ++ break; ++ case 0xa5: ++ fn = &g_vmlo[es]; ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ ++ gen_gvec_3(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), fn); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 l1, h1, l2, h2; ++ ++ if (get_field(s, m5) != ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ l1 = tcg_temp_new_i64(); ++ h1 = tcg_temp_new_i64(); ++ l2 = tcg_temp_new_i64(); ++ h2 = tcg_temp_new_i64(); ++ ++ /* Multiply both even elements from v2 and v3 */ ++ read_vec_element_i64(l1, get_field(s, v2), 0, ES_64); ++ read_vec_element_i64(h1, get_field(s, v3), 0, ES_64); ++ tcg_gen_mulu2_i64(l1, h1, l1, h1); ++ /* Shift result left by one (x2) if requested */ ++ if (extract32(get_field(s, m6), 3, 1)) { ++ tcg_gen_add2_i64(l1, h1, l1, h1, l1, h1); ++ } ++ ++ /* Multiply both odd elements from v2 and v3 */ ++ read_vec_element_i64(l2, get_field(s, v2), 1, ES_64); ++ read_vec_element_i64(h2, get_field(s, v3), 1, ES_64); ++ tcg_gen_mulu2_i64(l2, h2, l2, h2); ++ /* Shift result left by one (x2) if requested */ ++ if (extract32(get_field(s, m6), 2, 1)) { ++ tcg_gen_add2_i64(l2, h2, l2, h2, l2, h2); ++ } ++ ++ /* Add both intermediate results */ ++ tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2); ++ /* Add whole v4 */ ++ read_vec_element_i64(h2, get_field(s, v4), 0, ES_64); ++ read_vec_element_i64(l2, get_field(s, v4), 1, ES_64); ++ tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2); ++ ++ /* Store final result into v1. 
*/ ++ write_vec_element_i64(h1, get_field(s, v1), 0, ES_64); ++ write_vec_element_i64(l1, get_field(s, v1), 1, ES_64); ++ ++ tcg_temp_free_i64(l1); ++ tcg_temp_free_i64(h1); ++ tcg_temp_free_i64(l2); ++ tcg_temp_free_i64(h2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vnn(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_fn_3(nand, ES_8, get_field(s, v1), ++ get_field(s, v2), get_field(s, v3)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vno(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_fn_3(nor, ES_8, get_field(s, v1), get_field(s, v2), ++ get_field(s, v3)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vnx(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_fn_3(eqv, ES_8, get_field(s, v1), get_field(s, v2), ++ get_field(s, v3)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vo(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_fn_3(or, ES_8, get_field(s, v1), get_field(s, v2), ++ get_field(s, v3)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_voc(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_fn_3(orc, ES_8, get_field(s, v1), get_field(s, v2), ++ get_field(s, v3)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m3); ++ static const GVecGen2 g[4] = { ++ { .fno = gen_helper_gvec_vpopct8, }, ++ { .fno = gen_helper_gvec_vpopct16, }, ++ { .fni4 = tcg_gen_ctpop_i32, }, ++ { .fni8 = tcg_gen_ctpop_i64, }, ++ }; ++ ++ if (es > ES_64 || (es != ES_8 && !s390_has_feat(S390_FEAT_VECTOR_ENH))) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]); ++ return DISAS_NEXT; ++} ++ ++static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c) ++{ ++ TCGv_i32 t = tcg_temp_new_i32(); ++ ++ tcg_gen_rotli_i32(t, a, c & 31); ++ tcg_gen_and_i32(t, t, b); ++ tcg_gen_andc_i32(d, d, b); ++ tcg_gen_or_i32(d, d, t); ++ ++ tcg_temp_free_i32(t); ++} ++ ++static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c) ++{ ++ TCGv_i64 t = tcg_temp_new_i64(); ++ ++ tcg_gen_rotli_i64(t, a, c & 63); ++ tcg_gen_and_i64(t, t, b); ++ tcg_gen_andc_i64(d, d, b); ++ tcg_gen_or_i64(d, d, t); ++ ++ tcg_temp_free_i64(t); ++} ++ ++static DisasJumpType op_verim(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m5); ++ const uint8_t i4 = get_field(s, i4) & ++ (NUM_VEC_ELEMENT_BITS(es) - 1); ++ static const GVecGen3i g[4] = { ++ { .fno = gen_helper_gvec_verim8, }, ++ { .fno = gen_helper_gvec_verim16, }, ++ { .fni4 = gen_rim_i32, ++ .load_dest = true, }, ++ { .fni8 = gen_rim_i64, ++ .load_dest = true, }, ++ }; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_3i(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), i4, &g[es]); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vesv(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ const uint8_t v1 = get_field(s, v1); ++ const uint8_t v2 = get_field(s, v2); ++ const uint8_t v3 = get_field(s, v3); ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ switch (s->fields.op2) { ++ case 0x70: ++ gen_gvec_fn_3(shlv, es, v1, v2, v3); ++ break; ++ case 0x73: ++ gen_gvec_fn_3(rotlv, es, v1, v2, v3); ++ break; ++ case 0x7a: ++ gen_gvec_fn_3(sarv, es, v1, v2, v3); ++ break; ++ case 0x78: ++ gen_gvec_fn_3(shrv, es, v1, v2, v3); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ return DISAS_NEXT; ++} 
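++ ++/* ++ * op_vesv above picks one of the four element-shift operations based on ++ * op2, taking each element's shift count from v3. op_ves below instead ++ * applies a single count to all elements: the displacement d2 when no ++ * base register is given, otherwise the count computed into o->addr1, ++ * masked to the element width in bits. ++ */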
++ ++static DisasJumpType op_ves(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ const uint8_t d2 = get_field(s, d2) & ++ (NUM_VEC_ELEMENT_BITS(es) - 1); ++ const uint8_t v1 = get_field(s, v1); ++ const uint8_t v3 = get_field(s, v3); ++ TCGv_i32 shift; ++ ++ if (es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ if (likely(!get_field(s, b2))) { ++ switch (s->fields.op2) { ++ case 0x30: ++ gen_gvec_fn_2i(shli, es, v1, v3, d2); ++ break; ++ case 0x33: ++ gen_gvec_fn_2i(rotli, es, v1, v3, d2); ++ break; ++ case 0x3a: ++ gen_gvec_fn_2i(sari, es, v1, v3, d2); ++ break; ++ case 0x38: ++ gen_gvec_fn_2i(shri, es, v1, v3, d2); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ } else { ++ shift = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(shift, o->addr1); ++ tcg_gen_andi_i32(shift, shift, NUM_VEC_ELEMENT_BITS(es) - 1); ++ switch (s->fields.op2) { ++ case 0x30: ++ gen_gvec_fn_2s(shls, es, v1, v3, shift); ++ break; ++ case 0x33: ++ gen_gvec_fn_2s(rotls, es, v1, v3, shift); ++ break; ++ case 0x3a: ++ gen_gvec_fn_2s(sars, es, v1, v3, shift); ++ break; ++ case 0x38: ++ gen_gvec_fn_2s(shrs, es, v1, v3, shift); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ tcg_temp_free_i32(shift); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vsl(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 shift = tcg_temp_new_i64(); ++ ++ read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); ++ if (s->fields.op2 == 0x74) { ++ tcg_gen_andi_i64(shift, shift, 0x7); ++ } else { ++ tcg_gen_andi_i64(shift, shift, 0x78); ++ } ++ ++ gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), ++ shift, 0, gen_helper_gvec_vsl); ++ tcg_temp_free_i64(shift); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t i4 = get_field(s, i4) & 0xf; ++ const int left_shift = (i4 & 7) * 8; ++ const int right_shift = 64 - left_shift; ++ TCGv_i64 t0 = tcg_temp_new_i64(); ++ TCGv_i64 t1 = tcg_temp_new_i64(); ++ TCGv_i64 t2 = tcg_temp_new_i64(); ++ ++ if ((i4 & 8) == 0) { ++ read_vec_element_i64(t0, get_field(s, v2), 0, ES_64); ++ read_vec_element_i64(t1, get_field(s, v2), 1, ES_64); ++ read_vec_element_i64(t2, get_field(s, v3), 0, ES_64); ++ } else { ++ read_vec_element_i64(t0, get_field(s, v2), 1, ES_64); ++ read_vec_element_i64(t1, get_field(s, v3), 0, ES_64); ++ read_vec_element_i64(t2, get_field(s, v3), 1, ES_64); ++ } ++ tcg_gen_extract2_i64(t0, t1, t0, right_shift); ++ tcg_gen_extract2_i64(t1, t2, t1, right_shift); ++ write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); ++ write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); ++ ++ tcg_temp_free(t0); ++ tcg_temp_free(t1); ++ tcg_temp_free(t2); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vsra(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 shift = tcg_temp_new_i64(); ++ ++ read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); ++ if (s->fields.op2 == 0x7e) { ++ tcg_gen_andi_i64(shift, shift, 0x7); ++ } else { ++ tcg_gen_andi_i64(shift, shift, 0x78); ++ } ++ ++ gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), ++ shift, 0, gen_helper_gvec_vsra); ++ tcg_temp_free_i64(shift); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o) ++{ ++ TCGv_i64 shift = tcg_temp_new_i64(); ++ ++ read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); ++ if (s->fields.op2 == 0x7c) { ++ tcg_gen_andi_i64(shift, shift, 0x7); ++ } else { ++ tcg_gen_andi_i64(shift, shift, 0x78); ++ } ++ ++ gen_gvec_2i_ool(get_field(s, 
v1), get_field(s, v2), ++ shift, 0, gen_helper_gvec_vsrl); ++ tcg_temp_free_i64(shift); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vs(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ ++ if (es > ES_128) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } else if (es == ES_128) { ++ gen_gvec128_3_i64(tcg_gen_sub2_i64, get_field(s, v1), ++ get_field(s, v2), get_field(s, v3)); ++ return DISAS_NEXT; ++ } ++ gen_gvec_fn_3(sub, es, get_field(s, v1), get_field(s, v2), ++ get_field(s, v3)); ++ return DISAS_NEXT; ++} ++ ++static void gen_scbi_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) ++{ ++ tcg_gen_setcond_i32(TCG_COND_GEU, d, a, b); ++} ++ ++static void gen_scbi_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) ++{ ++ tcg_gen_setcond_i64(TCG_COND_GEU, d, a, b); ++} ++ ++static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, ++ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) ++{ ++ TCGv_i64 th = tcg_temp_new_i64(); ++ TCGv_i64 tl = tcg_temp_new_i64(); ++ TCGv_i64 zero = tcg_const_i64(0); ++ ++ tcg_gen_sub2_i64(tl, th, al, zero, bl, zero); ++ tcg_gen_andi_i64(th, th, 1); ++ tcg_gen_sub2_i64(tl, th, ah, zero, th, zero); ++ tcg_gen_sub2_i64(tl, th, tl, th, bh, zero); ++ /* "invert" the result: -1 -> 0; 0 -> 1 */ ++ tcg_gen_addi_i64(dl, th, 1); ++ tcg_gen_mov_i64(dh, zero); ++ ++ tcg_temp_free_i64(th); ++ tcg_temp_free_i64(tl); ++ tcg_temp_free_i64(zero); ++} ++ ++static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ static const GVecGen3 g[4] = { ++ { .fno = gen_helper_gvec_vscbi8, }, ++ { .fno = gen_helper_gvec_vscbi16, }, ++ { .fni4 = gen_scbi_i32, }, ++ { .fni8 = gen_scbi_i64, }, ++ }; ++ ++ if (es > ES_128) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } else if (es == ES_128) { ++ gen_gvec128_3_i64(gen_scbi2_i64, get_field(s, v1), ++ get_field(s, v2), get_field(s, v3)); ++ return DISAS_NEXT; ++ } ++ gen_gvec_3(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), &g[es]); ++ return DISAS_NEXT; ++} ++ ++static void gen_sbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, ++ TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) ++{ ++ TCGv_i64 tl = tcg_temp_new_i64(); ++ TCGv_i64 th = tcg_temp_new_i64(); ++ ++ tcg_gen_not_i64(tl, bl); ++ tcg_gen_not_i64(th, bh); ++ gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch); ++ tcg_temp_free_i64(tl); ++ tcg_temp_free_i64(th); ++} ++ ++static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o) ++{ ++ if (get_field(s, m5) != ES_128) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec128_4_i64(gen_sbi2_i64, get_field(s, v1), ++ get_field(s, v2), get_field(s, v3), ++ get_field(s, v4)); ++ return DISAS_NEXT; ++} ++ ++static void gen_sbcbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, ++ TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) ++{ ++ TCGv_i64 th = tcg_temp_new_i64(); ++ TCGv_i64 tl = tcg_temp_new_i64(); ++ ++ tcg_gen_not_i64(tl, bl); ++ tcg_gen_not_i64(th, bh); ++ gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch); ++ ++ tcg_temp_free_i64(tl); ++ tcg_temp_free_i64(th); ++} ++ ++static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o) ++{ ++ if (get_field(s, m5) != ES_128) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec128_4_i64(gen_sbcbi2_i64, get_field(s, v1), ++ get_field(s, v2), get_field(s, v3), ++ get_field(s, v4)); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vsumg(DisasContext *s, DisasOps 
*o) ++{ ++ const uint8_t es = get_field(s, m4); ++ TCGv_i64 sum, tmp; ++ uint8_t dst_idx; ++ ++ if (es == ES_8 || es > ES_32) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ sum = tcg_temp_new_i64(); ++ tmp = tcg_temp_new_i64(); ++ for (dst_idx = 0; dst_idx < 2; dst_idx++) { ++ uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 2; ++ const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 2 - 1; ++ ++ read_vec_element_i64(sum, get_field(s, v3), max_idx, es); ++ for (; idx <= max_idx; idx++) { ++ read_vec_element_i64(tmp, get_field(s, v2), idx, es); ++ tcg_gen_add_i64(sum, sum, tmp); ++ } ++ write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64); ++ } ++ tcg_temp_free_i64(sum); ++ tcg_temp_free_i64(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ const uint8_t max_idx = NUM_VEC_ELEMENTS(es) - 1; ++ TCGv_i64 sumh, suml, zero, tmpl; ++ uint8_t idx; ++ ++ if (es < ES_32 || es > ES_64) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ sumh = tcg_const_i64(0); ++ suml = tcg_temp_new_i64(); ++ zero = tcg_const_i64(0); ++ tmpl = tcg_temp_new_i64(); ++ ++ read_vec_element_i64(suml, get_field(s, v3), max_idx, es); ++ for (idx = 0; idx <= max_idx; idx++) { ++ read_vec_element_i64(tmpl, get_field(s, v2), idx, es); ++ tcg_gen_add2_i64(suml, sumh, suml, sumh, tmpl, zero); ++ } ++ write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64); ++ write_vec_element_i64(suml, get_field(s, v1), 1, ES_64); ++ ++ tcg_temp_free_i64(sumh); ++ tcg_temp_free_i64(suml); ++ tcg_temp_free_i64(zero); ++ tcg_temp_free_i64(tmpl); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vsum(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ TCGv_i32 sum, tmp; ++ uint8_t dst_idx; ++ ++ if (es > ES_16) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ sum = tcg_temp_new_i32(); ++ tmp = tcg_temp_new_i32(); ++ for (dst_idx = 0; dst_idx < 4; dst_idx++) { ++ uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 4; ++ const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 4 - 1; ++ ++ read_vec_element_i32(sum, get_field(s, v3), max_idx, es); ++ for (; idx <= max_idx; idx++) { ++ read_vec_element_i32(tmp, get_field(s, v2), idx, es); ++ tcg_gen_add_i32(sum, sum, tmp); ++ } ++ write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32); ++ } ++ tcg_temp_free_i32(sum); ++ tcg_temp_free_i32(tmp); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vtm(DisasContext *s, DisasOps *o) ++{ ++ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), ++ cpu_env, 0, gen_helper_gvec_vtm); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vfae(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ const uint8_t m5 = get_field(s, m5); ++ static gen_helper_gvec_3 * const g[3] = { ++ gen_helper_gvec_vfae8, ++ gen_helper_gvec_vfae16, ++ gen_helper_gvec_vfae32, ++ }; ++ static gen_helper_gvec_3_ptr * const g_cc[3] = { ++ gen_helper_gvec_vfae_cc8, ++ gen_helper_gvec_vfae_cc16, ++ gen_helper_gvec_vfae_cc32, ++ }; ++ if (es > ES_32) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ if (extract32(m5, 0, 1)) { ++ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), cpu_env, m5, g_cc[es]); ++ set_cc_static(s); ++ } else { ++ gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), m5, g[es]); ++ } ++ return DISAS_NEXT; ++} ++ 
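++/* ++ * op_vfee and op_vfene below follow the same pattern as op_vfae above: ++ * bit 0 of m5 selects the condition-code-setting helper variant, which ++ * is followed by set_cc_static(). ++ */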
++static DisasJumpType op_vfee(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ const uint8_t m5 = get_field(s, m5); ++ static gen_helper_gvec_3 * const g[3] = { ++ gen_helper_gvec_vfee8, ++ gen_helper_gvec_vfee16, ++ gen_helper_gvec_vfee32, ++ }; ++ static gen_helper_gvec_3_ptr * const g_cc[3] = { ++ gen_helper_gvec_vfee_cc8, ++ gen_helper_gvec_vfee_cc16, ++ gen_helper_gvec_vfee_cc32, ++ }; ++ ++ if (es > ES_32 || m5 & ~0x3) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ if (extract32(m5, 0, 1)) { ++ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), cpu_env, m5, g_cc[es]); ++ set_cc_static(s); ++ } else { ++ gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), m5, g[es]); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vfene(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ const uint8_t m5 = get_field(s, m5); ++ static gen_helper_gvec_3 * const g[3] = { ++ gen_helper_gvec_vfene8, ++ gen_helper_gvec_vfene16, ++ gen_helper_gvec_vfene32, ++ }; ++ static gen_helper_gvec_3_ptr * const g_cc[3] = { ++ gen_helper_gvec_vfene_cc8, ++ gen_helper_gvec_vfene_cc16, ++ gen_helper_gvec_vfene_cc32, ++ }; ++ ++ if (es > ES_32 || m5 & ~0x3) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ if (extract32(m5, 0, 1)) { ++ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), cpu_env, m5, g_cc[es]); ++ set_cc_static(s); ++ } else { ++ gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), m5, g[es]); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vistr(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m4); ++ const uint8_t m5 = get_field(s, m5); ++ static gen_helper_gvec_2 * const g[3] = { ++ gen_helper_gvec_vistr8, ++ gen_helper_gvec_vistr16, ++ gen_helper_gvec_vistr32, ++ }; ++ static gen_helper_gvec_2_ptr * const g_cc[3] = { ++ gen_helper_gvec_vistr_cc8, ++ gen_helper_gvec_vistr_cc16, ++ gen_helper_gvec_vistr_cc32, ++ }; ++ ++ if (es > ES_32 || m5 & ~0x1) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ if (extract32(m5, 0, 1)) { ++ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), ++ cpu_env, 0, g_cc[es]); ++ set_cc_static(s); ++ } else { ++ gen_gvec_2_ool(get_field(s, v1), get_field(s, v2), 0, ++ g[es]); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t es = get_field(s, m5); ++ const uint8_t m6 = get_field(s, m6); ++ static gen_helper_gvec_4 * const g[3] = { ++ gen_helper_gvec_vstrc8, ++ gen_helper_gvec_vstrc16, ++ gen_helper_gvec_vstrc32, ++ }; ++ static gen_helper_gvec_4 * const g_rt[3] = { ++ gen_helper_gvec_vstrc_rt8, ++ gen_helper_gvec_vstrc_rt16, ++ gen_helper_gvec_vstrc_rt32, ++ }; ++ static gen_helper_gvec_4_ptr * const g_cc[3] = { ++ gen_helper_gvec_vstrc_cc8, ++ gen_helper_gvec_vstrc_cc16, ++ gen_helper_gvec_vstrc_cc32, ++ }; ++ static gen_helper_gvec_4_ptr * const g_cc_rt[3] = { ++ gen_helper_gvec_vstrc_cc_rt8, ++ gen_helper_gvec_vstrc_cc_rt16, ++ gen_helper_gvec_vstrc_cc_rt32, ++ }; ++ ++ if (es > ES_32) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ if (extract32(m6, 0, 1)) { ++ if (extract32(m6, 2, 1)) { ++ gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), get_field(s, v4), ++ cpu_env, m6, g_cc_rt[es]); ++ } else { ++ gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2), ++ 
get_field(s, v3), get_field(s, v4), ++ cpu_env, m6, g_cc[es]); ++ } ++ set_cc_static(s); ++ } else { ++ if (extract32(m6, 2, 1)) { ++ gen_gvec_4_ool(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), get_field(s, v4), ++ m6, g_rt[es]); ++ } else { ++ gen_gvec_4_ool(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), get_field(s, v4), ++ m6, g[es]); ++ } ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vfa(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t fpf = get_field(s, m4); ++ const uint8_t m5 = get_field(s, m5); ++ gen_helper_gvec_3_ptr *fn = NULL; ++ ++ switch (s->fields.op2) { ++ case 0xe3: ++ switch (fpf) { ++ case FPF_SHORT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfa32; ++ } ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_vfa64; ++ break; ++ case FPF_EXT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfa128; ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0xe5: ++ switch (fpf) { ++ case FPF_SHORT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfd32; ++ } ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_vfd64; ++ break; ++ case FPF_EXT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfd128; ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0xe7: ++ switch (fpf) { ++ case FPF_SHORT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfm32; ++ } ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_vfm64; ++ break; ++ case FPF_EXT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfm128; ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0xe2: ++ switch (fpf) { ++ case FPF_SHORT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfs32; ++ } ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_vfs64; ++ break; ++ case FPF_EXT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfs128; ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ ++ if (!fn || extract32(m5, 0, 3)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), cpu_env, m5, fn); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_wfc(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t fpf = get_field(s, m3); ++ const uint8_t m4 = get_field(s, m4); ++ gen_helper_gvec_2_ptr *fn = NULL; ++ ++ switch (fpf) { ++ case FPF_SHORT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_wfk32; ++ if (s->fields.op2 == 0xcb) { ++ fn = gen_helper_gvec_wfc32; ++ } ++ } ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_wfk64; ++ if (s->fields.op2 == 0xcb) { ++ fn = gen_helper_gvec_wfc64; ++ } ++ break; ++ case FPF_EXT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_wfk128; ++ if (s->fields.op2 == 0xcb) { ++ fn = gen_helper_gvec_wfc128; ++ } ++ } ++ break; ++ default: ++ break; ++ }; ++ ++ if (!fn || m4) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, fn); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vfc(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t fpf = get_field(s, m4); ++ const uint8_t m5 = get_field(s, m5); ++ const uint8_t m6 = get_field(s, m6); ++ const bool cs = extract32(m6, 0, 1); ++ const bool sq = extract32(m5, 2, 1); ++ gen_helper_gvec_3_ptr *fn = NULL; ++ ++ switch 
(s->fields.op2) { ++ case 0xe8: ++ switch (fpf) { ++ case FPF_SHORT: ++ fn = cs ? gen_helper_gvec_vfce32_cc : gen_helper_gvec_vfce32; ++ break; ++ case FPF_LONG: ++ fn = cs ? gen_helper_gvec_vfce64_cc : gen_helper_gvec_vfce64; ++ break; ++ case FPF_EXT: ++ fn = cs ? gen_helper_gvec_vfce128_cc : gen_helper_gvec_vfce128; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0xeb: ++ switch (fpf) { ++ case FPF_SHORT: ++ fn = cs ? gen_helper_gvec_vfch32_cc : gen_helper_gvec_vfch32; ++ break; ++ case FPF_LONG: ++ fn = cs ? gen_helper_gvec_vfch64_cc : gen_helper_gvec_vfch64; ++ break; ++ case FPF_EXT: ++ fn = cs ? gen_helper_gvec_vfch128_cc : gen_helper_gvec_vfch128; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0xea: ++ switch (fpf) { ++ case FPF_SHORT: ++ fn = cs ? gen_helper_gvec_vfche32_cc : gen_helper_gvec_vfche32; ++ break; ++ case FPF_LONG: ++ fn = cs ? gen_helper_gvec_vfche64_cc : gen_helper_gvec_vfche64; ++ break; ++ case FPF_EXT: ++ fn = cs ? gen_helper_gvec_vfche128_cc : gen_helper_gvec_vfche128; ++ break; ++ default: ++ break; ++ } ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ ++ if (!fn || extract32(m5, 0, 2) || extract32(m6, 1, 3) || ++ (!s390_has_feat(S390_FEAT_VECTOR_ENH) && (fpf != FPF_LONG || sq))) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3), ++ cpu_env, m5, fn); ++ if (cs) { ++ set_cc_static(s); ++ } ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t fpf = get_field(s, m3); ++ const uint8_t m4 = get_field(s, m4); ++ const uint8_t erm = get_field(s, m5); ++ gen_helper_gvec_2_ptr *fn = NULL; ++ ++ ++ switch (s->fields.op2) { ++ case 0xc3: ++ if (fpf == FPF_LONG) { ++ fn = gen_helper_gvec_vcdg64; ++ } ++ break; ++ case 0xc1: ++ if (fpf == FPF_LONG) { ++ fn = gen_helper_gvec_vcdlg64; ++ } ++ break; ++ case 0xc2: ++ if (fpf == FPF_LONG) { ++ fn = gen_helper_gvec_vcgd64; ++ } ++ break; ++ case 0xc0: ++ if (fpf == FPF_LONG) { ++ fn = gen_helper_gvec_vclgd64; ++ } ++ break; ++ case 0xc7: ++ switch (fpf) { ++ case FPF_SHORT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfi32; ++ } ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_vfi64; ++ break; ++ case FPF_EXT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfi128; ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0xc5: ++ switch (fpf) { ++ case FPF_LONG: ++ fn = gen_helper_gvec_vflr64; ++ break; ++ case FPF_EXT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vflr128; ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ ++ if (!fn || extract32(m4, 0, 2) || erm > 7 || erm == 2) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, ++ deposit32(m4, 4, 4, erm), fn); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vfll(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t fpf = get_field(s, m3); ++ const uint8_t m4 = get_field(s, m4); ++ gen_helper_gvec_2_ptr *fn = NULL; ++ ++ switch (fpf) { ++ case FPF_SHORT: ++ fn = gen_helper_gvec_vfll32; ++ break; ++ case FPF_LONG: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfll64; ++ } ++ break; ++ default: ++ break; ++ } ++ ++ if (!fn || extract32(m4, 0, 3)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ 
gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vfmax(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t fpf = get_field(s, m4); ++ const uint8_t m6 = get_field(s, m6); ++ const uint8_t m5 = get_field(s, m5); ++ gen_helper_gvec_3_ptr *fn; ++ ++ if (m6 == 5 || m6 == 6 || m6 == 7 || m6 > 13) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ switch (fpf) { ++ case FPF_SHORT: ++ if (s->fields.op2 == 0xef) { ++ fn = gen_helper_gvec_vfmax32; ++ } else { ++ fn = gen_helper_gvec_vfmin32; ++ } ++ break; ++ case FPF_LONG: ++ if (s->fields.op2 == 0xef) { ++ fn = gen_helper_gvec_vfmax64; ++ } else { ++ fn = gen_helper_gvec_vfmin64; ++ } ++ break; ++ case FPF_EXT: ++ if (s->fields.op2 == 0xef) { ++ fn = gen_helper_gvec_vfmax128; ++ } else { ++ fn = gen_helper_gvec_vfmin128; ++ } ++ break; ++ default: ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3), ++ cpu_env, deposit32(m5, 4, 4, m6), fn); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vfma(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t m5 = get_field(s, m5); ++ const uint8_t fpf = get_field(s, m6); ++ gen_helper_gvec_4_ptr *fn = NULL; ++ ++ switch (s->fields.op2) { ++ case 0x8f: ++ switch (fpf) { ++ case FPF_SHORT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfma32; ++ } ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_vfma64; ++ break; ++ case FPF_EXT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfma128; ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x8e: ++ switch (fpf) { ++ case FPF_SHORT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfms32; ++ } ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_vfms64; ++ break; ++ case FPF_EXT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfms128; ++ } ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x9f: ++ switch (fpf) { ++ case FPF_SHORT: ++ fn = gen_helper_gvec_vfnma32; ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_vfnma64; ++ break; ++ case FPF_EXT: ++ fn = gen_helper_gvec_vfnma128; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x9e: ++ switch (fpf) { ++ case FPF_SHORT: ++ fn = gen_helper_gvec_vfnms32; ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_vfnms64; ++ break; ++ case FPF_EXT: ++ fn = gen_helper_gvec_vfnms128; ++ break; ++ default: ++ break; ++ } ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ ++ if (!fn || extract32(m5, 0, 3)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2), ++ get_field(s, v3), get_field(s, v4), cpu_env, m5, fn); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t v1 = get_field(s, v1); ++ const uint8_t v2 = get_field(s, v2); ++ const uint8_t fpf = get_field(s, m3); ++ const uint8_t m4 = get_field(s, m4); ++ const uint8_t m5 = get_field(s, m5); ++ const bool se = extract32(m4, 3, 1); ++ TCGv_i64 tmp; ++ ++ if ((fpf != FPF_LONG && !s390_has_feat(S390_FEAT_VECTOR_ENH)) || ++ extract32(m4, 0, 3) || m5 > 2) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ switch (fpf) { ++ case FPF_SHORT: ++ if (!se) { ++ switch (m5) { ++ case 0: ++ /* sign bit is inverted (complement) */ ++ gen_gvec_fn_2i(xori, ES_32, v1, v2, 1ull << 31); 
++ break; ++ case 1: ++ /* sign bit is set to one (negative) */ ++ gen_gvec_fn_2i(ori, ES_32, v1, v2, 1ull << 31); ++ break; ++ case 2: ++ /* sign bit is set to zero (positive) */ ++ gen_gvec_fn_2i(andi, ES_32, v1, v2, (1ull << 31) - 1); ++ break; ++ } ++ return DISAS_NEXT; ++ } ++ break; ++ case FPF_LONG: ++ if (!se) { ++ switch (m5) { ++ case 0: ++ /* sign bit is inverted (complement) */ ++ gen_gvec_fn_2i(xori, ES_64, v1, v2, 1ull << 63); ++ break; ++ case 1: ++ /* sign bit is set to one (negative) */ ++ gen_gvec_fn_2i(ori, ES_64, v1, v2, 1ull << 63); ++ break; ++ case 2: ++ /* sign bit is set to zero (positive) */ ++ gen_gvec_fn_2i(andi, ES_64, v1, v2, (1ull << 63) - 1); ++ break; ++ } ++ return DISAS_NEXT; ++ } ++ break; ++ case FPF_EXT: ++ /* Only a single element. */ ++ break; ++ default: ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ /* With a single element, we are only interested in bit 0. */ ++ tmp = tcg_temp_new_i64(); ++ read_vec_element_i64(tmp, v2, 0, ES_64); ++ switch (m5) { ++ case 0: ++ /* sign bit is inverted (complement) */ ++ tcg_gen_xori_i64(tmp, tmp, 1ull << 63); ++ break; ++ case 1: ++ /* sign bit is set to one (negative) */ ++ tcg_gen_ori_i64(tmp, tmp, 1ull << 63); ++ break; ++ case 2: ++ /* sign bit is set to zero (positive) */ ++ tcg_gen_andi_i64(tmp, tmp, (1ull << 63) - 1); ++ break; ++ } ++ write_vec_element_i64(tmp, v1, 0, ES_64); ++ ++ if (fpf == FPF_EXT) { ++ read_vec_element_i64(tmp, v2, 1, ES_64); ++ write_vec_element_i64(tmp, v1, 1, ES_64); ++ } ++ ++ tcg_temp_free_i64(tmp); ++ ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o) ++{ ++ const uint8_t fpf = get_field(s, m3); ++ const uint8_t m4 = get_field(s, m4); ++ gen_helper_gvec_2_ptr *fn = NULL; ++ ++ switch (fpf) { ++ case FPF_SHORT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfsq32; ++ } ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_vfsq64; ++ break; ++ case FPF_EXT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vfsq128; ++ } ++ break; ++ default: ++ break; ++ } ++ ++ if (!fn || extract32(m4, 0, 3)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn); ++ return DISAS_NEXT; ++} ++ ++static DisasJumpType op_vftci(DisasContext *s, DisasOps *o) ++{ ++ const uint16_t i3 = get_field(s, i3); ++ const uint8_t fpf = get_field(s, m4); ++ const uint8_t m5 = get_field(s, m5); ++ gen_helper_gvec_2_ptr *fn = NULL; ++ ++ switch (fpf) { ++ case FPF_SHORT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vftci32; ++ } ++ break; ++ case FPF_LONG: ++ fn = gen_helper_gvec_vftci64; ++ break; ++ case FPF_EXT: ++ if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { ++ fn = gen_helper_gvec_vftci128; ++ } ++ break; ++ default: ++ break; ++ } ++ ++ if (!fn || extract32(m5, 0, 3)) { ++ gen_program_exception(s, PGM_SPECIFICATION); ++ return DISAS_NORETURN; ++ } ++ ++ gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, ++ deposit32(m5, 4, 12, i3), fn); ++ set_cc_static(s); ++ return DISAS_NEXT; ++} +diff --git a/target/s390x/tcg/vec.h b/target/s390x/tcg/vec.h +new file mode 100644 +index 0000000000..a6e361869b +--- /dev/null ++++ b/target/s390x/tcg/vec.h +@@ -0,0 +1,141 @@ ++/* ++ * QEMU TCG support -- s390x vector utilities ++ * ++ * Copyright (C) 2019 Red Hat Inc ++ * ++ * Authors: ++ * David Hildenbrand ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2
or later. ++ * See the COPYING file in the top-level directory. ++ */ ++#ifndef S390X_VEC_H ++#define S390X_VEC_H ++ ++#include "tcg/tcg.h" ++ ++typedef union S390Vector { ++ uint64_t doubleword[2]; ++ uint32_t word[4]; ++ uint16_t halfword[8]; ++ uint8_t byte[16]; ++} S390Vector; ++ ++/* ++ * Each vector is stored as two 64bit host values. So when talking about ++ * byte/halfword/word numbers, we have to take care of proper translation ++ * between element numbers. ++ * ++ * Big Endian (target/possible host) ++ * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15] ++ * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7] ++ * W: [ 0][ 1] - [ 2][ 3] ++ * DW: [ 0] - [ 1] ++ * ++ * Little Endian (possible host) ++ * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8] ++ * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4] ++ * W: [ 1][ 0] - [ 3][ 2] ++ * DW: [ 0] - [ 1] ++ */ ++#ifndef HOST_WORDS_BIGENDIAN ++#define H1(x) ((x) ^ 7) ++#define H2(x) ((x) ^ 3) ++#define H4(x) ((x) ^ 1) ++#else ++#define H1(x) (x) ++#define H2(x) (x) ++#define H4(x) (x) ++#endif ++ ++static inline uint8_t s390_vec_read_element8(const S390Vector *v, uint8_t enr) ++{ ++ g_assert(enr < 16); ++ return v->byte[H1(enr)]; ++} ++ ++static inline uint16_t s390_vec_read_element16(const S390Vector *v, uint8_t enr) ++{ ++ g_assert(enr < 8); ++ return v->halfword[H2(enr)]; ++} ++ ++static inline uint32_t s390_vec_read_element32(const S390Vector *v, uint8_t enr) ++{ ++ g_assert(enr < 4); ++ return v->word[H4(enr)]; ++} ++ ++static inline uint64_t s390_vec_read_element64(const S390Vector *v, uint8_t enr) ++{ ++ g_assert(enr < 2); ++ return v->doubleword[enr]; ++} ++ ++static inline uint64_t s390_vec_read_element(const S390Vector *v, uint8_t enr, ++ uint8_t es) ++{ ++ switch (es) { ++ case MO_8: ++ return s390_vec_read_element8(v, enr); ++ case MO_16: ++ return s390_vec_read_element16(v, enr); ++ case MO_32: ++ return s390_vec_read_element32(v, enr); ++ case MO_64: ++ return s390_vec_read_element64(v, enr); ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static inline void s390_vec_write_element8(S390Vector *v, uint8_t enr, ++ uint8_t data) ++{ ++ g_assert(enr < 16); ++ v->byte[H1(enr)] = data; ++} ++ ++static inline void s390_vec_write_element16(S390Vector *v, uint8_t enr, ++ uint16_t data) ++{ ++ g_assert(enr < 8); ++ v->halfword[H2(enr)] = data; ++} ++ ++static inline void s390_vec_write_element32(S390Vector *v, uint8_t enr, ++ uint32_t data) ++{ ++ g_assert(enr < 4); ++ v->word[H4(enr)] = data; ++} ++ ++static inline void s390_vec_write_element64(S390Vector *v, uint8_t enr, ++ uint64_t data) ++{ ++ g_assert(enr < 2); ++ v->doubleword[enr] = data; ++} ++ ++static inline void s390_vec_write_element(S390Vector *v, uint8_t enr, ++ uint8_t es, uint64_t data) ++{ ++ switch (es) { ++ case MO_8: ++ s390_vec_write_element8(v, enr, data); ++ break; ++ case MO_16: ++ s390_vec_write_element16(v, enr, data); ++ break; ++ case MO_32: ++ s390_vec_write_element32(v, enr, data); ++ break; ++ case MO_64: ++ s390_vec_write_element64(v, enr, data); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++#endif /* S390X_VEC_H */ +diff --git a/target/s390x/tcg/vec_fpu_helper.c b/target/s390x/tcg/vec_fpu_helper.c +new file mode 100644 +index 0000000000..1a77993471 +--- /dev/null ++++ b/target/s390x/tcg/vec_fpu_helper.c +@@ -0,0 +1,1072 @@ ++/* ++ * QEMU TCG support -- s390x vector floating point instruction support ++ * ++ * Copyright (C) 2019 Red Hat Inc ++ * ++ * Authors: ++ * David Hildenbrand ++ * ++ * This work is 
licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. ++ */ ++#include "qemu/osdep.h" ++#include "qemu-common.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "vec.h" ++#include "tcg_s390x.h" ++#include "tcg/tcg-gvec-desc.h" ++#include "exec/exec-all.h" ++#include "exec/helper-proto.h" ++#include "fpu/softfloat.h" ++ ++#define VIC_INVALID 0x1 ++#define VIC_DIVBYZERO 0x2 ++#define VIC_OVERFLOW 0x3 ++#define VIC_UNDERFLOW 0x4 ++#define VIC_INEXACT 0x5 ++ ++/* returns the VEX. If the VEX is 0, there is no trap */ ++static uint8_t check_ieee_exc(CPUS390XState *env, uint8_t enr, bool XxC, ++ uint8_t *vec_exc) ++{ ++ uint8_t vece_exc = 0, trap_exc; ++ unsigned qemu_exc; ++ ++ /* Retrieve and clear the softfloat exceptions */ ++ qemu_exc = env->fpu_status.float_exception_flags; ++ if (qemu_exc == 0) { ++ return 0; ++ } ++ env->fpu_status.float_exception_flags = 0; ++ ++ vece_exc = s390_softfloat_exc_to_ieee(qemu_exc); ++ ++ /* Add them to the vector-wide s390x exception bits */ ++ *vec_exc |= vece_exc; ++ ++ /* Check for traps and construct the VXC */ ++ trap_exc = vece_exc & env->fpc >> 24; ++ if (trap_exc) { ++ if (trap_exc & S390_IEEE_MASK_INVALID) { ++ return enr << 4 | VIC_INVALID; ++ } else if (trap_exc & S390_IEEE_MASK_DIVBYZERO) { ++ return enr << 4 | VIC_DIVBYZERO; ++ } else if (trap_exc & S390_IEEE_MASK_OVERFLOW) { ++ return enr << 4 | VIC_OVERFLOW; ++ } else if (trap_exc & S390_IEEE_MASK_UNDERFLOW) { ++ return enr << 4 | VIC_UNDERFLOW; ++ } else if (!XxC) { ++ g_assert(trap_exc & S390_IEEE_MASK_INEXACT); ++ /* inexact has lowest priority on traps */ ++ return enr << 4 | VIC_INEXACT; ++ } ++ } ++ return 0; ++} ++ ++static void handle_ieee_exc(CPUS390XState *env, uint8_t vxc, uint8_t vec_exc, ++ uintptr_t retaddr) ++{ ++ if (vxc) { ++ /* on traps, the fpc flags are not updated, instruction is suppressed */ ++ tcg_s390_vector_exception(env, vxc, retaddr); ++ } ++ if (vec_exc) { ++ /* indicate exceptions for all elements combined */ ++ env->fpc |= vec_exc << 16; ++ } ++} ++ ++static float32 s390_vec_read_float32(const S390Vector *v, uint8_t enr) ++{ ++ return make_float32(s390_vec_read_element32(v, enr)); ++} ++ ++static float64 s390_vec_read_float64(const S390Vector *v, uint8_t enr) ++{ ++ return make_float64(s390_vec_read_element64(v, enr)); ++} ++ ++static float128 s390_vec_read_float128(const S390Vector *v) ++{ ++ return make_float128(s390_vec_read_element64(v, 0), ++ s390_vec_read_element64(v, 1)); ++} ++ ++static void s390_vec_write_float32(S390Vector *v, uint8_t enr, float32 data) ++{ ++ return s390_vec_write_element32(v, enr, data); ++} ++ ++static void s390_vec_write_float64(S390Vector *v, uint8_t enr, float64 data) ++{ ++ return s390_vec_write_element64(v, enr, data); ++} ++ ++static void s390_vec_write_float128(S390Vector *v, float128 data) ++{ ++ s390_vec_write_element64(v, 0, data.high); ++ s390_vec_write_element64(v, 1, data.low); ++} ++ ++typedef float32 (*vop32_2_fn)(float32 a, float_status *s); ++static void vop32_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, ++ bool s, bool XxC, uint8_t erm, vop32_2_fn fn, ++ uintptr_t retaddr) ++{ ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int i, old_mode; ++ ++ old_mode = s390_swap_bfp_rounding_mode(env, erm); ++ for (i = 0; i < 4; i++) { ++ const float32 a = s390_vec_read_float32(v2, i); ++ ++ s390_vec_write_float32(&tmp, i, fn(a, &env->fpu_status)); ++ vxc = check_ieee_exc(env, i, XxC, &vec_exc); ++ if (s || vxc) { ++ break; ++ } ++ } 
++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++} ++ ++typedef float64 (*vop64_2_fn)(float64 a, float_status *s); ++static void vop64_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, ++ bool s, bool XxC, uint8_t erm, vop64_2_fn fn, ++ uintptr_t retaddr) ++{ ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int i, old_mode; ++ ++ old_mode = s390_swap_bfp_rounding_mode(env, erm); ++ for (i = 0; i < 2; i++) { ++ const float64 a = s390_vec_read_float64(v2, i); ++ ++ s390_vec_write_float64(&tmp, i, fn(a, &env->fpu_status)); ++ vxc = check_ieee_exc(env, i, XxC, &vec_exc); ++ if (s || vxc) { ++ break; ++ } ++ } ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++} ++ ++typedef float128 (*vop128_2_fn)(float128 a, float_status *s); ++static void vop128_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, ++ bool s, bool XxC, uint8_t erm, vop128_2_fn fn, ++ uintptr_t retaddr) ++{ ++ const float128 a = s390_vec_read_float128(v2); ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int old_mode; ++ ++ old_mode = s390_swap_bfp_rounding_mode(env, erm); ++ s390_vec_write_float128(&tmp, fn(a, &env->fpu_status)); ++ vxc = check_ieee_exc(env, 0, XxC, &vec_exc); ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++} ++ ++static float64 vcdg64(float64 a, float_status *s) ++{ ++ return int64_to_float64(a, s); ++} ++ ++static float64 vcdlg64(float64 a, float_status *s) ++{ ++ return uint64_to_float64(a, s); ++} ++ ++static float64 vcgd64(float64 a, float_status *s) ++{ ++ const float64 tmp = float64_to_int64(a, s); ++ ++ return float64_is_any_nan(a) ? INT64_MIN : tmp; ++} ++ ++static float64 vclgd64(float64 a, float_status *s) ++{ ++ const float64 tmp = float64_to_uint64(a, s); ++ ++ return float64_is_any_nan(a) ? 
0 : tmp; ++} ++ ++#define DEF_GVEC_VOP2_FN(NAME, FN, BITS) \ ++void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, CPUS390XState *env, \ ++ uint32_t desc) \ ++{ \ ++ const uint8_t erm = extract32(simd_data(desc), 4, 4); \ ++ const bool se = extract32(simd_data(desc), 3, 1); \ ++ const bool XxC = extract32(simd_data(desc), 2, 1); \ ++ \ ++ vop##BITS##_2(v1, v2, env, se, XxC, erm, FN, GETPC()); \ ++} ++ ++#define DEF_GVEC_VOP2_64(NAME) \ ++DEF_GVEC_VOP2_FN(NAME, NAME##64, 64) ++ ++#define DEF_GVEC_VOP2(NAME, OP) \ ++DEF_GVEC_VOP2_FN(NAME, float32_##OP, 32) \ ++DEF_GVEC_VOP2_FN(NAME, float64_##OP, 64) \ ++DEF_GVEC_VOP2_FN(NAME, float128_##OP, 128) ++ ++DEF_GVEC_VOP2_64(vcdg) ++DEF_GVEC_VOP2_64(vcdlg) ++DEF_GVEC_VOP2_64(vcgd) ++DEF_GVEC_VOP2_64(vclgd) ++DEF_GVEC_VOP2(vfi, round_to_int) ++DEF_GVEC_VOP2(vfsq, sqrt) ++ ++typedef float32 (*vop32_3_fn)(float32 a, float32 b, float_status *s); ++static void vop32_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, ++ CPUS390XState *env, bool s, vop32_3_fn fn, ++ uintptr_t retaddr) ++{ ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int i; ++ ++ for (i = 0; i < 4; i++) { ++ const float32 a = s390_vec_read_float32(v2, i); ++ const float32 b = s390_vec_read_float32(v3, i); ++ ++ s390_vec_write_float32(&tmp, i, fn(a, b, &env->fpu_status)); ++ vxc = check_ieee_exc(env, i, false, &vec_exc); ++ if (s || vxc) { ++ break; ++ } ++ } ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++} ++ ++typedef float64 (*vop64_3_fn)(float64 a, float64 b, float_status *s); ++static void vop64_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, ++ CPUS390XState *env, bool s, vop64_3_fn fn, ++ uintptr_t retaddr) ++{ ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int i; ++ ++ for (i = 0; i < 2; i++) { ++ const float64 a = s390_vec_read_float64(v2, i); ++ const float64 b = s390_vec_read_float64(v3, i); ++ ++ s390_vec_write_float64(&tmp, i, fn(a, b, &env->fpu_status)); ++ vxc = check_ieee_exc(env, i, false, &vec_exc); ++ if (s || vxc) { ++ break; ++ } ++ } ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++} ++ ++typedef float128 (*vop128_3_fn)(float128 a, float128 b, float_status *s); ++static void vop128_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, ++ CPUS390XState *env, bool s, vop128_3_fn fn, ++ uintptr_t retaddr) ++{ ++ const float128 a = s390_vec_read_float128(v2); ++ const float128 b = s390_vec_read_float128(v3); ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ ++ s390_vec_write_float128(&tmp, fn(a, b, &env->fpu_status)); ++ vxc = check_ieee_exc(env, 0, false, &vec_exc); ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++} ++ ++#define DEF_GVEC_VOP3_B(NAME, OP, BITS) \ ++void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ ++ CPUS390XState *env, uint32_t desc) \ ++{ \ ++ const bool se = extract32(simd_data(desc), 3, 1); \ ++ \ ++ vop##BITS##_3(v1, v2, v3, env, se, float##BITS##_##OP, GETPC()); \ ++} ++ ++#define DEF_GVEC_VOP3(NAME, OP) \ ++DEF_GVEC_VOP3_B(NAME, OP, 32) \ ++DEF_GVEC_VOP3_B(NAME, OP, 64) \ ++DEF_GVEC_VOP3_B(NAME, OP, 128) ++ ++DEF_GVEC_VOP3(vfa, add) ++DEF_GVEC_VOP3(vfs, sub) ++DEF_GVEC_VOP3(vfd, div) ++DEF_GVEC_VOP3(vfm, mul) ++ ++static int wfc32(const S390Vector *v1, const S390Vector *v2, ++ CPUS390XState *env, bool signal, uintptr_t retaddr) ++{ ++ /* only the zero-indexed elements are compared */ ++ const float32 a = s390_vec_read_float32(v1, 0); ++ const float32 b = s390_vec_read_float32(v2, 0); ++ uint8_t vxc, vec_exc = 0; ++ 
int cmp; ++ ++ if (signal) { ++ cmp = float32_compare(a, b, &env->fpu_status); ++ } else { ++ cmp = float32_compare_quiet(a, b, &env->fpu_status); ++ } ++ vxc = check_ieee_exc(env, 0, false, &vec_exc); ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ ++ return float_comp_to_cc(env, cmp); ++} ++ ++static int wfc64(const S390Vector *v1, const S390Vector *v2, ++ CPUS390XState *env, bool signal, uintptr_t retaddr) ++{ ++ /* only the zero-indexed elements are compared */ ++ const float64 a = s390_vec_read_float64(v1, 0); ++ const float64 b = s390_vec_read_float64(v2, 0); ++ uint8_t vxc, vec_exc = 0; ++ int cmp; ++ ++ if (signal) { ++ cmp = float64_compare(a, b, &env->fpu_status); ++ } else { ++ cmp = float64_compare_quiet(a, b, &env->fpu_status); ++ } ++ vxc = check_ieee_exc(env, 0, false, &vec_exc); ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ ++ return float_comp_to_cc(env, cmp); ++} ++ ++static int wfc128(const S390Vector *v1, const S390Vector *v2, ++ CPUS390XState *env, bool signal, uintptr_t retaddr) ++{ ++ /* only the zero-indexed elements are compared */ ++ const float128 a = s390_vec_read_float128(v1); ++ const float128 b = s390_vec_read_float128(v2); ++ uint8_t vxc, vec_exc = 0; ++ int cmp; ++ ++ if (signal) { ++ cmp = float128_compare(a, b, &env->fpu_status); ++ } else { ++ cmp = float128_compare_quiet(a, b, &env->fpu_status); ++ } ++ vxc = check_ieee_exc(env, 0, false, &vec_exc); ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ ++ return float_comp_to_cc(env, cmp); ++} ++ ++#define DEF_GVEC_WFC_B(NAME, SIGNAL, BITS) \ ++void HELPER(gvec_##NAME##BITS)(const void *v1, const void *v2, \ ++ CPUS390XState *env, uint32_t desc) \ ++{ \ ++ env->cc_op = wfc##BITS(v1, v2, env, SIGNAL, GETPC()); \ ++} ++ ++#define DEF_GVEC_WFC(NAME, SIGNAL) \ ++ DEF_GVEC_WFC_B(NAME, SIGNAL, 32) \ ++ DEF_GVEC_WFC_B(NAME, SIGNAL, 64) \ ++ DEF_GVEC_WFC_B(NAME, SIGNAL, 128) ++ ++DEF_GVEC_WFC(wfc, false) ++DEF_GVEC_WFC(wfk, true) ++ ++typedef bool (*vfc32_fn)(float32 a, float32 b, float_status *status); ++static int vfc32(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, ++ CPUS390XState *env, bool s, vfc32_fn fn, uintptr_t retaddr) ++{ ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int match = 0; ++ int i; ++ ++ for (i = 0; i < 4; i++) { ++ const float32 a = s390_vec_read_float32(v2, i); ++ const float32 b = s390_vec_read_float32(v3, i); ++ ++ /* swap the order of the parameters, so we can use existing functions */ ++ if (fn(b, a, &env->fpu_status)) { ++ match++; ++ s390_vec_write_element32(&tmp, i, -1u); ++ } ++ vxc = check_ieee_exc(env, i, false, &vec_exc); ++ if (s || vxc) { ++ break; ++ } ++ } ++ ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++ if (match) { ++ return s || match == 4 ? 
0 : 1; ++ } ++ return 3; ++} ++ ++typedef bool (*vfc64_fn)(float64 a, float64 b, float_status *status); ++static int vfc64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, ++ CPUS390XState *env, bool s, vfc64_fn fn, uintptr_t retaddr) ++{ ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int match = 0; ++ int i; ++ ++ for (i = 0; i < 2; i++) { ++ const float64 a = s390_vec_read_float64(v2, i); ++ const float64 b = s390_vec_read_float64(v3, i); ++ ++ /* swap the order of the parameters, so we can use existing functions */ ++ if (fn(b, a, &env->fpu_status)) { ++ match++; ++ s390_vec_write_element64(&tmp, i, -1ull); ++ } ++ vxc = check_ieee_exc(env, i, false, &vec_exc); ++ if (s || vxc) { ++ break; ++ } ++ } ++ ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++ if (match) { ++ return s || match == 2 ? 0 : 1; ++ } ++ return 3; ++} ++ ++typedef bool (*vfc128_fn)(float128 a, float128 b, float_status *status); ++static int vfc128(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, ++ CPUS390XState *env, bool s, vfc128_fn fn, uintptr_t retaddr) ++{ ++ const float128 a = s390_vec_read_float128(v2); ++ const float128 b = s390_vec_read_float128(v3); ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ bool match = false; ++ ++ /* swap the order of the parameters, so we can use existing functions */ ++ if (fn(b, a, &env->fpu_status)) { ++ match = true; ++ s390_vec_write_element64(&tmp, 0, -1ull); ++ s390_vec_write_element64(&tmp, 1, -1ull); ++ } ++ vxc = check_ieee_exc(env, 0, false, &vec_exc); ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++ return match ? 0 : 3; ++} ++ ++#define DEF_GVEC_VFC_B(NAME, OP, BITS) \ ++void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ ++ CPUS390XState *env, uint32_t desc) \ ++{ \ ++ const bool se = extract32(simd_data(desc), 3, 1); \ ++ const bool sq = extract32(simd_data(desc), 2, 1); \ ++ vfc##BITS##_fn fn = sq ? float##BITS##_##OP : float##BITS##_##OP##_quiet; \ ++ \ ++ vfc##BITS(v1, v2, v3, env, se, fn, GETPC()); \ ++} \ ++ \ ++void HELPER(gvec_##NAME##BITS##_cc)(void *v1, const void *v2, const void *v3, \ ++ CPUS390XState *env, uint32_t desc) \ ++{ \ ++ const bool se = extract32(simd_data(desc), 3, 1); \ ++ const bool sq = extract32(simd_data(desc), 2, 1); \ ++ vfc##BITS##_fn fn = sq ? 
float##BITS##_##OP : float##BITS##_##OP##_quiet; \ ++ \ ++ env->cc_op = vfc##BITS(v1, v2, v3, env, se, fn, GETPC()); \ ++} ++ ++#define DEF_GVEC_VFC(NAME, OP) \ ++DEF_GVEC_VFC_B(NAME, OP, 32) \ ++DEF_GVEC_VFC_B(NAME, OP, 64) \ ++DEF_GVEC_VFC_B(NAME, OP, 128) \ ++ ++DEF_GVEC_VFC(vfce, eq) ++DEF_GVEC_VFC(vfch, lt) ++DEF_GVEC_VFC(vfche, le) ++ ++void HELPER(gvec_vfll32)(void *v1, const void *v2, CPUS390XState *env, ++ uint32_t desc) ++{ ++ const bool s = extract32(simd_data(desc), 3, 1); ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int i; ++ ++ for (i = 0; i < 2; i++) { ++ /* load from even element */ ++ const float32 a = s390_vec_read_element32(v2, i * 2); ++ const uint64_t ret = float32_to_float64(a, &env->fpu_status); ++ ++ s390_vec_write_element64(&tmp, i, ret); ++ /* indicate the source element */ ++ vxc = check_ieee_exc(env, i * 2, false, &vec_exc); ++ if (s || vxc) { ++ break; ++ } ++ } ++ handle_ieee_exc(env, vxc, vec_exc, GETPC()); ++ *(S390Vector *)v1 = tmp; ++} ++ ++void HELPER(gvec_vfll64)(void *v1, const void *v2, CPUS390XState *env, ++ uint32_t desc) ++{ ++ /* load from even element */ ++ const float128 ret = float64_to_float128(s390_vec_read_float64(v2, 0), ++ &env->fpu_status); ++ uint8_t vxc, vec_exc = 0; ++ ++ vxc = check_ieee_exc(env, 0, false, &vec_exc); ++ handle_ieee_exc(env, vxc, vec_exc, GETPC()); ++ s390_vec_write_float128(v1, ret); ++} ++ ++void HELPER(gvec_vflr64)(void *v1, const void *v2, CPUS390XState *env, ++ uint32_t desc) ++{ ++ const uint8_t erm = extract32(simd_data(desc), 4, 4); ++ const bool s = extract32(simd_data(desc), 3, 1); ++ const bool XxC = extract32(simd_data(desc), 2, 1); ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int i, old_mode; ++ ++ old_mode = s390_swap_bfp_rounding_mode(env, erm); ++ for (i = 0; i < 2; i++) { ++ float64 a = s390_vec_read_element64(v2, i); ++ uint32_t ret = float64_to_float32(a, &env->fpu_status); ++ ++ /* place at even element */ ++ s390_vec_write_element32(&tmp, i * 2, ret); ++ /* indicate the source element */ ++ vxc = check_ieee_exc(env, i, XxC, &vec_exc); ++ if (s || vxc) { ++ break; ++ } ++ } ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_ieee_exc(env, vxc, vec_exc, GETPC()); ++ *(S390Vector *)v1 = tmp; ++} ++ ++void HELPER(gvec_vflr128)(void *v1, const void *v2, CPUS390XState *env, ++ uint32_t desc) ++{ ++ const uint8_t erm = extract32(simd_data(desc), 4, 4); ++ const bool XxC = extract32(simd_data(desc), 2, 1); ++ uint8_t vxc, vec_exc = 0; ++ int old_mode; ++ float64 ret; ++ ++ old_mode = s390_swap_bfp_rounding_mode(env, erm); ++ ret = float128_to_float64(s390_vec_read_float128(v2), &env->fpu_status); ++ vxc = check_ieee_exc(env, 0, XxC, &vec_exc); ++ s390_restore_bfp_rounding_mode(env, old_mode); ++ handle_ieee_exc(env, vxc, vec_exc, GETPC()); ++ ++ /* place at even element, odd element is unpredictable */ ++ s390_vec_write_float64(v1, 0, ret); ++} ++ ++static void vfma32(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, ++ const S390Vector *v4, CPUS390XState *env, bool s, int flags, ++ uintptr_t retaddr) ++{ ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int i; ++ ++ for (i = 0; i < 4; i++) { ++ const float32 a = s390_vec_read_float32(v2, i); ++ const float32 b = s390_vec_read_float32(v3, i); ++ const float32 c = s390_vec_read_float32(v4, i); ++ float32 ret = float32_muladd(a, b, c, flags, &env->fpu_status); ++ ++ s390_vec_write_float32(&tmp, i, ret); ++ vxc = check_ieee_exc(env, i, false, &vec_exc); ++ if (s || vxc) { ++ break; ++ } ++ } ++ handle_ieee_exc(env, 
vxc, vec_exc, retaddr); ++ *v1 = tmp; ++} ++ ++static void vfma64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, ++ const S390Vector *v4, CPUS390XState *env, bool s, int flags, ++ uintptr_t retaddr) ++{ ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int i; ++ ++ for (i = 0; i < 2; i++) { ++ const float64 a = s390_vec_read_float64(v2, i); ++ const float64 b = s390_vec_read_float64(v3, i); ++ const float64 c = s390_vec_read_float64(v4, i); ++ const float64 ret = float64_muladd(a, b, c, flags, &env->fpu_status); ++ ++ s390_vec_write_float64(&tmp, i, ret); ++ vxc = check_ieee_exc(env, i, false, &vec_exc); ++ if (s || vxc) { ++ break; ++ } ++ } ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++} ++ ++static void vfma128(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, ++ const S390Vector *v4, CPUS390XState *env, bool s, int flags, ++ uintptr_t retaddr) ++{ ++ const float128 a = s390_vec_read_float128(v2); ++ const float128 b = s390_vec_read_float128(v3); ++ const float128 c = s390_vec_read_float128(v4); ++ uint8_t vxc, vec_exc = 0; ++ float128 ret; ++ ++ ret = float128_muladd(a, b, c, flags, &env->fpu_status); ++ vxc = check_ieee_exc(env, 0, false, &vec_exc); ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ s390_vec_write_float128(v1, ret); ++} ++ ++#define DEF_GVEC_VFMA_B(NAME, FLAGS, BITS) \ ++void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, CPUS390XState *env, \ ++ uint32_t desc) \ ++{ \ ++ const bool se = extract32(simd_data(desc), 3, 1); \ ++ \ ++ vfma##BITS(v1, v2, v3, v4, env, se, FLAGS, GETPC()); \ ++} ++ ++#define DEF_GVEC_VFMA(NAME, FLAGS) \ ++ DEF_GVEC_VFMA_B(NAME, FLAGS, 32) \ ++ DEF_GVEC_VFMA_B(NAME, FLAGS, 64) \ ++ DEF_GVEC_VFMA_B(NAME, FLAGS, 128) ++ ++DEF_GVEC_VFMA(vfma, 0) ++DEF_GVEC_VFMA(vfms, float_muladd_negate_c) ++DEF_GVEC_VFMA(vfnma, float_muladd_negate_result) ++DEF_GVEC_VFMA(vfnms, float_muladd_negate_c | float_muladd_negate_result) ++ ++void HELPER(gvec_vftci32)(void *v1, const void *v2, CPUS390XState *env, ++ uint32_t desc) ++{ ++ uint16_t i3 = extract32(simd_data(desc), 4, 12); ++ bool s = extract32(simd_data(desc), 3, 1); ++ int i, match = 0; ++ ++ for (i = 0; i < 4; i++) { ++ float32 a = s390_vec_read_float32(v2, i); ++ ++ if (float32_dcmask(env, a) & i3) { ++ match++; ++ s390_vec_write_element32(v1, i, -1u); ++ } else { ++ s390_vec_write_element32(v1, i, 0); ++ } ++ if (s) { ++ break; ++ } ++ } ++ ++ if (match == 4 || (s && match)) { ++ env->cc_op = 0; ++ } else if (match) { ++ env->cc_op = 1; ++ } else { ++ env->cc_op = 3; ++ } ++} ++ ++void HELPER(gvec_vftci64)(void *v1, const void *v2, CPUS390XState *env, ++ uint32_t desc) ++{ ++ const uint16_t i3 = extract32(simd_data(desc), 4, 12); ++ const bool s = extract32(simd_data(desc), 3, 1); ++ int i, match = 0; ++ ++ for (i = 0; i < 2; i++) { ++ const float64 a = s390_vec_read_float64(v2, i); ++ ++ if (float64_dcmask(env, a) & i3) { ++ match++; ++ s390_vec_write_element64(v1, i, -1ull); ++ } else { ++ s390_vec_write_element64(v1, i, 0); ++ } ++ if (s) { ++ break; ++ } ++ } ++ ++ if (match == 2 || (s && match)) { ++ env->cc_op = 0; ++ } else if (match) { ++ env->cc_op = 1; ++ } else { ++ env->cc_op = 3; ++ } ++} ++ ++void HELPER(gvec_vftci128)(void *v1, const void *v2, CPUS390XState *env, ++ uint32_t desc) ++{ ++ const float128 a = s390_vec_read_float128(v2); ++ uint16_t i3 = extract32(simd_data(desc), 4, 12); ++ ++ if (float128_dcmask(env, a) & i3) { ++ env->cc_op = 0; ++ s390_vec_write_element64(v1, 0, -1ull); ++ 
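VECTOR FP TEST DATA CLASS comes down to one AND per element: classify the value into one-hot class bits, mask with the i3 immediate, and fold the per-element matches into a condition code. The toy below imitates that flow; demo_dcmask() and its bit assignments are invented for illustration, while the architected class bits live behind the float32/64/128_dcmask() helpers used here:

#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum {
    DEMO_POS_ZERO    = 1 << 0,
    DEMO_NEG_ZERO    = 1 << 1,
    DEMO_POS_NONZERO = 1 << 2,
    DEMO_NEG_NONZERO = 1 << 3,
    DEMO_NAN         = 1 << 4,
};

/* invented classifier: exactly one class bit per value */
static uint16_t demo_dcmask(double a)
{
    if (isnan(a)) {
        return DEMO_NAN;
    }
    if (a == 0.0) {
        return signbit(a) ? DEMO_NEG_ZERO : DEMO_POS_ZERO;
    }
    return signbit(a) ? DEMO_NEG_NONZERO : DEMO_POS_NONZERO;
}

int main(void)
{
    const uint16_t i3 = DEMO_POS_ZERO | DEMO_NEG_ZERO;   /* "any zero" */
    const double samples[] = { 0.0, -0.0, 1.5, NAN };

    for (int i = 0; i < 4; i++) {
        bool match = demo_dcmask(samples[i]) & i3;
        printf("%g -> %s\n", samples[i], match ? "match" : "no match");
    }
    return 0;
}
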
s390_vec_write_element64(v1, 1, -1ull); ++ } else { ++ env->cc_op = 3; ++ s390_vec_write_element64(v1, 0, 0); ++ s390_vec_write_element64(v1, 1, 0); ++ } ++} ++ ++typedef enum S390MinMaxType { ++ S390_MINMAX_TYPE_IEEE = 0, ++ S390_MINMAX_TYPE_JAVA, ++ S390_MINMAX_TYPE_C_MACRO, ++ S390_MINMAX_TYPE_CPP, ++ S390_MINMAX_TYPE_F, ++} S390MinMaxType; ++ ++typedef enum S390MinMaxRes { ++ S390_MINMAX_RES_MINMAX = 0, ++ S390_MINMAX_RES_A, ++ S390_MINMAX_RES_B, ++ S390_MINMAX_RES_SILENCE_A, ++ S390_MINMAX_RES_SILENCE_B, ++} S390MinMaxRes; ++ ++static S390MinMaxRes vfmin_res(uint16_t dcmask_a, uint16_t dcmask_b, ++ S390MinMaxType type, float_status *s) ++{ ++ const bool neg_a = dcmask_a & DCMASK_NEGATIVE; ++ const bool nan_a = dcmask_a & DCMASK_NAN; ++ const bool nan_b = dcmask_b & DCMASK_NAN; ++ ++ g_assert(type > S390_MINMAX_TYPE_IEEE && type <= S390_MINMAX_TYPE_F); ++ ++ if (unlikely((dcmask_a | dcmask_b) & DCMASK_NAN)) { ++ const bool sig_a = dcmask_a & DCMASK_SIGNALING_NAN; ++ const bool sig_b = dcmask_b & DCMASK_SIGNALING_NAN; ++ ++ if ((dcmask_a | dcmask_b) & DCMASK_SIGNALING_NAN) { ++ s->float_exception_flags |= float_flag_invalid; ++ } ++ switch (type) { ++ case S390_MINMAX_TYPE_JAVA: ++ if (sig_a) { ++ return S390_MINMAX_RES_SILENCE_A; ++ } else if (sig_b) { ++ return S390_MINMAX_RES_SILENCE_B; ++ } ++ return nan_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; ++ case S390_MINMAX_TYPE_F: ++ return nan_b ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; ++ case S390_MINMAX_TYPE_C_MACRO: ++ s->float_exception_flags |= float_flag_invalid; ++ return S390_MINMAX_RES_B; ++ case S390_MINMAX_TYPE_CPP: ++ s->float_exception_flags |= float_flag_invalid; ++ return S390_MINMAX_RES_A; ++ default: ++ g_assert_not_reached(); ++ } ++ } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { ++ switch (type) { ++ case S390_MINMAX_TYPE_JAVA: ++ return neg_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; ++ case S390_MINMAX_TYPE_C_MACRO: ++ return S390_MINMAX_RES_B; ++ case S390_MINMAX_TYPE_F: ++ return !neg_a ? S390_MINMAX_RES_B : S390_MINMAX_RES_A; ++ case S390_MINMAX_TYPE_CPP: ++ return S390_MINMAX_RES_A; ++ default: ++ g_assert_not_reached(); ++ } ++ } ++ return S390_MINMAX_RES_MINMAX; ++} ++ ++static S390MinMaxRes vfmax_res(uint16_t dcmask_a, uint16_t dcmask_b, ++ S390MinMaxType type, float_status *s) ++{ ++ g_assert(type > S390_MINMAX_TYPE_IEEE && type <= S390_MINMAX_TYPE_F); ++ ++ if (unlikely((dcmask_a | dcmask_b) & DCMASK_NAN)) { ++ const bool sig_a = dcmask_a & DCMASK_SIGNALING_NAN; ++ const bool sig_b = dcmask_b & DCMASK_SIGNALING_NAN; ++ const bool nan_a = dcmask_a & DCMASK_NAN; ++ const bool nan_b = dcmask_b & DCMASK_NAN; ++ ++ if ((dcmask_a | dcmask_b) & DCMASK_SIGNALING_NAN) { ++ s->float_exception_flags |= float_flag_invalid; ++ } ++ switch (type) { ++ case S390_MINMAX_TYPE_JAVA: ++ if (sig_a) { ++ return S390_MINMAX_RES_SILENCE_A; ++ } else if (sig_b) { ++ return S390_MINMAX_RES_SILENCE_B; ++ } ++ return nan_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; ++ case S390_MINMAX_TYPE_F: ++ return nan_b ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; ++ case S390_MINMAX_TYPE_C_MACRO: ++ s->float_exception_flags |= float_flag_invalid; ++ return S390_MINMAX_RES_B; ++ case S390_MINMAX_TYPE_CPP: ++ s->float_exception_flags |= float_flag_invalid; ++ return S390_MINMAX_RES_A; ++ default: ++ g_assert_not_reached(); ++ } ++ } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { ++ const bool neg_a = dcmask_a & DCMASK_NEGATIVE; ++ ++ switch (type) { ++ case S390_MINMAX_TYPE_JAVA: ++ case S390_MINMAX_TYPE_F: ++ return neg_a ? 
S390_MINMAX_RES_B : S390_MINMAX_RES_A; ++ case S390_MINMAX_TYPE_C_MACRO: ++ return S390_MINMAX_RES_B; ++ case S390_MINMAX_TYPE_CPP: ++ return S390_MINMAX_RES_A; ++ default: ++ g_assert_not_reached(); ++ } ++ } ++ return S390_MINMAX_RES_MINMAX; ++} ++ ++static S390MinMaxRes vfminmax_res(uint16_t dcmask_a, uint16_t dcmask_b, ++ S390MinMaxType type, bool is_min, ++ float_status *s) ++{ ++ return is_min ? vfmin_res(dcmask_a, dcmask_b, type, s) : ++ vfmax_res(dcmask_a, dcmask_b, type, s); ++} ++ ++static void vfminmax32(S390Vector *v1, const S390Vector *v2, ++ const S390Vector *v3, CPUS390XState *env, ++ S390MinMaxType type, bool is_min, bool is_abs, bool se, ++ uintptr_t retaddr) ++{ ++ float_status *s = &env->fpu_status; ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int i; ++ ++ for (i = 0; i < 4; i++) { ++ float32 a = s390_vec_read_float32(v2, i); ++ float32 b = s390_vec_read_float32(v3, i); ++ float32 result; ++ ++ if (type != S390_MINMAX_TYPE_IEEE) { ++ S390MinMaxRes res; ++ ++ if (is_abs) { ++ a = float32_abs(a); ++ b = float32_abs(b); ++ } ++ ++ res = vfminmax_res(float32_dcmask(env, a), float32_dcmask(env, b), ++ type, is_min, s); ++ switch (res) { ++ case S390_MINMAX_RES_MINMAX: ++ result = is_min ? float32_min(a, b, s) : float32_max(a, b, s); ++ break; ++ case S390_MINMAX_RES_A: ++ result = a; ++ break; ++ case S390_MINMAX_RES_B: ++ result = b; ++ break; ++ case S390_MINMAX_RES_SILENCE_A: ++ result = float32_silence_nan(a, s); ++ break; ++ case S390_MINMAX_RES_SILENCE_B: ++ result = float32_silence_nan(b, s); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ } else if (!is_abs) { ++ result = is_min ? float32_minnum(a, b, &env->fpu_status) : ++ float32_maxnum(a, b, &env->fpu_status); ++ } else { ++ result = is_min ? float32_minnummag(a, b, &env->fpu_status) : ++ float32_maxnummag(a, b, &env->fpu_status); ++ } ++ ++ s390_vec_write_float32(&tmp, i, result); ++ vxc = check_ieee_exc(env, i, false, &vec_exc); ++ if (se || vxc) { ++ break; ++ } ++ } ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++} ++ ++static void vfminmax64(S390Vector *v1, const S390Vector *v2, ++ const S390Vector *v3, CPUS390XState *env, ++ S390MinMaxType type, bool is_min, bool is_abs, bool se, ++ uintptr_t retaddr) ++{ ++ float_status *s = &env->fpu_status; ++ uint8_t vxc, vec_exc = 0; ++ S390Vector tmp = {}; ++ int i; ++ ++ for (i = 0; i < 2; i++) { ++ float64 a = s390_vec_read_float64(v2, i); ++ float64 b = s390_vec_read_float64(v3, i); ++ float64 result; ++ ++ if (type != S390_MINMAX_TYPE_IEEE) { ++ S390MinMaxRes res; ++ ++ if (is_abs) { ++ a = float64_abs(a); ++ b = float64_abs(b); ++ } ++ ++ res = vfminmax_res(float64_dcmask(env, a), float64_dcmask(env, b), ++ type, is_min, s); ++ switch (res) { ++ case S390_MINMAX_RES_MINMAX: ++ result = is_min ? float64_min(a, b, s) : float64_max(a, b, s); ++ break; ++ case S390_MINMAX_RES_A: ++ result = a; ++ break; ++ case S390_MINMAX_RES_B: ++ result = b; ++ break; ++ case S390_MINMAX_RES_SILENCE_A: ++ result = float64_silence_nan(a, s); ++ break; ++ case S390_MINMAX_RES_SILENCE_B: ++ result = float64_silence_nan(b, s); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ } else if (!is_abs) { ++ result = is_min ? float64_minnum(a, b, &env->fpu_status) : ++ float64_maxnum(a, b, &env->fpu_status); ++ } else { ++ result = is_min ? 
float64_minnummag(a, b, &env->fpu_status) : ++ float64_maxnummag(a, b, &env->fpu_status); ++ } ++ ++ s390_vec_write_float64(&tmp, i, result); ++ vxc = check_ieee_exc(env, i, false, &vec_exc); ++ if (se || vxc) { ++ break; ++ } ++ } ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ *v1 = tmp; ++} ++ ++static void vfminmax128(S390Vector *v1, const S390Vector *v2, ++ const S390Vector *v3, CPUS390XState *env, ++ S390MinMaxType type, bool is_min, bool is_abs, bool se, ++ uintptr_t retaddr) ++{ ++ float128 a = s390_vec_read_float128(v2); ++ float128 b = s390_vec_read_float128(v3); ++ float_status *s = &env->fpu_status; ++ uint8_t vxc, vec_exc = 0; ++ float128 result; ++ ++ if (type != S390_MINMAX_TYPE_IEEE) { ++ S390MinMaxRes res; ++ ++ if (is_abs) { ++ a = float128_abs(a); ++ b = float128_abs(b); ++ } ++ ++ res = vfminmax_res(float128_dcmask(env, a), float128_dcmask(env, b), ++ type, is_min, s); ++ switch (res) { ++ case S390_MINMAX_RES_MINMAX: ++ result = is_min ? float128_min(a, b, s) : float128_max(a, b, s); ++ break; ++ case S390_MINMAX_RES_A: ++ result = a; ++ break; ++ case S390_MINMAX_RES_B: ++ result = b; ++ break; ++ case S390_MINMAX_RES_SILENCE_A: ++ result = float128_silence_nan(a, s); ++ break; ++ case S390_MINMAX_RES_SILENCE_B: ++ result = float128_silence_nan(b, s); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ } else if (!is_abs) { ++ result = is_min ? float128_minnum(a, b, &env->fpu_status) : ++ float128_maxnum(a, b, &env->fpu_status); ++ } else { ++ result = is_min ? float128_minnummag(a, b, &env->fpu_status) : ++ float128_maxnummag(a, b, &env->fpu_status); ++ } ++ ++ vxc = check_ieee_exc(env, 0, false, &vec_exc); ++ handle_ieee_exc(env, vxc, vec_exc, retaddr); ++ s390_vec_write_float128(v1, result); ++} ++ ++#define DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, BITS) \ ++void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ ++ CPUS390XState *env, uint32_t desc) \ ++{ \ ++ const bool se = extract32(simd_data(desc), 3, 1); \ ++ uint8_t type = extract32(simd_data(desc), 4, 4); \ ++ bool is_abs = false; \ ++ \ ++ if (type >= 8) { \ ++ is_abs = true; \ ++ type -= 8; \ ++ } \ ++ \ ++ vfminmax##BITS(v1, v2, v3, env, type, IS_MIN, is_abs, se, GETPC()); \ ++} ++ ++#define DEF_GVEC_VFMINMAX(NAME, IS_MIN) \ ++ DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 32) \ ++ DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 64) \ ++ DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 128) ++ ++DEF_GVEC_VFMINMAX(vfmax, false) ++DEF_GVEC_VFMINMAX(vfmin, true) +diff --git a/target/s390x/tcg/vec_helper.c b/target/s390x/tcg/vec_helper.c +new file mode 100644 +index 0000000000..ededf13cf0 +--- /dev/null ++++ b/target/s390x/tcg/vec_helper.c +@@ -0,0 +1,214 @@ ++/* ++ * QEMU TCG support -- s390x vector support instructions ++ * ++ * Copyright (C) 2019 Red Hat Inc ++ * ++ * Authors: ++ * David Hildenbrand ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. 
++ */ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "vec.h" ++#include "tcg/tcg.h" ++#include "tcg/tcg-gvec-desc.h" ++#include "exec/helper-proto.h" ++#include "exec/cpu_ldst.h" ++#include "exec/exec-all.h" ++ ++void HELPER(gvec_vbperm)(void *v1, const void *v2, const void *v3, ++ uint32_t desc) ++{ ++ S390Vector tmp = {}; ++ uint16_t result = 0; ++ int i; ++ ++ for (i = 0; i < 16; i++) { ++ const uint8_t bit_nr = s390_vec_read_element8(v3, i); ++ uint16_t bit; ++ ++ if (bit_nr >= 128) { ++ continue; ++ } ++ bit = (s390_vec_read_element8(v2, bit_nr / 8) ++ >> (7 - (bit_nr % 8))) & 1; ++ result |= (bit << (15 - i)); ++ } ++ s390_vec_write_element16(&tmp, 3, result); ++ *(S390Vector *)v1 = tmp; ++} ++ ++void HELPER(vll)(CPUS390XState *env, void *v1, uint64_t addr, uint64_t bytes) ++{ ++ if (likely(bytes >= 16)) { ++ uint64_t t0, t1; ++ ++ t0 = cpu_ldq_data_ra(env, addr, GETPC()); ++ addr = wrap_address(env, addr + 8); ++ t1 = cpu_ldq_data_ra(env, addr, GETPC()); ++ s390_vec_write_element64(v1, 0, t0); ++ s390_vec_write_element64(v1, 1, t1); ++ } else { ++ S390Vector tmp = {}; ++ int i; ++ ++ for (i = 0; i < bytes; i++) { ++ uint8_t byte = cpu_ldub_data_ra(env, addr, GETPC()); ++ ++ s390_vec_write_element8(&tmp, i, byte); ++ addr = wrap_address(env, addr + 1); ++ } ++ *(S390Vector *)v1 = tmp; ++ } ++} ++ ++#define DEF_VPK_HFN(BITS, TBITS) \ ++typedef uint##TBITS##_t (*vpk##BITS##_fn)(uint##BITS##_t, int *); \ ++static int vpk##BITS##_hfn(S390Vector *v1, const S390Vector *v2, \ ++ const S390Vector *v3, vpk##BITS##_fn fn) \ ++{ \ ++ int i, saturated = 0; \ ++ S390Vector tmp; \ ++ \ ++ for (i = 0; i < (128 / TBITS); i++) { \ ++ uint##BITS##_t src; \ ++ \ ++ if (i < (128 / BITS)) { \ ++ src = s390_vec_read_element##BITS(v2, i); \ ++ } else { \ ++ src = s390_vec_read_element##BITS(v3, i - (128 / BITS)); \ ++ } \ ++ s390_vec_write_element##TBITS(&tmp, i, fn(src, &saturated)); \ ++ } \ ++ *v1 = tmp; \ ++ return saturated; \ ++} ++DEF_VPK_HFN(64, 32) ++DEF_VPK_HFN(32, 16) ++DEF_VPK_HFN(16, 8) ++ ++#define DEF_VPK(BITS, TBITS) \ ++static uint##TBITS##_t vpk##BITS##e(uint##BITS##_t src, int *saturated) \ ++{ \ ++ return src; \ ++} \ ++void HELPER(gvec_vpk##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ vpk##BITS##_hfn(v1, v2, v3, vpk##BITS##e); \ ++} ++DEF_VPK(64, 32) ++DEF_VPK(32, 16) ++DEF_VPK(16, 8) ++ ++#define DEF_VPKS(BITS, TBITS) \ ++static uint##TBITS##_t vpks##BITS##e(uint##BITS##_t src, int *saturated) \ ++{ \ ++ if ((int##BITS##_t)src > INT##TBITS##_MAX) { \ ++ (*saturated)++; \ ++ return INT##TBITS##_MAX; \ ++ } else if ((int##BITS##_t)src < INT##TBITS##_MIN) { \ ++ (*saturated)++; \ ++ return INT##TBITS##_MIN; \ ++ } \ ++ return src; \ ++} \ ++void HELPER(gvec_vpks##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ vpk##BITS##_hfn(v1, v2, v3, vpks##BITS##e); \ ++} \ ++void HELPER(gvec_vpks_cc##BITS)(void *v1, const void *v2, const void *v3, \ ++ CPUS390XState *env, uint32_t desc) \ ++{ \ ++ int saturated = vpk##BITS##_hfn(v1, v2, v3, vpks##BITS##e); \ ++ \ ++ if (saturated == (128 / TBITS)) { \ ++ env->cc_op = 3; \ ++ } else if (saturated) { \ ++ env->cc_op = 1; \ ++ } else { \ ++ env->cc_op = 0; \ ++ } \ ++} ++DEF_VPKS(64, 32) ++DEF_VPKS(32, 16) ++DEF_VPKS(16, 8) ++ ++#define DEF_VPKLS(BITS, TBITS) \ ++static uint##TBITS##_t vpkls##BITS##e(uint##BITS##_t src, int *saturated) \ ++{ \ ++ if (src > UINT##TBITS##_MAX) { \ ++ (*saturated)++; \ ++ return UINT##TBITS##_MAX; \ ++ } \ ++ return 
src; \ ++} \ ++void HELPER(gvec_vpkls##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e); \ ++} \ ++void HELPER(gvec_vpkls_cc##BITS)(void *v1, const void *v2, const void *v3, \ ++ CPUS390XState *env, uint32_t desc) \ ++{ \ ++ int saturated = vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e); \ ++ \ ++ if (saturated == (128 / TBITS)) { \ ++ env->cc_op = 3; \ ++ } else if (saturated) { \ ++ env->cc_op = 1; \ ++ } else { \ ++ env->cc_op = 0; \ ++ } \ ++} ++DEF_VPKLS(64, 32) ++DEF_VPKLS(32, 16) ++DEF_VPKLS(16, 8) ++ ++void HELPER(gvec_vperm)(void *v1, const void *v2, const void *v3, ++ const void *v4, uint32_t desc) ++{ ++ S390Vector tmp; ++ int i; ++ ++ for (i = 0; i < 16; i++) { ++ const uint8_t selector = s390_vec_read_element8(v4, i) & 0x1f; ++ uint8_t byte; ++ ++ if (selector < 16) { ++ byte = s390_vec_read_element8(v2, selector); ++ } else { ++ byte = s390_vec_read_element8(v3, selector - 16); ++ } ++ s390_vec_write_element8(&tmp, i, byte); ++ } ++ *(S390Vector *)v1 = tmp; ++} ++ ++void HELPER(vstl)(CPUS390XState *env, const void *v1, uint64_t addr, ++ uint64_t bytes) ++{ ++ /* Probe write access before actually modifying memory */ ++ probe_write_access(env, addr, bytes, GETPC()); ++ ++ if (likely(bytes >= 16)) { ++ cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 0), GETPC()); ++ addr = wrap_address(env, addr + 8); ++ cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 1), GETPC()); ++ } else { ++ int i; ++ ++ for (i = 0; i < bytes; i++) { ++ uint8_t byte = s390_vec_read_element8(v1, i); ++ ++ cpu_stb_data_ra(env, addr, byte, GETPC()); ++ addr = wrap_address(env, addr + 1); ++ } ++ } ++} +diff --git a/target/s390x/tcg/vec_int_helper.c b/target/s390x/tcg/vec_int_helper.c +new file mode 100644 +index 0000000000..5561b3ed90 +--- /dev/null ++++ b/target/s390x/tcg/vec_int_helper.c +@@ -0,0 +1,587 @@ ++/* ++ * QEMU TCG support -- s390x vector integer instruction support ++ * ++ * Copyright (C) 2019 Red Hat Inc ++ * ++ * Authors: ++ * David Hildenbrand ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. 
++ */ ++#include "qemu/osdep.h" ++#include "qemu-common.h" ++#include "cpu.h" ++#include "vec.h" ++#include "exec/helper-proto.h" ++#include "tcg/tcg-gvec-desc.h" ++ ++static bool s390_vec_is_zero(const S390Vector *v) ++{ ++ return !v->doubleword[0] && !v->doubleword[1]; ++} ++ ++static void s390_vec_xor(S390Vector *res, const S390Vector *a, ++ const S390Vector *b) ++{ ++ res->doubleword[0] = a->doubleword[0] ^ b->doubleword[0]; ++ res->doubleword[1] = a->doubleword[1] ^ b->doubleword[1]; ++} ++ ++static void s390_vec_and(S390Vector *res, const S390Vector *a, ++ const S390Vector *b) ++{ ++ res->doubleword[0] = a->doubleword[0] & b->doubleword[0]; ++ res->doubleword[1] = a->doubleword[1] & b->doubleword[1]; ++} ++ ++static bool s390_vec_equal(const S390Vector *a, const S390Vector *b) ++{ ++ return a->doubleword[0] == b->doubleword[0] && ++ a->doubleword[1] == b->doubleword[1]; ++} ++ ++static void s390_vec_shl(S390Vector *d, const S390Vector *a, uint64_t count) ++{ ++ uint64_t tmp; ++ ++ g_assert(count < 128); ++ if (count == 0) { ++ d->doubleword[0] = a->doubleword[0]; ++ d->doubleword[1] = a->doubleword[1]; ++ } else if (count == 64) { ++ d->doubleword[0] = a->doubleword[1]; ++ d->doubleword[1] = 0; ++ } else if (count < 64) { ++ tmp = extract64(a->doubleword[1], 64 - count, count); ++ d->doubleword[1] = a->doubleword[1] << count; ++ d->doubleword[0] = (a->doubleword[0] << count) | tmp; ++ } else { ++ d->doubleword[0] = a->doubleword[1] << (count - 64); ++ d->doubleword[1] = 0; ++ } ++} ++ ++static void s390_vec_sar(S390Vector *d, const S390Vector *a, uint64_t count) ++{ ++ uint64_t tmp; ++ ++ if (count == 0) { ++ d->doubleword[0] = a->doubleword[0]; ++ d->doubleword[1] = a->doubleword[1]; ++ } else if (count == 64) { ++ tmp = (int64_t)a->doubleword[0] >> 63; ++ d->doubleword[1] = a->doubleword[0]; ++ d->doubleword[0] = tmp; ++ } else if (count < 64) { ++ tmp = a->doubleword[1] >> count; ++ d->doubleword[1] = deposit64(tmp, 64 - count, count, a->doubleword[0]); ++ d->doubleword[0] = (int64_t)a->doubleword[0] >> count; ++ } else { ++ tmp = (int64_t)a->doubleword[0] >> 63; ++ d->doubleword[1] = (int64_t)a->doubleword[0] >> (count - 64); ++ d->doubleword[0] = tmp; ++ } ++} ++ ++static void s390_vec_shr(S390Vector *d, const S390Vector *a, uint64_t count) ++{ ++ uint64_t tmp; ++ ++ g_assert(count < 128); ++ if (count == 0) { ++ d->doubleword[0] = a->doubleword[0]; ++ d->doubleword[1] = a->doubleword[1]; ++ } else if (count == 64) { ++ d->doubleword[1] = a->doubleword[0]; ++ d->doubleword[0] = 0; ++ } else if (count < 64) { ++ tmp = a->doubleword[1] >> count; ++ d->doubleword[1] = deposit64(tmp, 64 - count, count, a->doubleword[0]); ++ d->doubleword[0] = a->doubleword[0] >> count; ++ } else { ++ d->doubleword[1] = a->doubleword[0] >> (count - 64); ++ d->doubleword[0] = 0; ++ } ++} ++#define DEF_VAVG(BITS) \ ++void HELPER(gvec_vavg##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ ++ const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, (a + b + 1) >> 1); \ ++ } \ ++} ++DEF_VAVG(8) ++DEF_VAVG(16) ++ ++#define DEF_VAVGL(BITS) \ ++void HELPER(gvec_vavgl##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ ++ const uint##BITS##_t b = 
s390_vec_read_element##BITS(v3, i); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, (a + b + 1) >> 1); \ ++ } \ ++} ++DEF_VAVGL(8) ++DEF_VAVGL(16) ++ ++#define DEF_VCLZ(BITS) \ ++void HELPER(gvec_vclz##BITS)(void *v1, const void *v2, uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, clz32(a) - 32 + BITS); \ ++ } \ ++} ++DEF_VCLZ(8) ++DEF_VCLZ(16) ++ ++#define DEF_VCTZ(BITS) \ ++void HELPER(gvec_vctz##BITS)(void *v1, const void *v2, uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, a ? ctz32(a) : BITS); \ ++ } \ ++} ++DEF_VCTZ(8) ++DEF_VCTZ(16) ++ ++/* like binary multiplication, but XOR instead of addition */ ++#define DEF_GALOIS_MULTIPLY(BITS, TBITS) \ ++static uint##TBITS##_t galois_multiply##BITS(uint##TBITS##_t a, \ ++ uint##TBITS##_t b) \ ++{ \ ++ uint##TBITS##_t res = 0; \ ++ \ ++ while (b) { \ ++ if (b & 0x1) { \ ++ res = res ^ a; \ ++ } \ ++ a = a << 1; \ ++ b = b >> 1; \ ++ } \ ++ return res; \ ++} ++DEF_GALOIS_MULTIPLY(8, 16) ++DEF_GALOIS_MULTIPLY(16, 32) ++DEF_GALOIS_MULTIPLY(32, 64) ++ ++static S390Vector galois_multiply64(uint64_t a, uint64_t b) ++{ ++ S390Vector res = {}; ++ S390Vector va = { ++ .doubleword[1] = a, ++ }; ++ S390Vector vb = { ++ .doubleword[1] = b, ++ }; ++ ++ while (!s390_vec_is_zero(&vb)) { ++ if (vb.doubleword[1] & 0x1) { ++ s390_vec_xor(&res, &res, &va); ++ } ++ s390_vec_shl(&va, &va, 1); ++ s390_vec_shr(&vb, &vb, 1); ++ } ++ return res; ++} ++ ++#define DEF_VGFM(BITS, TBITS) \ ++void HELPER(gvec_vgfm##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / TBITS); i++) { \ ++ uint##BITS##_t a = s390_vec_read_element##BITS(v2, i * 2); \ ++ uint##BITS##_t b = s390_vec_read_element##BITS(v3, i * 2); \ ++ uint##TBITS##_t d = galois_multiply##BITS(a, b); \ ++ \ ++ a = s390_vec_read_element##BITS(v2, i * 2 + 1); \ ++ b = s390_vec_read_element##BITS(v3, i * 2 + 1); \ ++ d = d ^ galois_multiply32(a, b); \ ++ s390_vec_write_element##TBITS(v1, i, d); \ ++ } \ ++} ++DEF_VGFM(8, 16) ++DEF_VGFM(16, 32) ++DEF_VGFM(32, 64) ++ ++void HELPER(gvec_vgfm64)(void *v1, const void *v2, const void *v3, ++ uint32_t desc) ++{ ++ S390Vector tmp1, tmp2; ++ uint64_t a, b; ++ ++ a = s390_vec_read_element64(v2, 0); ++ b = s390_vec_read_element64(v3, 0); ++ tmp1 = galois_multiply64(a, b); ++ a = s390_vec_read_element64(v2, 1); ++ b = s390_vec_read_element64(v3, 1); ++ tmp2 = galois_multiply64(a, b); ++ s390_vec_xor(v1, &tmp1, &tmp2); ++} ++ ++#define DEF_VGFMA(BITS, TBITS) \ ++void HELPER(gvec_vgfma##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / TBITS); i++) { \ ++ uint##BITS##_t a = s390_vec_read_element##BITS(v2, i * 2); \ ++ uint##BITS##_t b = s390_vec_read_element##BITS(v3, i * 2); \ ++ uint##TBITS##_t d = galois_multiply##BITS(a, b); \ ++ \ ++ a = s390_vec_read_element##BITS(v2, i * 2 + 1); \ ++ b = s390_vec_read_element##BITS(v3, i * 2 + 1); \ ++ d = d ^ galois_multiply32(a, b); \ ++ d = d ^ s390_vec_read_element##TBITS(v4, i); \ ++ s390_vec_write_element##TBITS(v1, i, d); \ ++ } \ ++} ++DEF_VGFMA(8, 16) ++DEF_VGFMA(16, 32) ++DEF_VGFMA(32, 64) ++ ++void HELPER(gvec_vgfma64)(void *v1, const void *v2, const void *v3, ++ const void *v4, uint32_t desc) ++{ ++ 
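The multiply loops above are ordinary long multiplication with every add replaced by XOR, i.e. carry-less multiplication of polynomials over GF(2); the 64-bit variant only differs in carrying the 128-bit product in a full S390Vector. The same loop reduced to a standalone check makes the difference from integer multiply visible:

#include <stdint.h>
#include <stdio.h>

/* shift-and-XOR, the same scheme as galois_multiply##BITS above */
static uint16_t clmul8(uint16_t a, uint16_t b)
{
    uint16_t res = 0;

    while (b) {
        if (b & 1) {
            res ^= a;      /* XOR instead of add: no carries */
        }
        a <<= 1;
        b >>= 1;
    }
    return res;
}

int main(void)
{
    /* (x + 1) * (x + 1) = x^2 + 1 over GF(2) */
    printf("0x%x\n", clmul8(0x3, 0x3));   /* prints 0x5, not 0x9 */
    return 0;
}
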
S390Vector tmp1, tmp2; ++ uint64_t a, b; ++ ++ a = s390_vec_read_element64(v2, 0); ++ b = s390_vec_read_element64(v3, 0); ++ tmp1 = galois_multiply64(a, b); ++ a = s390_vec_read_element64(v2, 1); ++ b = s390_vec_read_element64(v3, 1); ++ tmp2 = galois_multiply64(a, b); ++ s390_vec_xor(&tmp1, &tmp1, &tmp2); ++ s390_vec_xor(v1, &tmp1, v4); ++} ++ ++#define DEF_VMAL(BITS) \ ++void HELPER(gvec_vmal##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ ++ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ ++ const uint##BITS##_t c = s390_vec_read_element##BITS(v4, i); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, a * b + c); \ ++ } \ ++} ++DEF_VMAL(8) ++DEF_VMAL(16) ++ ++#define DEF_VMAH(BITS) \ ++void HELPER(gvec_vmah##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ ++ const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ ++ const int32_t c = (int##BITS##_t)s390_vec_read_element##BITS(v4, i); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, (a * b + c) >> BITS); \ ++ } \ ++} ++DEF_VMAH(8) ++DEF_VMAH(16) ++ ++#define DEF_VMALH(BITS) \ ++void HELPER(gvec_vmalh##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ ++ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ ++ const uint##BITS##_t c = s390_vec_read_element##BITS(v4, i); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, (a * b + c) >> BITS); \ ++ } \ ++} ++DEF_VMALH(8) ++DEF_VMALH(16) ++ ++#define DEF_VMAE(BITS, TBITS) \ ++void HELPER(gvec_vmae##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, uint32_t desc) \ ++{ \ ++ int i, j; \ ++ \ ++ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ ++ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ ++ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ ++ int##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ ++ \ ++ s390_vec_write_element##TBITS(v1, i, a * b + c); \ ++ } \ ++} ++DEF_VMAE(8, 16) ++DEF_VMAE(16, 32) ++DEF_VMAE(32, 64) ++ ++#define DEF_VMALE(BITS, TBITS) \ ++void HELPER(gvec_vmale##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, uint32_t desc) \ ++{ \ ++ int i, j; \ ++ \ ++ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ ++ uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ ++ uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ ++ uint##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ ++ \ ++ s390_vec_write_element##TBITS(v1, i, a * b + c); \ ++ } \ ++} ++DEF_VMALE(8, 16) ++DEF_VMALE(16, 32) ++DEF_VMALE(32, 64) ++ ++#define DEF_VMAO(BITS, TBITS) \ ++void HELPER(gvec_vmao##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, uint32_t desc) \ ++{ \ ++ int i, j; \ ++ \ ++ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ ++ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ ++ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ ++ int##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ ++ \ ++ s390_vec_write_element##TBITS(v1, i, a * b + c); \ ++ } \ ++} ++DEF_VMAO(8, 16) 
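All of the even/odd variants share one indexing scheme: the narrow source elements j = 0, 2, 4, ... (even) or j = 1, 3, 5, ... (odd) each produce one double-width result element i, and the operands are widened before the multiply so the product cannot wrap. A plain-C rendition of the odd 8-to-16-bit case, with ad-hoc array names:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int8_t  v2[4] = { 10, -3, 7,  5 };
    const int8_t  v3[4] = {  2,  4, 6, -8 };
    const int16_t v4[2] = { 100, 1000 };
    int16_t v1[2];

    /* j walks the odd narrow elements, i the wide results */
    for (int i = 0, j = 1; i < 2; i++, j += 2) {
        v1[i] = (int16_t)v2[j] * (int16_t)v3[j] + v4[i];
    }
    printf("%d %d\n", v1[0], v1[1]);   /* -3*4+100 = 88, 5*(-8)+1000 = 960 */
    return 0;
}
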
++DEF_VMAO(16, 32) ++DEF_VMAO(32, 64) ++ ++#define DEF_VMALO(BITS, TBITS) \ ++void HELPER(gvec_vmalo##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, uint32_t desc) \ ++{ \ ++ int i, j; \ ++ \ ++ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ ++ uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ ++ uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ ++ uint##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ ++ \ ++ s390_vec_write_element##TBITS(v1, i, a * b + c); \ ++ } \ ++} ++DEF_VMALO(8, 16) ++DEF_VMALO(16, 32) ++DEF_VMALO(32, 64) ++ ++#define DEF_VMH(BITS) \ ++void HELPER(gvec_vmh##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ ++ const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, (a * b) >> BITS); \ ++ } \ ++} ++DEF_VMH(8) ++DEF_VMH(16) ++ ++#define DEF_VMLH(BITS) \ ++void HELPER(gvec_vmlh##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ ++ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, (a * b) >> BITS); \ ++ } \ ++} ++DEF_VMLH(8) ++DEF_VMLH(16) ++ ++#define DEF_VME(BITS, TBITS) \ ++void HELPER(gvec_vme##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ int i, j; \ ++ \ ++ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ ++ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ ++ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ ++ \ ++ s390_vec_write_element##TBITS(v1, i, a * b); \ ++ } \ ++} ++DEF_VME(8, 16) ++DEF_VME(16, 32) ++DEF_VME(32, 64) ++ ++#define DEF_VMLE(BITS, TBITS) \ ++void HELPER(gvec_vmle##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ int i, j; \ ++ \ ++ for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ ++ const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ ++ const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ ++ \ ++ s390_vec_write_element##TBITS(v1, i, a * b); \ ++ } \ ++} ++DEF_VMLE(8, 16) ++DEF_VMLE(16, 32) ++DEF_VMLE(32, 64) ++ ++#define DEF_VMO(BITS, TBITS) \ ++void HELPER(gvec_vmo##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ int i, j; \ ++ \ ++ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ ++ int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ ++ int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ ++ \ ++ s390_vec_write_element##TBITS(v1, i, a * b); \ ++ } \ ++} ++DEF_VMO(8, 16) ++DEF_VMO(16, 32) ++DEF_VMO(32, 64) ++ ++#define DEF_VMLO(BITS, TBITS) \ ++void HELPER(gvec_vmlo##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ int i, j; \ ++ \ ++ for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ ++ const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ ++ const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ ++ \ ++ s390_vec_write_element##TBITS(v1, i, a * b); \ ++ } \ ++} ++DEF_VMLO(8, 16) ++DEF_VMLO(16, 32) ++DEF_VMLO(32, 64) ++ ++#define DEF_VPOPCT(BITS) \ ++void HELPER(gvec_vpopct##BITS)(void *v1, const void *v2, uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const 
uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, ctpop32(a)); \ ++ } \ ++} ++DEF_VPOPCT(8) ++DEF_VPOPCT(16) ++ ++#define DEF_VERIM(BITS) \ ++void HELPER(gvec_verim##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ const uint8_t count = simd_data(desc); \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const uint##BITS##_t a = s390_vec_read_element##BITS(v1, i); \ ++ const uint##BITS##_t b = s390_vec_read_element##BITS(v2, i); \ ++ const uint##BITS##_t mask = s390_vec_read_element##BITS(v3, i); \ ++ const uint##BITS##_t d = (a & ~mask) | (rol##BITS(b, count) & mask); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, d); \ ++ } \ ++} ++DEF_VERIM(8) ++DEF_VERIM(16) ++ ++void HELPER(gvec_vsl)(void *v1, const void *v2, uint64_t count, ++ uint32_t desc) ++{ ++ s390_vec_shl(v1, v2, count); ++} ++ ++void HELPER(gvec_vsra)(void *v1, const void *v2, uint64_t count, ++ uint32_t desc) ++{ ++ s390_vec_sar(v1, v2, count); ++} ++ ++void HELPER(gvec_vsrl)(void *v1, const void *v2, uint64_t count, ++ uint32_t desc) ++{ ++ s390_vec_shr(v1, v2, count); ++} ++ ++#define DEF_VSCBI(BITS) \ ++void HELPER(gvec_vscbi##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ int i; \ ++ \ ++ for (i = 0; i < (128 / BITS); i++) { \ ++ const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ ++ const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ ++ \ ++ s390_vec_write_element##BITS(v1, i, a >= b); \ ++ } \ ++} ++DEF_VSCBI(8) ++DEF_VSCBI(16) ++ ++void HELPER(gvec_vtm)(void *v1, const void *v2, CPUS390XState *env, ++ uint32_t desc) ++{ ++ S390Vector tmp; ++ ++ s390_vec_and(&tmp, v1, v2); ++ if (s390_vec_is_zero(&tmp)) { ++ /* Selected bits all zeros; or all mask bits zero */ ++ env->cc_op = 0; ++ } else if (s390_vec_equal(&tmp, v2)) { ++ /* Selected bits all ones */ ++ env->cc_op = 3; ++ } else { ++ /* Selected bits a mix of zeros and ones */ ++ env->cc_op = 1; ++ } ++} +diff --git a/target/s390x/tcg/vec_string_helper.c b/target/s390x/tcg/vec_string_helper.c +new file mode 100644 +index 0000000000..ac315eb095 +--- /dev/null ++++ b/target/s390x/tcg/vec_string_helper.c +@@ -0,0 +1,473 @@ ++/* ++ * QEMU TCG support -- s390x vector string instruction support ++ * ++ * Copyright (C) 2019 Red Hat Inc ++ * ++ * Authors: ++ * David Hildenbrand ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. ++ */ ++#include "qemu/osdep.h" ++#include "qemu-common.h" ++#include "cpu.h" ++#include "s390x-internal.h" ++#include "vec.h" ++#include "tcg/tcg.h" ++#include "tcg/tcg-gvec-desc.h" ++#include "exec/helper-proto.h" ++ ++/* ++ * Returns a bit set in the MSB of each element that is zero, ++ * as defined by the mask. ++ */ ++static inline uint64_t zero_search(uint64_t a, uint64_t mask) ++{ ++ return ~(((a & mask) + mask) | a | mask); ++} ++ ++/* ++ * Returns a bit set in the MSB of each element that is not zero, ++ * as defined by the mask. ++ */ ++static inline uint64_t nonzero_search(uint64_t a, uint64_t mask) ++{ ++ return (((a & mask) + mask) | a) & ~mask; ++} ++ ++/* ++ * Returns the byte offset for the first match, or 16 for no match. ++ */ ++static inline int match_index(uint64_t c0, uint64_t c1) ++{ ++ return (c0 ? clz64(c0) : clz64(c1) + 64) >> 3; ++} ++ ++/* ++ * Returns the number of bits composing one element. 
++ */ ++static uint8_t get_element_bits(uint8_t es) ++{ ++ return (1 << es) * BITS_PER_BYTE; ++} ++ ++/* ++ * Returns the bitmask for a single element. ++ */ ++static uint64_t get_single_element_mask(uint8_t es) ++{ ++ return -1ull >> (64 - get_element_bits(es)); ++} ++ ++/* ++ * Returns the bitmask for a single element (excluding the MSB). ++ */ ++static uint64_t get_single_element_lsbs_mask(uint8_t es) ++{ ++ return -1ull >> (65 - get_element_bits(es)); ++} ++ ++/* ++ * Returns the bitmasks for multiple elements (excluding the MSBs). ++ */ ++static uint64_t get_element_lsbs_mask(uint8_t es) ++{ ++ return dup_const(es, get_single_element_lsbs_mask(es)); ++} ++ ++static int vfae(void *v1, const void *v2, const void *v3, bool in, ++ bool rt, bool zs, uint8_t es) ++{ ++ const uint64_t mask = get_element_lsbs_mask(es); ++ const int bits = get_element_bits(es); ++ uint64_t a0, a1, b0, b1, e0, e1, t0, t1, z0, z1; ++ uint64_t first_zero = 16; ++ uint64_t first_equal; ++ int i; ++ ++ a0 = s390_vec_read_element64(v2, 0); ++ a1 = s390_vec_read_element64(v2, 1); ++ b0 = s390_vec_read_element64(v3, 0); ++ b1 = s390_vec_read_element64(v3, 1); ++ e0 = 0; ++ e1 = 0; ++ /* compare against equality with every other element */ ++ for (i = 0; i < 64; i += bits) { ++ t0 = rol64(b0, i); ++ t1 = rol64(b1, i); ++ e0 |= zero_search(a0 ^ t0, mask); ++ e0 |= zero_search(a0 ^ t1, mask); ++ e1 |= zero_search(a1 ^ t0, mask); ++ e1 |= zero_search(a1 ^ t1, mask); ++ } ++ /* invert the result if requested - invert only the MSBs */ ++ if (in) { ++ e0 = ~e0 & ~mask; ++ e1 = ~e1 & ~mask; ++ } ++ first_equal = match_index(e0, e1); ++ ++ if (zs) { ++ z0 = zero_search(a0, mask); ++ z1 = zero_search(a1, mask); ++ first_zero = match_index(z0, z1); ++ } ++ ++ if (rt) { ++ e0 = (e0 >> (bits - 1)) * get_single_element_mask(es); ++ e1 = (e1 >> (bits - 1)) * get_single_element_mask(es); ++ s390_vec_write_element64(v1, 0, e0); ++ s390_vec_write_element64(v1, 1, e1); ++ } else { ++ s390_vec_write_element64(v1, 0, MIN(first_equal, first_zero)); ++ s390_vec_write_element64(v1, 1, 0); ++ } ++ ++ if (first_zero == 16 && first_equal == 16) { ++ return 3; /* no match */ ++ } else if (first_zero == 16) { ++ return 1; /* matching elements, no match for zero */ ++ } else if (first_equal < first_zero) { ++ return 2; /* matching elements before match for zero */ ++ } ++ return 0; /* match for zero */ ++} ++ ++#define DEF_VFAE_HELPER(BITS) \ ++void HELPER(gvec_vfae##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ const bool in = extract32(simd_data(desc), 3, 1); \ ++ const bool rt = extract32(simd_data(desc), 2, 1); \ ++ const bool zs = extract32(simd_data(desc), 1, 1); \ ++ \ ++ vfae(v1, v2, v3, in, rt, zs, MO_##BITS); \ ++} ++DEF_VFAE_HELPER(8) ++DEF_VFAE_HELPER(16) ++DEF_VFAE_HELPER(32) ++ ++#define DEF_VFAE_CC_HELPER(BITS) \ ++void HELPER(gvec_vfae_cc##BITS)(void *v1, const void *v2, const void *v3, \ ++ CPUS390XState *env, uint32_t desc) \ ++{ \ ++ const bool in = extract32(simd_data(desc), 3, 1); \ ++ const bool rt = extract32(simd_data(desc), 2, 1); \ ++ const bool zs = extract32(simd_data(desc), 1, 1); \ ++ \ ++ env->cc_op = vfae(v1, v2, v3, in, rt, zs, MO_##BITS); \ ++} ++DEF_VFAE_CC_HELPER(8) ++DEF_VFAE_CC_HELPER(16) ++DEF_VFAE_CC_HELPER(32) ++ ++static int vfee(void *v1, const void *v2, const void *v3, bool zs, uint8_t es) ++{ ++ const uint64_t mask = get_element_lsbs_mask(es); ++ uint64_t a0, a1, b0, b1, e0, e1, z0, z1; ++ uint64_t first_zero = 16; ++ uint64_t first_equal; ++ ++ a0 = 
s390_vec_read_element64(v2, 0); ++ a1 = s390_vec_read_element64(v2, 1); ++ b0 = s390_vec_read_element64(v3, 0); ++ b1 = s390_vec_read_element64(v3, 1); ++ e0 = zero_search(a0 ^ b0, mask); ++ e1 = zero_search(a1 ^ b1, mask); ++ first_equal = match_index(e0, e1); ++ ++ if (zs) { ++ z0 = zero_search(a0, mask); ++ z1 = zero_search(a1, mask); ++ first_zero = match_index(z0, z1); ++ } ++ ++ s390_vec_write_element64(v1, 0, MIN(first_equal, first_zero)); ++ s390_vec_write_element64(v1, 1, 0); ++ if (first_zero == 16 && first_equal == 16) { ++ return 3; /* no match */ ++ } else if (first_zero == 16) { ++ return 1; /* matching elements, no match for zero */ ++ } else if (first_equal < first_zero) { ++ return 2; /* matching elements before match for zero */ ++ } ++ return 0; /* match for zero */ ++} ++ ++#define DEF_VFEE_HELPER(BITS) \ ++void HELPER(gvec_vfee##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ const bool zs = extract32(simd_data(desc), 1, 1); \ ++ \ ++ vfee(v1, v2, v3, zs, MO_##BITS); \ ++} ++DEF_VFEE_HELPER(8) ++DEF_VFEE_HELPER(16) ++DEF_VFEE_HELPER(32) ++ ++#define DEF_VFEE_CC_HELPER(BITS) \ ++void HELPER(gvec_vfee_cc##BITS)(void *v1, const void *v2, const void *v3, \ ++ CPUS390XState *env, uint32_t desc) \ ++{ \ ++ const bool zs = extract32(simd_data(desc), 1, 1); \ ++ \ ++ env->cc_op = vfee(v1, v2, v3, zs, MO_##BITS); \ ++} ++DEF_VFEE_CC_HELPER(8) ++DEF_VFEE_CC_HELPER(16) ++DEF_VFEE_CC_HELPER(32) ++ ++static int vfene(void *v1, const void *v2, const void *v3, bool zs, uint8_t es) ++{ ++ const uint64_t mask = get_element_lsbs_mask(es); ++ uint64_t a0, a1, b0, b1, e0, e1, z0, z1; ++ uint64_t first_zero = 16; ++ uint64_t first_inequal; ++ bool smaller = false; ++ ++ a0 = s390_vec_read_element64(v2, 0); ++ a1 = s390_vec_read_element64(v2, 1); ++ b0 = s390_vec_read_element64(v3, 0); ++ b1 = s390_vec_read_element64(v3, 1); ++ e0 = nonzero_search(a0 ^ b0, mask); ++ e1 = nonzero_search(a1 ^ b1, mask); ++ first_inequal = match_index(e0, e1); ++ ++ /* identify the smaller element */ ++ if (first_inequal < 16) { ++ uint8_t enr = first_inequal / (1 << es); ++ uint32_t a = s390_vec_read_element(v2, enr, es); ++ uint32_t b = s390_vec_read_element(v3, enr, es); ++ ++ smaller = a < b; ++ } ++ ++ if (zs) { ++ z0 = zero_search(a0, mask); ++ z1 = zero_search(a1, mask); ++ first_zero = match_index(z0, z1); ++ } ++ ++ s390_vec_write_element64(v1, 0, MIN(first_inequal, first_zero)); ++ s390_vec_write_element64(v1, 1, 0); ++ if (first_zero == 16 && first_inequal == 16) { ++ return 3; ++ } else if (first_zero < first_inequal) { ++ return 0; ++ } ++ return smaller ? 
1 : 2; ++} ++ ++#define DEF_VFENE_HELPER(BITS) \ ++void HELPER(gvec_vfene##BITS)(void *v1, const void *v2, const void *v3, \ ++ uint32_t desc) \ ++{ \ ++ const bool zs = extract32(simd_data(desc), 1, 1); \ ++ \ ++ vfene(v1, v2, v3, zs, MO_##BITS); \ ++} ++DEF_VFENE_HELPER(8) ++DEF_VFENE_HELPER(16) ++DEF_VFENE_HELPER(32) ++ ++#define DEF_VFENE_CC_HELPER(BITS) \ ++void HELPER(gvec_vfene_cc##BITS)(void *v1, const void *v2, const void *v3, \ ++ CPUS390XState *env, uint32_t desc) \ ++{ \ ++ const bool zs = extract32(simd_data(desc), 1, 1); \ ++ \ ++ env->cc_op = vfene(v1, v2, v3, zs, MO_##BITS); \ ++} ++DEF_VFENE_CC_HELPER(8) ++DEF_VFENE_CC_HELPER(16) ++DEF_VFENE_CC_HELPER(32) ++ ++static int vistr(void *v1, const void *v2, uint8_t es) ++{ ++ const uint64_t mask = get_element_lsbs_mask(es); ++ uint64_t a0 = s390_vec_read_element64(v2, 0); ++ uint64_t a1 = s390_vec_read_element64(v2, 1); ++ uint64_t z; ++ int cc = 3; ++ ++ z = zero_search(a0, mask); ++ if (z) { ++ a0 &= ~(-1ull >> clz64(z)); ++ a1 = 0; ++ cc = 0; ++ } else { ++ z = zero_search(a1, mask); ++ if (z) { ++ a1 &= ~(-1ull >> clz64(z)); ++ cc = 0; ++ } ++ } ++ ++ s390_vec_write_element64(v1, 0, a0); ++ s390_vec_write_element64(v1, 1, a1); ++ return cc; ++} ++ ++#define DEF_VISTR_HELPER(BITS) \ ++void HELPER(gvec_vistr##BITS)(void *v1, const void *v2, uint32_t desc) \ ++{ \ ++ vistr(v1, v2, MO_##BITS); \ ++} ++DEF_VISTR_HELPER(8) ++DEF_VISTR_HELPER(16) ++DEF_VISTR_HELPER(32) ++ ++#define DEF_VISTR_CC_HELPER(BITS) \ ++void HELPER(gvec_vistr_cc##BITS)(void *v1, const void *v2, CPUS390XState *env, \ ++ uint32_t desc) \ ++{ \ ++ env->cc_op = vistr(v1, v2, MO_##BITS); \ ++} ++DEF_VISTR_CC_HELPER(8) ++DEF_VISTR_CC_HELPER(16) ++DEF_VISTR_CC_HELPER(32) ++ ++static bool element_compare(uint32_t data, uint32_t l, uint8_t c) ++{ ++ const bool equal = extract32(c, 7, 1); ++ const bool lower = extract32(c, 6, 1); ++ const bool higher = extract32(c, 5, 1); ++ ++ if (data < l) { ++ return lower; ++ } else if (data > l) { ++ return higher; ++ } ++ return equal; ++} ++ ++static int vstrc(void *v1, const void *v2, const void *v3, const void *v4, ++ bool in, bool rt, bool zs, uint8_t es) ++{ ++ const uint64_t mask = get_element_lsbs_mask(es); ++ uint64_t a0 = s390_vec_read_element64(v2, 0); ++ uint64_t a1 = s390_vec_read_element64(v2, 1); ++ int first_zero = 16, first_match = 16; ++ S390Vector rt_result = {}; ++ uint64_t z0, z1; ++ int i, j; ++ ++ if (zs) { ++ z0 = zero_search(a0, mask); ++ z1 = zero_search(a1, mask); ++ first_zero = match_index(z0, z1); ++ } ++ ++ for (i = 0; i < 16 / (1 << es); i++) { ++ const uint32_t data = s390_vec_read_element(v2, i, es); ++ const int cur_byte = i * (1 << es); ++ bool any_match = false; ++ ++ /* if we don't need a bit vector, we can stop early */ ++ if (cur_byte == first_zero && !rt) { ++ break; ++ } ++ ++ for (j = 0; j < 16 / (1 << es); j += 2) { ++ const uint32_t l1 = s390_vec_read_element(v3, j, es); ++ const uint32_t l2 = s390_vec_read_element(v3, j + 1, es); ++ /* we are only interested in the highest byte of each element */ ++ const uint8_t c1 = s390_vec_read_element8(v4, j * (1 << es)); ++ const uint8_t c2 = s390_vec_read_element8(v4, (j + 1) * (1 << es)); ++ ++ if (element_compare(data, l1, c1) && ++ element_compare(data, l2, c2)) { ++ any_match = true; ++ break; ++ } ++ } ++ /* invert the result if requested */ ++ any_match = in ^ any_match; ++ ++ if (any_match) { ++ /* indicate bit vector if requested */ ++ if (rt) { ++ const uint64_t val = -1ull; ++ ++ first_match = MIN(cur_byte, first_match); ++ 
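None of these string helpers scans for terminators element by element: zero_search() from the top of this file flags a zero element with one add and three bitwise ops, setting the element's MSB in the result, and match_index() then turns the leftmost set bit into a byte offset. Extracted for a quick check with the 8-bit element mask (the sample input is arbitrary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t zero_search(uint64_t a, uint64_t mask)
{
    return ~(((a & mask) + mask) | a | mask);
}

int main(void)
{
    const uint64_t mask = 0x7f7f7f7f7f7f7f7full;   /* MO_8 elements */
    const uint64_t a    = 0x4142004344454647ull;   /* zero in the third byte */

    /* adding 0x7f carries into the MSB for every nonzero byte */
    printf("0x%016" PRIx64 "\n", zero_search(a, mask));
    /* -> 0x0000800000000000 */
    return 0;
}
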
s390_vec_write_element(&rt_result, i, es, val); ++ } else { ++ /* stop on the first match */ ++ first_match = cur_byte; ++ break; ++ } ++ } ++ } ++ ++ if (rt) { ++ *(S390Vector *)v1 = rt_result; ++ } else { ++ s390_vec_write_element64(v1, 0, MIN(first_match, first_zero)); ++ s390_vec_write_element64(v1, 1, 0); ++ } ++ ++ if (first_zero == 16 && first_match == 16) { ++ return 3; /* no match */ ++ } else if (first_zero == 16) { ++ return 1; /* matching elements, no match for zero */ ++ } else if (first_match < first_zero) { ++ return 2; /* matching elements before match for zero */ ++ } ++ return 0; /* match for zero */ ++} ++ ++#define DEF_VSTRC_HELPER(BITS) \ ++void HELPER(gvec_vstrc##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, uint32_t desc) \ ++{ \ ++ const bool in = extract32(simd_data(desc), 3, 1); \ ++ const bool zs = extract32(simd_data(desc), 1, 1); \ ++ \ ++ vstrc(v1, v2, v3, v4, in, 0, zs, MO_##BITS); \ ++} ++DEF_VSTRC_HELPER(8) ++DEF_VSTRC_HELPER(16) ++DEF_VSTRC_HELPER(32) ++ ++#define DEF_VSTRC_RT_HELPER(BITS) \ ++void HELPER(gvec_vstrc_rt##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, uint32_t desc) \ ++{ \ ++ const bool in = extract32(simd_data(desc), 3, 1); \ ++ const bool zs = extract32(simd_data(desc), 1, 1); \ ++ \ ++ vstrc(v1, v2, v3, v4, in, 1, zs, MO_##BITS); \ ++} ++DEF_VSTRC_RT_HELPER(8) ++DEF_VSTRC_RT_HELPER(16) ++DEF_VSTRC_RT_HELPER(32) ++ ++#define DEF_VSTRC_CC_HELPER(BITS) \ ++void HELPER(gvec_vstrc_cc##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, CPUS390XState *env, \ ++ uint32_t desc) \ ++{ \ ++ const bool in = extract32(simd_data(desc), 3, 1); \ ++ const bool zs = extract32(simd_data(desc), 1, 1); \ ++ \ ++ env->cc_op = vstrc(v1, v2, v3, v4, in, 0, zs, MO_##BITS); \ ++} ++DEF_VSTRC_CC_HELPER(8) ++DEF_VSTRC_CC_HELPER(16) ++DEF_VSTRC_CC_HELPER(32) ++ ++#define DEF_VSTRC_CC_RT_HELPER(BITS) \ ++void HELPER(gvec_vstrc_cc_rt##BITS)(void *v1, const void *v2, const void *v3, \ ++ const void *v4, CPUS390XState *env, \ ++ uint32_t desc) \ ++{ \ ++ const bool in = extract32(simd_data(desc), 3, 1); \ ++ const bool zs = extract32(simd_data(desc), 1, 1); \ ++ \ ++ env->cc_op = vstrc(v1, v2, v3, v4, in, 1, zs, MO_##BITS); \ ++} ++DEF_VSTRC_CC_RT_HELPER(8) ++DEF_VSTRC_CC_RT_HELPER(16) ++DEF_VSTRC_CC_RT_HELPER(32) +diff --git a/target/s390x/tcg_s390x.h b/target/s390x/tcg_s390x.h +deleted file mode 100644 +index 2f54ccb027..0000000000 +--- a/target/s390x/tcg_s390x.h ++++ /dev/null +@@ -1,24 +0,0 @@ +-/* +- * QEMU TCG support -- s390x specific functions. +- * +- * Copyright 2018 Red Hat, Inc. +- * +- * Authors: +- * David Hildenbrand +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. +- */ +- +-#ifndef TCG_S390X_H +-#define TCG_S390X_H +- +-void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque); +-void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, +- uint32_t code, uintptr_t ra); +-void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, +- uintptr_t ra); +-void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc, +- uintptr_t ra); +- +-#endif /* TCG_S390X_H */ +diff --git a/target/s390x/trace-events b/target/s390x/trace-events +index e661a81e3a..729cb012b4 100644 +--- a/target/s390x/trace-events ++++ b/target/s390x/trace-events +@@ -10,13 +10,7 @@ ioinst_sch_id(const char *insn, int cssid, int ssid, int schid) "IOINST: %s (%x. 
+ ioinst_chp_id(const char *insn, int cssid, int chpid) "IOINST: %s (%x.%02x)" + ioinst_chsc_cmd(uint16_t cmd, uint16_t len) "IOINST: chsc command 0x%04x, len 0x%04x" + +-# kvm.c +-kvm_enable_cmma(int rc) "CMMA: enabling with result code %d" +-kvm_clear_cmma(int rc) "CMMA: clearing with result code %d" +-kvm_failed_cpu_state_set(int cpu_index, uint8_t state, const char *msg) "Warning: Unable to set cpu %d state %" PRIu8 " to KVM: %s" +-kvm_assign_subch_ioeventfd(int fd, uint32_t addr, bool assign, int datamatch) "fd: %d sch: @0x%x assign: %d vq: %d" +- +-# cpu.c ++# cpu-sysemu.c + cpu_set_state(int cpu_index, uint8_t state) "setting cpu %d state to %" PRIu8 + cpu_halt(int cpu_index) "halting cpu %d" + cpu_unhalt(int cpu_index) "unhalting cpu %d" +diff --git a/target/s390x/translate.c b/target/s390x/translate.c +deleted file mode 100644 +index c8d55d1f83..0000000000 +--- a/target/s390x/translate.c ++++ /dev/null +@@ -1,6687 +0,0 @@ +-/* +- * S/390 translation +- * +- * Copyright (c) 2009 Ulrich Hecht +- * Copyright (c) 2010 Alexander Graf +- * +- * This library is free software; you can redistribute it and/or +- * modify it under the terms of the GNU Lesser General Public +- * License as published by the Free Software Foundation; either +- * version 2.1 of the License, or (at your option) any later version. +- * +- * This library is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * Lesser General Public License for more details. +- * +- * You should have received a copy of the GNU Lesser General Public +- * License along with this library; if not, see . +- */ +- +-/* #define DEBUG_INLINE_BRANCHES */ +-#define S390X_DEBUG_DISAS +-/* #define S390X_DEBUG_DISAS_VERBOSE */ +- +-#ifdef S390X_DEBUG_DISAS_VERBOSE +-# define LOG_DISAS(...) qemu_log(__VA_ARGS__) +-#else +-# define LOG_DISAS(...) do { } while (0) +-#endif +- +-#include "qemu/osdep.h" +-#include "cpu.h" +-#include "internal.h" +-#include "disas/disas.h" +-#include "exec/exec-all.h" +-#include "tcg/tcg-op.h" +-#include "tcg/tcg-op-gvec.h" +-#include "qemu/log.h" +-#include "qemu/host-utils.h" +-#include "exec/cpu_ldst.h" +-#include "exec/gen-icount.h" +-#include "exec/helper-proto.h" +-#include "exec/helper-gen.h" +- +-#include "exec/translator.h" +-#include "exec/log.h" +-#include "qemu/atomic128.h" +- +- +-/* Information that (most) every instruction needs to manipulate. */ +-typedef struct DisasContext DisasContext; +-typedef struct DisasInsn DisasInsn; +-typedef struct DisasFields DisasFields; +- +-/* +- * Define a structure to hold the decoded fields. We'll store each inside +- * an array indexed by an enum. In order to conserve memory, we'll arrange +- * for fields that do not exist at the same time to overlap, thus the "C" +- * for compact. For checking purposes there is an "O" for original index +- * as well that will be applied to availability bitmaps. 
+- */ +- +-enum DisasFieldIndexO { +- FLD_O_r1, +- FLD_O_r2, +- FLD_O_r3, +- FLD_O_m1, +- FLD_O_m3, +- FLD_O_m4, +- FLD_O_m5, +- FLD_O_m6, +- FLD_O_b1, +- FLD_O_b2, +- FLD_O_b4, +- FLD_O_d1, +- FLD_O_d2, +- FLD_O_d4, +- FLD_O_x2, +- FLD_O_l1, +- FLD_O_l2, +- FLD_O_i1, +- FLD_O_i2, +- FLD_O_i3, +- FLD_O_i4, +- FLD_O_i5, +- FLD_O_v1, +- FLD_O_v2, +- FLD_O_v3, +- FLD_O_v4, +-}; +- +-enum DisasFieldIndexC { +- FLD_C_r1 = 0, +- FLD_C_m1 = 0, +- FLD_C_b1 = 0, +- FLD_C_i1 = 0, +- FLD_C_v1 = 0, +- +- FLD_C_r2 = 1, +- FLD_C_b2 = 1, +- FLD_C_i2 = 1, +- +- FLD_C_r3 = 2, +- FLD_C_m3 = 2, +- FLD_C_i3 = 2, +- FLD_C_v3 = 2, +- +- FLD_C_m4 = 3, +- FLD_C_b4 = 3, +- FLD_C_i4 = 3, +- FLD_C_l1 = 3, +- FLD_C_v4 = 3, +- +- FLD_C_i5 = 4, +- FLD_C_d1 = 4, +- FLD_C_m5 = 4, +- +- FLD_C_d2 = 5, +- FLD_C_m6 = 5, +- +- FLD_C_d4 = 6, +- FLD_C_x2 = 6, +- FLD_C_l2 = 6, +- FLD_C_v2 = 6, +- +- NUM_C_FIELD = 7 +-}; +- +-struct DisasFields { +- uint64_t raw_insn; +- unsigned op:8; +- unsigned op2:8; +- unsigned presentC:16; +- unsigned int presentO; +- int c[NUM_C_FIELD]; +-}; +- +-struct DisasContext { +- DisasContextBase base; +- const DisasInsn *insn; +- DisasFields fields; +- uint64_t ex_value; +- /* +- * During translate_one(), pc_tmp is used to determine the instruction +- * to be executed after base.pc_next - e.g. next sequential instruction +- * or a branch target. +- */ +- uint64_t pc_tmp; +- uint32_t ilen; +- enum cc_op cc_op; +- bool do_debug; +-}; +- +-/* Information carried about a condition to be evaluated. */ +-typedef struct { +- TCGCond cond:8; +- bool is_64; +- bool g1; +- bool g2; +- union { +- struct { TCGv_i64 a, b; } s64; +- struct { TCGv_i32 a, b; } s32; +- } u; +-} DisasCompare; +- +-#ifdef DEBUG_INLINE_BRANCHES +-static uint64_t inline_branch_hit[CC_OP_MAX]; +-static uint64_t inline_branch_miss[CC_OP_MAX]; +-#endif +- +-static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc) +-{ +- TCGv_i64 tmp; +- +- if (s->base.tb->flags & FLAG_MASK_32) { +- if (s->base.tb->flags & FLAG_MASK_64) { +- tcg_gen_movi_i64(out, pc); +- return; +- } +- pc |= 0x80000000; +- } +- assert(!(s->base.tb->flags & FLAG_MASK_64)); +- tmp = tcg_const_i64(pc); +- tcg_gen_deposit_i64(out, out, tmp, 0, 32); +- tcg_temp_free_i64(tmp); +-} +- +-static TCGv_i64 psw_addr; +-static TCGv_i64 psw_mask; +-static TCGv_i64 gbea; +- +-static TCGv_i32 cc_op; +-static TCGv_i64 cc_src; +-static TCGv_i64 cc_dst; +-static TCGv_i64 cc_vr; +- +-static char cpu_reg_names[16][4]; +-static TCGv_i64 regs[16]; +- +-void s390x_translate_init(void) +-{ +- int i; +- +- psw_addr = tcg_global_mem_new_i64(cpu_env, +- offsetof(CPUS390XState, psw.addr), +- "psw_addr"); +- psw_mask = tcg_global_mem_new_i64(cpu_env, +- offsetof(CPUS390XState, psw.mask), +- "psw_mask"); +- gbea = tcg_global_mem_new_i64(cpu_env, +- offsetof(CPUS390XState, gbea), +- "gbea"); +- +- cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op), +- "cc_op"); +- cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src), +- "cc_src"); +- cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst), +- "cc_dst"); +- cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr), +- "cc_vr"); +- +- for (i = 0; i < 16; i++) { +- snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i); +- regs[i] = tcg_global_mem_new(cpu_env, +- offsetof(CPUS390XState, regs[i]), +- cpu_reg_names[i]); +- } +-} +- +-static inline int vec_full_reg_offset(uint8_t reg) +-{ +- g_assert(reg < 32); +- return offsetof(CPUS390XState, vregs[reg][0]); +-} +- 
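The vec_reg_offset() helper removed just below encodes s390x's big-endian vector element numbering on top of two host-order 64-bit halves; on a little-endian host the sub-offset within each 8-byte half has to be mirrored. A minimal stand-alone sketch of that mapping follows — element_offset() and host_big_endian are illustrative names for this sketch only, not identifiers from the patch:

#include <assert.h>
#include <stdio.h>

/*
 * Stand-alone model of the offset computation in vec_reg_offset():
 * a 16-byte vector register is stored as two host-endian uint64_t
 * halves, while elements are numbered big-endian.  On a little-endian
 * host the byte offset must therefore be mirrored within its 8-byte
 * half, which is what "offs ^= 8 - bytes" does for 1/2/4/8-byte
 * elements.
 */
static int element_offset(int enr, int bytes, int host_big_endian)
{
    int offs = enr * bytes;   /* big-endian element numbering */

    if (!host_big_endian) {
        offs ^= 8 - bytes;    /* mirror within the 8-byte half */
    }
    return offs;
}

int main(void)
{
    /* Byte element 0 is the MSB of the first half on a LE host... */
    assert(element_offset(0, 1, 0) == 7);
    /* ...but sits at offset 0 on a BE host. */
    assert(element_offset(0, 1, 1) == 0);
    /* Byte element 8 maps into the second uint64_t on both hosts. */
    assert(element_offset(8, 1, 0) == 15);
    assert(element_offset(8, 1, 1) == 8);
    /* 8-byte elements are already whole host-order halves. */
    assert(element_offset(1, 8, 0) == 8);
    printf("vector element offsets mirror as expected\n");
    return 0;
}

This mirroring only works up to 8-byte elements, which is why the helper below asserts es <= MO_64 and, per its comment, forces 16-byte operations to load the two halves separately.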
+-static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es) +-{ +- /* Convert element size (es) - e.g. MO_8 - to bytes */ +- const uint8_t bytes = 1 << es; +- int offs = enr * bytes; +- +- /* +- * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte +- * of the 16 byte vector, on both, little and big endian systems. +- * +- * Big Endian (target/possible host) +- * B: [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15] +- * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7] +- * W: [ 0][ 1] - [ 2][ 3] +- * DW: [ 0] - [ 1] +- * +- * Little Endian (possible host) +- * B: [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8] +- * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4] +- * W: [ 1][ 0] - [ 3][ 2] +- * DW: [ 0] - [ 1] +- * +- * For 16 byte elements, the two 8 byte halves will not form a host +- * int128 if the host is little endian, since they're in the wrong order. +- * Some operations (e.g. xor) do not care. For operations like addition, +- * the two 8 byte elements have to be loaded separately. Let's force all +- * 16 byte operations to handle it in a special way. +- */ +- g_assert(es <= MO_64); +-#ifndef HOST_WORDS_BIGENDIAN +- offs ^= (8 - bytes); +-#endif +- return offs + vec_full_reg_offset(reg); +-} +- +-static inline int freg64_offset(uint8_t reg) +-{ +- g_assert(reg < 16); +- return vec_reg_offset(reg, 0, MO_64); +-} +- +-static inline int freg32_offset(uint8_t reg) +-{ +- g_assert(reg < 16); +- return vec_reg_offset(reg, 0, MO_32); +-} +- +-static TCGv_i64 load_reg(int reg) +-{ +- TCGv_i64 r = tcg_temp_new_i64(); +- tcg_gen_mov_i64(r, regs[reg]); +- return r; +-} +- +-static TCGv_i64 load_freg(int reg) +-{ +- TCGv_i64 r = tcg_temp_new_i64(); +- +- tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg)); +- return r; +-} +- +-static TCGv_i64 load_freg32_i64(int reg) +-{ +- TCGv_i64 r = tcg_temp_new_i64(); +- +- tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg)); +- return r; +-} +- +-static void store_reg(int reg, TCGv_i64 v) +-{ +- tcg_gen_mov_i64(regs[reg], v); +-} +- +-static void store_freg(int reg, TCGv_i64 v) +-{ +- tcg_gen_st_i64(v, cpu_env, freg64_offset(reg)); +-} +- +-static void store_reg32_i64(int reg, TCGv_i64 v) +-{ +- /* 32 bit register writes keep the upper half */ +- tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32); +-} +- +-static void store_reg32h_i64(int reg, TCGv_i64 v) +-{ +- tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32); +-} +- +-static void store_freg32_i64(int reg, TCGv_i64 v) +-{ +- tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg)); +-} +- +-static void return_low128(TCGv_i64 dest) +-{ +- tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl)); +-} +- +-static void update_psw_addr(DisasContext *s) +-{ +- /* psw.addr */ +- tcg_gen_movi_i64(psw_addr, s->base.pc_next); +-} +- +-static void per_branch(DisasContext *s, bool to_next) +-{ +-#ifndef CONFIG_USER_ONLY +- tcg_gen_movi_i64(gbea, s->base.pc_next); +- +- if (s->base.tb->flags & FLAG_MASK_PER) { +- TCGv_i64 next_pc = to_next ? 
tcg_const_i64(s->pc_tmp) : psw_addr; +- gen_helper_per_branch(cpu_env, gbea, next_pc); +- if (to_next) { +- tcg_temp_free_i64(next_pc); +- } +- } +-#endif +-} +- +-static void per_branch_cond(DisasContext *s, TCGCond cond, +- TCGv_i64 arg1, TCGv_i64 arg2) +-{ +-#ifndef CONFIG_USER_ONLY +- if (s->base.tb->flags & FLAG_MASK_PER) { +- TCGLabel *lab = gen_new_label(); +- tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab); +- +- tcg_gen_movi_i64(gbea, s->base.pc_next); +- gen_helper_per_branch(cpu_env, gbea, psw_addr); +- +- gen_set_label(lab); +- } else { +- TCGv_i64 pc = tcg_const_i64(s->base.pc_next); +- tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc); +- tcg_temp_free_i64(pc); +- } +-#endif +-} +- +-static void per_breaking_event(DisasContext *s) +-{ +- tcg_gen_movi_i64(gbea, s->base.pc_next); +-} +- +-static void update_cc_op(DisasContext *s) +-{ +- if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) { +- tcg_gen_movi_i32(cc_op, s->cc_op); +- } +-} +- +-static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc) +-{ +- return (uint64_t)cpu_lduw_code(env, pc); +-} +- +-static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc) +-{ +- return (uint64_t)(uint32_t)cpu_ldl_code(env, pc); +-} +- +-static int get_mem_index(DisasContext *s) +-{ +-#ifdef CONFIG_USER_ONLY +- return MMU_USER_IDX; +-#else +- if (!(s->base.tb->flags & FLAG_MASK_DAT)) { +- return MMU_REAL_IDX; +- } +- +- switch (s->base.tb->flags & FLAG_MASK_ASC) { +- case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT: +- return MMU_PRIMARY_IDX; +- case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT: +- return MMU_SECONDARY_IDX; +- case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT: +- return MMU_HOME_IDX; +- default: +- tcg_abort(); +- break; +- } +-#endif +-} +- +-static void gen_exception(int excp) +-{ +- TCGv_i32 tmp = tcg_const_i32(excp); +- gen_helper_exception(cpu_env, tmp); +- tcg_temp_free_i32(tmp); +-} +- +-static void gen_program_exception(DisasContext *s, int code) +-{ +- TCGv_i32 tmp; +- +- /* Remember what pgm exeption this was. */ +- tmp = tcg_const_i32(code); +- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code)); +- tcg_temp_free_i32(tmp); +- +- tmp = tcg_const_i32(s->ilen); +- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen)); +- tcg_temp_free_i32(tmp); +- +- /* update the psw */ +- update_psw_addr(s); +- +- /* Save off cc. */ +- update_cc_op(s); +- +- /* Trigger exception. */ +- gen_exception(EXCP_PGM); +-} +- +-static inline void gen_illegal_opcode(DisasContext *s) +-{ +- gen_program_exception(s, PGM_OPERATION); +-} +- +-static inline void gen_data_exception(uint8_t dxc) +-{ +- TCGv_i32 tmp = tcg_const_i32(dxc); +- gen_helper_data_exception(cpu_env, tmp); +- tcg_temp_free_i32(tmp); +-} +- +-static inline void gen_trap(DisasContext *s) +-{ +- /* Set DXC to 0xff */ +- gen_data_exception(0xff); +-} +- +-static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src, +- int64_t imm) +-{ +- tcg_gen_addi_i64(dst, src, imm); +- if (!(s->base.tb->flags & FLAG_MASK_64)) { +- if (s->base.tb->flags & FLAG_MASK_32) { +- tcg_gen_andi_i64(dst, dst, 0x7fffffff); +- } else { +- tcg_gen_andi_i64(dst, dst, 0x00ffffff); +- } +- } +-} +- +-static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2) +-{ +- TCGv_i64 tmp = tcg_temp_new_i64(); +- +- /* +- * Note that d2 is limited to 20 bits, signed. If we crop negative +- * displacements early we create larger immedate addends. 
+- */ +- if (b2 && x2) { +- tcg_gen_add_i64(tmp, regs[b2], regs[x2]); +- gen_addi_and_wrap_i64(s, tmp, tmp, d2); +- } else if (b2) { +- gen_addi_and_wrap_i64(s, tmp, regs[b2], d2); +- } else if (x2) { +- gen_addi_and_wrap_i64(s, tmp, regs[x2], d2); +- } else if (!(s->base.tb->flags & FLAG_MASK_64)) { +- if (s->base.tb->flags & FLAG_MASK_32) { +- tcg_gen_movi_i64(tmp, d2 & 0x7fffffff); +- } else { +- tcg_gen_movi_i64(tmp, d2 & 0x00ffffff); +- } +- } else { +- tcg_gen_movi_i64(tmp, d2); +- } +- +- return tmp; +-} +- +-static inline bool live_cc_data(DisasContext *s) +-{ +- return (s->cc_op != CC_OP_DYNAMIC +- && s->cc_op != CC_OP_STATIC +- && s->cc_op > 3); +-} +- +-static inline void gen_op_movi_cc(DisasContext *s, uint32_t val) +-{ +- if (live_cc_data(s)) { +- tcg_gen_discard_i64(cc_src); +- tcg_gen_discard_i64(cc_dst); +- tcg_gen_discard_i64(cc_vr); +- } +- s->cc_op = CC_OP_CONST0 + val; +-} +- +-static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst) +-{ +- if (live_cc_data(s)) { +- tcg_gen_discard_i64(cc_src); +- tcg_gen_discard_i64(cc_vr); +- } +- tcg_gen_mov_i64(cc_dst, dst); +- s->cc_op = op; +-} +- +-static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src, +- TCGv_i64 dst) +-{ +- if (live_cc_data(s)) { +- tcg_gen_discard_i64(cc_vr); +- } +- tcg_gen_mov_i64(cc_src, src); +- tcg_gen_mov_i64(cc_dst, dst); +- s->cc_op = op; +-} +- +-static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src, +- TCGv_i64 dst, TCGv_i64 vr) +-{ +- tcg_gen_mov_i64(cc_src, src); +- tcg_gen_mov_i64(cc_dst, dst); +- tcg_gen_mov_i64(cc_vr, vr); +- s->cc_op = op; +-} +- +-static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val) +-{ +- gen_op_update1_cc_i64(s, CC_OP_NZ, val); +-} +- +-static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val) +-{ +- gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val); +-} +- +-static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val) +-{ +- gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val); +-} +- +-static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl) +-{ +- gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl); +-} +- +-/* CC value is in env->cc_op */ +-static void set_cc_static(DisasContext *s) +-{ +- if (live_cc_data(s)) { +- tcg_gen_discard_i64(cc_src); +- tcg_gen_discard_i64(cc_dst); +- tcg_gen_discard_i64(cc_vr); +- } +- s->cc_op = CC_OP_STATIC; +-} +- +-/* calculates cc into cc_op */ +-static void gen_op_calc_cc(DisasContext *s) +-{ +- TCGv_i32 local_cc_op = NULL; +- TCGv_i64 dummy = NULL; +- +- switch (s->cc_op) { +- default: +- dummy = tcg_const_i64(0); +- /* FALLTHRU */ +- case CC_OP_ADD_64: +- case CC_OP_SUB_64: +- case CC_OP_ADD_32: +- case CC_OP_SUB_32: +- local_cc_op = tcg_const_i32(s->cc_op); +- break; +- case CC_OP_CONST0: +- case CC_OP_CONST1: +- case CC_OP_CONST2: +- case CC_OP_CONST3: +- case CC_OP_STATIC: +- case CC_OP_DYNAMIC: +- break; +- } +- +- switch (s->cc_op) { +- case CC_OP_CONST0: +- case CC_OP_CONST1: +- case CC_OP_CONST2: +- case CC_OP_CONST3: +- /* s->cc_op is the cc value */ +- tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0); +- break; +- case CC_OP_STATIC: +- /* env->cc_op already is the cc value */ +- break; +- case CC_OP_NZ: +- case CC_OP_ABS_64: +- case CC_OP_NABS_64: +- case CC_OP_ABS_32: +- case CC_OP_NABS_32: +- case CC_OP_LTGT0_32: +- case CC_OP_LTGT0_64: +- case CC_OP_COMP_32: +- case CC_OP_COMP_64: +- case CC_OP_NZ_F32: +- case CC_OP_NZ_F64: +- case CC_OP_FLOGR: +- case CC_OP_LCBB: +- case CC_OP_MULS_32: +- /* 1 argument */ +- gen_helper_calc_cc(cc_op, cpu_env, 
local_cc_op, dummy, cc_dst, dummy); +- break; +- case CC_OP_ADDU: +- case CC_OP_ICM: +- case CC_OP_LTGT_32: +- case CC_OP_LTGT_64: +- case CC_OP_LTUGTU_32: +- case CC_OP_LTUGTU_64: +- case CC_OP_TM_32: +- case CC_OP_TM_64: +- case CC_OP_SLA_32: +- case CC_OP_SLA_64: +- case CC_OP_SUBU: +- case CC_OP_NZ_F128: +- case CC_OP_VC: +- case CC_OP_MULS_64: +- /* 2 arguments */ +- gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy); +- break; +- case CC_OP_ADD_64: +- case CC_OP_SUB_64: +- case CC_OP_ADD_32: +- case CC_OP_SUB_32: +- /* 3 arguments */ +- gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr); +- break; +- case CC_OP_DYNAMIC: +- /* unknown operation - assume 3 arguments and cc_op in env */ +- gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr); +- break; +- default: +- tcg_abort(); +- } +- +- if (local_cc_op) { +- tcg_temp_free_i32(local_cc_op); +- } +- if (dummy) { +- tcg_temp_free_i64(dummy); +- } +- +- /* We now have cc in cc_op as constant */ +- set_cc_static(s); +-} +- +-static bool use_goto_tb(DisasContext *s, uint64_t dest) +-{ +- if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) { +- return false; +- } +- return translator_use_goto_tb(&s->base, dest); +-} +- +-static void account_noninline_branch(DisasContext *s, int cc_op) +-{ +-#ifdef DEBUG_INLINE_BRANCHES +- inline_branch_miss[cc_op]++; +-#endif +-} +- +-static void account_inline_branch(DisasContext *s, int cc_op) +-{ +-#ifdef DEBUG_INLINE_BRANCHES +- inline_branch_hit[cc_op]++; +-#endif +-} +- +-/* Table of mask values to comparison codes, given a comparison as input. +- For such, CC=3 should not be possible. */ +-static const TCGCond ltgt_cond[16] = { +- TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */ +- TCG_COND_GT, TCG_COND_GT, /* | | GT | x */ +- TCG_COND_LT, TCG_COND_LT, /* | LT | | x */ +- TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */ +- TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */ +- TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */ +- TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */ +- TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */ +-}; +- +-/* Table of mask values to comparison codes, given a logic op as input. +- For such, only CC=0 and CC=1 should be possible. */ +-static const TCGCond nz_cond[16] = { +- TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */ +- TCG_COND_NEVER, TCG_COND_NEVER, +- TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */ +- TCG_COND_NE, TCG_COND_NE, +- TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */ +- TCG_COND_EQ, TCG_COND_EQ, +- TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */ +- TCG_COND_ALWAYS, TCG_COND_ALWAYS, +-}; +- +-/* Interpret MASK in terms of S->CC_OP, and fill in C with all the +- details required to generate a TCG comparison. */ +-static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask) +-{ +- TCGCond cond; +- enum cc_op old_cc_op = s->cc_op; +- +- if (mask == 15 || mask == 0) { +- c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER); +- c->u.s32.a = cc_op; +- c->u.s32.b = cc_op; +- c->g1 = c->g2 = true; +- c->is_64 = false; +- return; +- } +- +- /* Find the TCG condition for the mask + cc op. 
*/ +- switch (old_cc_op) { +- case CC_OP_LTGT0_32: +- case CC_OP_LTGT0_64: +- case CC_OP_LTGT_32: +- case CC_OP_LTGT_64: +- cond = ltgt_cond[mask]; +- if (cond == TCG_COND_NEVER) { +- goto do_dynamic; +- } +- account_inline_branch(s, old_cc_op); +- break; +- +- case CC_OP_LTUGTU_32: +- case CC_OP_LTUGTU_64: +- cond = tcg_unsigned_cond(ltgt_cond[mask]); +- if (cond == TCG_COND_NEVER) { +- goto do_dynamic; +- } +- account_inline_branch(s, old_cc_op); +- break; +- +- case CC_OP_NZ: +- cond = nz_cond[mask]; +- if (cond == TCG_COND_NEVER) { +- goto do_dynamic; +- } +- account_inline_branch(s, old_cc_op); +- break; +- +- case CC_OP_TM_32: +- case CC_OP_TM_64: +- switch (mask) { +- case 8: +- cond = TCG_COND_EQ; +- break; +- case 4 | 2 | 1: +- cond = TCG_COND_NE; +- break; +- default: +- goto do_dynamic; +- } +- account_inline_branch(s, old_cc_op); +- break; +- +- case CC_OP_ICM: +- switch (mask) { +- case 8: +- cond = TCG_COND_EQ; +- break; +- case 4 | 2 | 1: +- case 4 | 2: +- cond = TCG_COND_NE; +- break; +- default: +- goto do_dynamic; +- } +- account_inline_branch(s, old_cc_op); +- break; +- +- case CC_OP_FLOGR: +- switch (mask & 0xa) { +- case 8: /* src == 0 -> no one bit found */ +- cond = TCG_COND_EQ; +- break; +- case 2: /* src != 0 -> one bit found */ +- cond = TCG_COND_NE; +- break; +- default: +- goto do_dynamic; +- } +- account_inline_branch(s, old_cc_op); +- break; +- +- case CC_OP_ADDU: +- case CC_OP_SUBU: +- switch (mask) { +- case 8 | 2: /* result == 0 */ +- cond = TCG_COND_EQ; +- break; +- case 4 | 1: /* result != 0 */ +- cond = TCG_COND_NE; +- break; +- case 8 | 4: /* !carry (borrow) */ +- cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE; +- break; +- case 2 | 1: /* carry (!borrow) */ +- cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ; +- break; +- default: +- goto do_dynamic; +- } +- account_inline_branch(s, old_cc_op); +- break; +- +- default: +- do_dynamic: +- /* Calculate cc value. */ +- gen_op_calc_cc(s); +- /* FALLTHRU */ +- +- case CC_OP_STATIC: +- /* Jump based on CC. We'll load up the real cond below; +- the assignment here merely avoids a compiler warning. */ +- account_noninline_branch(s, old_cc_op); +- old_cc_op = CC_OP_STATIC; +- cond = TCG_COND_NEVER; +- break; +- } +- +- /* Load up the arguments of the comparison. 
*/ +- c->is_64 = true; +- c->g1 = c->g2 = false; +- switch (old_cc_op) { +- case CC_OP_LTGT0_32: +- c->is_64 = false; +- c->u.s32.a = tcg_temp_new_i32(); +- tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst); +- c->u.s32.b = tcg_const_i32(0); +- break; +- case CC_OP_LTGT_32: +- case CC_OP_LTUGTU_32: +- c->is_64 = false; +- c->u.s32.a = tcg_temp_new_i32(); +- tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src); +- c->u.s32.b = tcg_temp_new_i32(); +- tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst); +- break; +- +- case CC_OP_LTGT0_64: +- case CC_OP_NZ: +- case CC_OP_FLOGR: +- c->u.s64.a = cc_dst; +- c->u.s64.b = tcg_const_i64(0); +- c->g1 = true; +- break; +- case CC_OP_LTGT_64: +- case CC_OP_LTUGTU_64: +- c->u.s64.a = cc_src; +- c->u.s64.b = cc_dst; +- c->g1 = c->g2 = true; +- break; +- +- case CC_OP_TM_32: +- case CC_OP_TM_64: +- case CC_OP_ICM: +- c->u.s64.a = tcg_temp_new_i64(); +- c->u.s64.b = tcg_const_i64(0); +- tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst); +- break; +- +- case CC_OP_ADDU: +- case CC_OP_SUBU: +- c->is_64 = true; +- c->u.s64.b = tcg_const_i64(0); +- c->g1 = true; +- switch (mask) { +- case 8 | 2: +- case 4 | 1: /* result */ +- c->u.s64.a = cc_dst; +- break; +- case 8 | 4: +- case 2 | 1: /* carry */ +- c->u.s64.a = cc_src; +- break; +- default: +- g_assert_not_reached(); +- } +- break; +- +- case CC_OP_STATIC: +- c->is_64 = false; +- c->u.s32.a = cc_op; +- c->g1 = true; +- switch (mask) { +- case 0x8 | 0x4 | 0x2: /* cc != 3 */ +- cond = TCG_COND_NE; +- c->u.s32.b = tcg_const_i32(3); +- break; +- case 0x8 | 0x4 | 0x1: /* cc != 2 */ +- cond = TCG_COND_NE; +- c->u.s32.b = tcg_const_i32(2); +- break; +- case 0x8 | 0x2 | 0x1: /* cc != 1 */ +- cond = TCG_COND_NE; +- c->u.s32.b = tcg_const_i32(1); +- break; +- case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */ +- cond = TCG_COND_EQ; +- c->g1 = false; +- c->u.s32.a = tcg_temp_new_i32(); +- c->u.s32.b = tcg_const_i32(0); +- tcg_gen_andi_i32(c->u.s32.a, cc_op, 1); +- break; +- case 0x8 | 0x4: /* cc < 2 */ +- cond = TCG_COND_LTU; +- c->u.s32.b = tcg_const_i32(2); +- break; +- case 0x8: /* cc == 0 */ +- cond = TCG_COND_EQ; +- c->u.s32.b = tcg_const_i32(0); +- break; +- case 0x4 | 0x2 | 0x1: /* cc != 0 */ +- cond = TCG_COND_NE; +- c->u.s32.b = tcg_const_i32(0); +- break; +- case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */ +- cond = TCG_COND_NE; +- c->g1 = false; +- c->u.s32.a = tcg_temp_new_i32(); +- c->u.s32.b = tcg_const_i32(0); +- tcg_gen_andi_i32(c->u.s32.a, cc_op, 1); +- break; +- case 0x4: /* cc == 1 */ +- cond = TCG_COND_EQ; +- c->u.s32.b = tcg_const_i32(1); +- break; +- case 0x2 | 0x1: /* cc > 1 */ +- cond = TCG_COND_GTU; +- c->u.s32.b = tcg_const_i32(1); +- break; +- case 0x2: /* cc == 2 */ +- cond = TCG_COND_EQ; +- c->u.s32.b = tcg_const_i32(2); +- break; +- case 0x1: /* cc == 3 */ +- cond = TCG_COND_EQ; +- c->u.s32.b = tcg_const_i32(3); +- break; +- default: +- /* CC is masked by something else: (8 >> cc) & mask. 
*/ +- cond = TCG_COND_NE; +- c->g1 = false; +- c->u.s32.a = tcg_const_i32(8); +- c->u.s32.b = tcg_const_i32(0); +- tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op); +- tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask); +- break; +- } +- break; +- +- default: +- abort(); +- } +- c->cond = cond; +-} +- +-static void free_compare(DisasCompare *c) +-{ +- if (!c->g1) { +- if (c->is_64) { +- tcg_temp_free_i64(c->u.s64.a); +- } else { +- tcg_temp_free_i32(c->u.s32.a); +- } +- } +- if (!c->g2) { +- if (c->is_64) { +- tcg_temp_free_i64(c->u.s64.b); +- } else { +- tcg_temp_free_i32(c->u.s32.b); +- } +- } +-} +- +-/* ====================================================================== */ +-/* Define the insn format enumeration. */ +-#define F0(N) FMT_##N, +-#define F1(N, X1) F0(N) +-#define F2(N, X1, X2) F0(N) +-#define F3(N, X1, X2, X3) F0(N) +-#define F4(N, X1, X2, X3, X4) F0(N) +-#define F5(N, X1, X2, X3, X4, X5) F0(N) +-#define F6(N, X1, X2, X3, X4, X5, X6) F0(N) +- +-typedef enum { +-#include "insn-format.def" +-} DisasFormat; +- +-#undef F0 +-#undef F1 +-#undef F2 +-#undef F3 +-#undef F4 +-#undef F5 +-#undef F6 +- +-/* This is the way fields are to be accessed out of DisasFields. */ +-#define have_field(S, F) have_field1((S), FLD_O_##F) +-#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F) +- +-static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c) +-{ +- return (s->fields.presentO >> c) & 1; +-} +- +-static int get_field1(const DisasContext *s, enum DisasFieldIndexO o, +- enum DisasFieldIndexC c) +-{ +- assert(have_field1(s, o)); +- return s->fields.c[c]; +-} +- +-/* Describe the layout of each field in each format. */ +-typedef struct DisasField { +- unsigned int beg:8; +- unsigned int size:8; +- unsigned int type:2; +- unsigned int indexC:6; +- enum DisasFieldIndexO indexO:8; +-} DisasField; +- +-typedef struct DisasFormatInfo { +- DisasField op[NUM_C_FIELD]; +-} DisasFormatInfo; +- +-#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N } +-#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N } +-#define V(N, B) { B, 4, 3, FLD_C_v##N, FLD_O_v##N } +-#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ +- { BD, 12, 0, FLD_C_d##N, FLD_O_d##N } +-#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ +- { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \ +- { 20, 12, 0, FLD_C_d##N, FLD_O_d##N } +-#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ +- { 20, 20, 2, FLD_C_d##N, FLD_O_d##N } +-#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \ +- { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \ +- { 20, 20, 2, FLD_C_d##N, FLD_O_d##N } +-#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N } +-#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N } +- +-#define F0(N) { { } }, +-#define F1(N, X1) { { X1 } }, +-#define F2(N, X1, X2) { { X1, X2 } }, +-#define F3(N, X1, X2, X3) { { X1, X2, X3 } }, +-#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } }, +-#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } }, +-#define F6(N, X1, X2, X3, X4, X5, X6) { { X1, X2, X3, X4, X5, X6 } }, +- +-static const DisasFormatInfo format_info[] = { +-#include "insn-format.def" +-}; +- +-#undef F0 +-#undef F1 +-#undef F2 +-#undef F3 +-#undef F4 +-#undef F5 +-#undef F6 +-#undef R +-#undef M +-#undef V +-#undef BD +-#undef BXD +-#undef BDL +-#undef BXDL +-#undef I +-#undef L +- +-/* Generally, we'll extract operands into this structures, operate upon +- them, and store them back. See the "in1", "in2", "prep", "wout" sets +- of routines below for more details. 
*/ +-typedef struct { +- bool g_out, g_out2, g_in1, g_in2; +- TCGv_i64 out, out2, in1, in2; +- TCGv_i64 addr1; +-} DisasOps; +- +-/* Instructions can place constraints on their operands, raising specification +- exceptions if they are violated. To make this easy to automate, each "in1", +- "in2", "prep", "wout" helper will have a SPEC_ define that equals one +- of the following, or 0. To make this easy to document, we'll put the +- SPEC_ defines next to . */ +- +-#define SPEC_r1_even 1 +-#define SPEC_r2_even 2 +-#define SPEC_r3_even 4 +-#define SPEC_r1_f128 8 +-#define SPEC_r2_f128 16 +- +-/* Return values from translate_one, indicating the state of the TB. */ +- +-/* We are not using a goto_tb (for whatever reason), but have updated +- the PC (for whatever reason), so there's no need to do it again on +- exiting the TB. */ +-#define DISAS_PC_UPDATED DISAS_TARGET_0 +- +-/* We have emitted one or more goto_tb. No fixup required. */ +-#define DISAS_GOTO_TB DISAS_TARGET_1 +- +-/* We have updated the PC and CC values. */ +-#define DISAS_PC_CC_UPDATED DISAS_TARGET_2 +- +-/* We are exiting the TB, but have neither emitted a goto_tb, nor +- updated the PC for the next instruction to be executed. */ +-#define DISAS_PC_STALE DISAS_TARGET_3 +- +-/* We are exiting the TB to the main loop. */ +-#define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4 +- +- +-/* Instruction flags */ +-#define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */ +-#define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */ +-#define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */ +-#define IF_BFP 0x0008 /* binary floating point instruction */ +-#define IF_DFP 0x0010 /* decimal floating point instruction */ +-#define IF_PRIV 0x0020 /* privileged instruction */ +-#define IF_VEC 0x0040 /* vector instruction */ +-#define IF_IO 0x0080 /* input/output instruction */ +- +-struct DisasInsn { +- unsigned opc:16; +- unsigned flags:16; +- DisasFormat fmt:8; +- unsigned fac:8; +- unsigned spec:8; +- +- const char *name; +- +- /* Pre-process arguments before HELP_OP. */ +- void (*help_in1)(DisasContext *, DisasOps *); +- void (*help_in2)(DisasContext *, DisasOps *); +- void (*help_prep)(DisasContext *, DisasOps *); +- +- /* +- * Post-process output after HELP_OP. +- * Note that these are not called if HELP_OP returns DISAS_NORETURN. +- */ +- void (*help_wout)(DisasContext *, DisasOps *); +- void (*help_cout)(DisasContext *, DisasOps *); +- +- /* Implement the operation itself. */ +- DisasJumpType (*help_op)(DisasContext *, DisasOps *); +- +- uint64_t data; +-}; +- +-/* ====================================================================== */ +-/* Miscellaneous helpers, used by several operations. 
*/ +- +-static void help_l2_shift(DisasContext *s, DisasOps *o, int mask) +-{ +- int b2 = get_field(s, b2); +- int d2 = get_field(s, d2); +- +- if (b2 == 0) { +- o->in2 = tcg_const_i64(d2 & mask); +- } else { +- o->in2 = get_address(s, 0, b2, d2); +- tcg_gen_andi_i64(o->in2, o->in2, mask); +- } +-} +- +-static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest) +-{ +- if (dest == s->pc_tmp) { +- per_branch(s, true); +- return DISAS_NEXT; +- } +- if (use_goto_tb(s, dest)) { +- update_cc_op(s); +- per_breaking_event(s); +- tcg_gen_goto_tb(0); +- tcg_gen_movi_i64(psw_addr, dest); +- tcg_gen_exit_tb(s->base.tb, 0); +- return DISAS_GOTO_TB; +- } else { +- tcg_gen_movi_i64(psw_addr, dest); +- per_branch(s, false); +- return DISAS_PC_UPDATED; +- } +-} +- +-static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, +- bool is_imm, int imm, TCGv_i64 cdest) +-{ +- DisasJumpType ret; +- uint64_t dest = s->base.pc_next + 2 * imm; +- TCGLabel *lab; +- +- /* Take care of the special cases first. */ +- if (c->cond == TCG_COND_NEVER) { +- ret = DISAS_NEXT; +- goto egress; +- } +- if (is_imm) { +- if (dest == s->pc_tmp) { +- /* Branch to next. */ +- per_branch(s, true); +- ret = DISAS_NEXT; +- goto egress; +- } +- if (c->cond == TCG_COND_ALWAYS) { +- ret = help_goto_direct(s, dest); +- goto egress; +- } +- } else { +- if (!cdest) { +- /* E.g. bcr %r0 -> no branch. */ +- ret = DISAS_NEXT; +- goto egress; +- } +- if (c->cond == TCG_COND_ALWAYS) { +- tcg_gen_mov_i64(psw_addr, cdest); +- per_branch(s, false); +- ret = DISAS_PC_UPDATED; +- goto egress; +- } +- } +- +- if (use_goto_tb(s, s->pc_tmp)) { +- if (is_imm && use_goto_tb(s, dest)) { +- /* Both exits can use goto_tb. */ +- update_cc_op(s); +- +- lab = gen_new_label(); +- if (c->is_64) { +- tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab); +- } else { +- tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab); +- } +- +- /* Branch not taken. */ +- tcg_gen_goto_tb(0); +- tcg_gen_movi_i64(psw_addr, s->pc_tmp); +- tcg_gen_exit_tb(s->base.tb, 0); +- +- /* Branch taken. */ +- gen_set_label(lab); +- per_breaking_event(s); +- tcg_gen_goto_tb(1); +- tcg_gen_movi_i64(psw_addr, dest); +- tcg_gen_exit_tb(s->base.tb, 1); +- +- ret = DISAS_GOTO_TB; +- } else { +- /* Fallthru can use goto_tb, but taken branch cannot. */ +- /* Store taken branch destination before the brcond. This +- avoids having to allocate a new local temp to hold it. +- We'll overwrite this in the not taken case anyway. */ +- if (!is_imm) { +- tcg_gen_mov_i64(psw_addr, cdest); +- } +- +- lab = gen_new_label(); +- if (c->is_64) { +- tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab); +- } else { +- tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab); +- } +- +- /* Branch not taken. */ +- update_cc_op(s); +- tcg_gen_goto_tb(0); +- tcg_gen_movi_i64(psw_addr, s->pc_tmp); +- tcg_gen_exit_tb(s->base.tb, 0); +- +- gen_set_label(lab); +- if (is_imm) { +- tcg_gen_movi_i64(psw_addr, dest); +- } +- per_breaking_event(s); +- ret = DISAS_PC_UPDATED; +- } +- } else { +- /* Fallthru cannot use goto_tb. This by itself is vanishingly rare. +- Most commonly we're single-stepping or some other condition that +- disables all use of goto_tb. Just update the PC and exit. 
*/ +- +- TCGv_i64 next = tcg_const_i64(s->pc_tmp); +- if (is_imm) { +- cdest = tcg_const_i64(dest); +- } +- +- if (c->is_64) { +- tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b, +- cdest, next); +- per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b); +- } else { +- TCGv_i32 t0 = tcg_temp_new_i32(); +- TCGv_i64 t1 = tcg_temp_new_i64(); +- TCGv_i64 z = tcg_const_i64(0); +- tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b); +- tcg_gen_extu_i32_i64(t1, t0); +- tcg_temp_free_i32(t0); +- tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next); +- per_branch_cond(s, TCG_COND_NE, t1, z); +- tcg_temp_free_i64(t1); +- tcg_temp_free_i64(z); +- } +- +- if (is_imm) { +- tcg_temp_free_i64(cdest); +- } +- tcg_temp_free_i64(next); +- +- ret = DISAS_PC_UPDATED; +- } +- +- egress: +- free_compare(c); +- return ret; +-} +- +-/* ====================================================================== */ +-/* The operations. These perform the bulk of the work for any insn, +- usually after the operands have been loaded and output initialized. */ +- +-static DisasJumpType op_abs(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_abs_i64(o->out, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_absf32(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_absf64(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_absf128(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull); +- tcg_gen_mov_i64(o->out2, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_add(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_add_i64(o->out, o->in1, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_addu64(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_movi_i64(cc_src, 0); +- tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src); +- return DISAS_NEXT; +-} +- +-/* Compute carry into cc_src. */ +-static void compute_carry(DisasContext *s) +-{ +- switch (s->cc_op) { +- case CC_OP_ADDU: +- /* The carry value is already in cc_src (1,0). */ +- break; +- case CC_OP_SUBU: +- tcg_gen_addi_i64(cc_src, cc_src, 1); +- break; +- default: +- gen_op_calc_cc(s); +- /* fall through */ +- case CC_OP_STATIC: +- /* The carry flag is the msb of CC; compute into cc_src. */ +- tcg_gen_extu_i32_i64(cc_src, cc_op); +- tcg_gen_shri_i64(cc_src, cc_src, 1); +- break; +- } +-} +- +-static DisasJumpType op_addc32(DisasContext *s, DisasOps *o) +-{ +- compute_carry(s); +- tcg_gen_add_i64(o->out, o->in1, o->in2); +- tcg_gen_add_i64(o->out, o->out, cc_src); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_addc64(DisasContext *s, DisasOps *o) +-{ +- compute_carry(s); +- +- TCGv_i64 zero = tcg_const_i64(0); +- tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero); +- tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero); +- tcg_temp_free_i64(zero); +- +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_asi(DisasContext *s, DisasOps *o) +-{ +- bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45); +- +- o->in1 = tcg_temp_new_i64(); +- if (non_atomic) { +- tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data); +- } else { +- /* Perform the atomic addition in memory. */ +- tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s), +- s->insn->data); +- } +- +- /* Recompute also for atomic case: needed for setting CC. 
*/ +- tcg_gen_add_i64(o->out, o->in1, o->in2); +- +- if (non_atomic) { +- tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data); +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o) +-{ +- bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45); +- +- o->in1 = tcg_temp_new_i64(); +- if (non_atomic) { +- tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data); +- } else { +- /* Perform the atomic addition in memory. */ +- tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s), +- s->insn->data); +- } +- +- /* Recompute also for atomic case: needed for setting CC. */ +- tcg_gen_movi_i64(cc_src, 0); +- tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src); +- +- if (non_atomic) { +- tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data); +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_aeb(DisasContext *s, DisasOps *o) +-{ +- gen_helper_aeb(o->out, cpu_env, o->in1, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_adb(DisasContext *s, DisasOps *o) +-{ +- gen_helper_adb(o->out, cpu_env, o->in1, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_axb(DisasContext *s, DisasOps *o) +-{ +- gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2); +- return_low128(o->out2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_and(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_and_i64(o->out, o->in1, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_andi(DisasContext *s, DisasOps *o) +-{ +- int shift = s->insn->data & 0xff; +- int size = s->insn->data >> 8; +- uint64_t mask = ((1ull << size) - 1) << shift; +- +- assert(!o->g_in2); +- tcg_gen_shli_i64(o->in2, o->in2, shift); +- tcg_gen_ori_i64(o->in2, o->in2, ~mask); +- tcg_gen_and_i64(o->out, o->in1, o->in2); +- +- /* Produce the CC from only the bits manipulated. */ +- tcg_gen_andi_i64(cc_dst, o->out, mask); +- set_cc_nz_u64(s, cc_dst); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_ni(DisasContext *s, DisasOps *o) +-{ +- o->in1 = tcg_temp_new_i64(); +- +- if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { +- tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data); +- } else { +- /* Perform the atomic operation in memory. */ +- tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s), +- s->insn->data); +- } +- +- /* Recompute also for atomic case: needed for setting CC. 
*/ +- tcg_gen_and_i64(o->out, o->in1, o->in2); +- +- if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { +- tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data); +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_bas(DisasContext *s, DisasOps *o) +-{ +- pc_to_link_info(o->out, s, s->pc_tmp); +- if (o->in2) { +- tcg_gen_mov_i64(psw_addr, o->in2); +- per_branch(s, false); +- return DISAS_PC_UPDATED; +- } else { +- return DISAS_NEXT; +- } +-} +- +-static void save_link_info(DisasContext *s, DisasOps *o) +-{ +- TCGv_i64 t; +- +- if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) { +- pc_to_link_info(o->out, s, s->pc_tmp); +- return; +- } +- gen_op_calc_cc(s); +- tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull); +- tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp); +- t = tcg_temp_new_i64(); +- tcg_gen_shri_i64(t, psw_mask, 16); +- tcg_gen_andi_i64(t, t, 0x0f000000); +- tcg_gen_or_i64(o->out, o->out, t); +- tcg_gen_extu_i32_i64(t, cc_op); +- tcg_gen_shli_i64(t, t, 28); +- tcg_gen_or_i64(o->out, o->out, t); +- tcg_temp_free_i64(t); +-} +- +-static DisasJumpType op_bal(DisasContext *s, DisasOps *o) +-{ +- save_link_info(s, o); +- if (o->in2) { +- tcg_gen_mov_i64(psw_addr, o->in2); +- per_branch(s, false); +- return DISAS_PC_UPDATED; +- } else { +- return DISAS_NEXT; +- } +-} +- +-static DisasJumpType op_basi(DisasContext *s, DisasOps *o) +-{ +- pc_to_link_info(o->out, s, s->pc_tmp); +- return help_goto_direct(s, s->base.pc_next + 2 * get_field(s, i2)); +-} +- +-static DisasJumpType op_bc(DisasContext *s, DisasOps *o) +-{ +- int m1 = get_field(s, m1); +- bool is_imm = have_field(s, i2); +- int imm = is_imm ? get_field(s, i2) : 0; +- DisasCompare c; +- +- /* BCR with R2 = 0 causes no branching */ +- if (have_field(s, r2) && get_field(s, r2) == 0) { +- if (m1 == 14) { +- /* Perform serialization */ +- /* FIXME: check for fast-BCR-serialization facility */ +- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); +- } +- if (m1 == 15) { +- /* Perform serialization */ +- /* FIXME: perform checkpoint-synchronisation */ +- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); +- } +- return DISAS_NEXT; +- } +- +- disas_jcc(s, &c, m1); +- return help_branch(s, &c, is_imm, imm, o->in2); +-} +- +-static DisasJumpType op_bct32(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- bool is_imm = have_field(s, i2); +- int imm = is_imm ? get_field(s, i2) : 0; +- DisasCompare c; +- TCGv_i64 t; +- +- c.cond = TCG_COND_NE; +- c.is_64 = false; +- c.g1 = false; +- c.g2 = false; +- +- t = tcg_temp_new_i64(); +- tcg_gen_subi_i64(t, regs[r1], 1); +- store_reg32_i64(r1, t); +- c.u.s32.a = tcg_temp_new_i32(); +- c.u.s32.b = tcg_const_i32(0); +- tcg_gen_extrl_i64_i32(c.u.s32.a, t); +- tcg_temp_free_i64(t); +- +- return help_branch(s, &c, is_imm, imm, o->in2); +-} +- +-static DisasJumpType op_bcth(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- int imm = get_field(s, i2); +- DisasCompare c; +- TCGv_i64 t; +- +- c.cond = TCG_COND_NE; +- c.is_64 = false; +- c.g1 = false; +- c.g2 = false; +- +- t = tcg_temp_new_i64(); +- tcg_gen_shri_i64(t, regs[r1], 32); +- tcg_gen_subi_i64(t, t, 1); +- store_reg32h_i64(r1, t); +- c.u.s32.a = tcg_temp_new_i32(); +- c.u.s32.b = tcg_const_i32(0); +- tcg_gen_extrl_i64_i32(c.u.s32.a, t); +- tcg_temp_free_i64(t); +- +- return help_branch(s, &c, 1, imm, o->in2); +-} +- +-static DisasJumpType op_bct64(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- bool is_imm = have_field(s, i2); +- int imm = is_imm ? 
get_field(s, i2) : 0; +- DisasCompare c; +- +- c.cond = TCG_COND_NE; +- c.is_64 = true; +- c.g1 = true; +- c.g2 = false; +- +- tcg_gen_subi_i64(regs[r1], regs[r1], 1); +- c.u.s64.a = regs[r1]; +- c.u.s64.b = tcg_const_i64(0); +- +- return help_branch(s, &c, is_imm, imm, o->in2); +-} +- +-static DisasJumpType op_bx32(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- int r3 = get_field(s, r3); +- bool is_imm = have_field(s, i2); +- int imm = is_imm ? get_field(s, i2) : 0; +- DisasCompare c; +- TCGv_i64 t; +- +- c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT); +- c.is_64 = false; +- c.g1 = false; +- c.g2 = false; +- +- t = tcg_temp_new_i64(); +- tcg_gen_add_i64(t, regs[r1], regs[r3]); +- c.u.s32.a = tcg_temp_new_i32(); +- c.u.s32.b = tcg_temp_new_i32(); +- tcg_gen_extrl_i64_i32(c.u.s32.a, t); +- tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]); +- store_reg32_i64(r1, t); +- tcg_temp_free_i64(t); +- +- return help_branch(s, &c, is_imm, imm, o->in2); +-} +- +-static DisasJumpType op_bx64(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- int r3 = get_field(s, r3); +- bool is_imm = have_field(s, i2); +- int imm = is_imm ? get_field(s, i2) : 0; +- DisasCompare c; +- +- c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT); +- c.is_64 = true; +- +- if (r1 == (r3 | 1)) { +- c.u.s64.b = load_reg(r3 | 1); +- c.g2 = false; +- } else { +- c.u.s64.b = regs[r3 | 1]; +- c.g2 = true; +- } +- +- tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]); +- c.u.s64.a = regs[r1]; +- c.g1 = true; +- +- return help_branch(s, &c, is_imm, imm, o->in2); +-} +- +-static DisasJumpType op_cj(DisasContext *s, DisasOps *o) +-{ +- int imm, m3 = get_field(s, m3); +- bool is_imm; +- DisasCompare c; +- +- c.cond = ltgt_cond[m3]; +- if (s->insn->data) { +- c.cond = tcg_unsigned_cond(c.cond); +- } +- c.is_64 = c.g1 = c.g2 = true; +- c.u.s64.a = o->in1; +- c.u.s64.b = o->in2; +- +- is_imm = have_field(s, i4); +- if (is_imm) { +- imm = get_field(s, i4); +- } else { +- imm = 0; +- o->out = get_address(s, 0, get_field(s, b4), +- get_field(s, d4)); +- } +- +- return help_branch(s, &c, is_imm, imm, o->out); +-} +- +-static DisasJumpType op_ceb(DisasContext *s, DisasOps *o) +-{ +- gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_cdb(DisasContext *s, DisasOps *o) +-{ +- gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_cxb(DisasContext *s, DisasOps *o) +-{ +- gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe, +- bool m4_with_fpe) +-{ +- const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT); +- uint8_t m3 = get_field(s, m3); +- uint8_t m4 = get_field(s, m4); +- +- /* m3 field was introduced with FPE */ +- if (!fpe && m3_with_fpe) { +- m3 = 0; +- } +- /* m4 field was introduced with FPE */ +- if (!fpe && m4_with_fpe) { +- m4 = 0; +- } +- +- /* Check for valid rounding modes. Mode 3 was introduced later. 
*/ +- if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return NULL; +- } +- +- return tcg_const_i32(deposit32(m3, 4, 4, m4)); +-} +- +-static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 m34 = fpinst_extract_m34(s, false, true); +- +- if (!m34) { +- return DISAS_NORETURN; +- } +- gen_helper_cfeb(o->out, cpu_env, o->in2, m34); +- tcg_temp_free_i32(m34); +- gen_set_cc_nz_f32(s, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 m34 = fpinst_extract_m34(s, false, true); +- +- if (!m34) { +- return DISAS_NORETURN; +- } +- gen_helper_cfdb(o->out, cpu_env, o->in2, m34); +- tcg_temp_free_i32(m34); +- gen_set_cc_nz_f64(s, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 m34 = fpinst_extract_m34(s, false, true); +- +- if (!m34) { +- return DISAS_NORETURN; +- } +- gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34); +- tcg_temp_free_i32(m34); +- gen_set_cc_nz_f128(s, o->in1, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 m34 = fpinst_extract_m34(s, false, true); +- +- if (!m34) { +- return DISAS_NORETURN; +- } +- gen_helper_cgeb(o->out, cpu_env, o->in2, m34); +- tcg_temp_free_i32(m34); +- gen_set_cc_nz_f32(s, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 m34 = fpinst_extract_m34(s, false, true); +- +- if (!m34) { +- return DISAS_NORETURN; +- } +- gen_helper_cgdb(o->out, cpu_env, o->in2, m34); +- tcg_temp_free_i32(m34); +- gen_set_cc_nz_f64(s, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 m34 = fpinst_extract_m34(s, false, true); +- +- if (!m34) { +- return DISAS_NORETURN; +- } +- gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34); +- tcg_temp_free_i32(m34); +- gen_set_cc_nz_f128(s, o->in1, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 m34 = fpinst_extract_m34(s, false, false); +- +- if (!m34) { +- return DISAS_NORETURN; +- } +- gen_helper_clfeb(o->out, cpu_env, o->in2, m34); +- tcg_temp_free_i32(m34); +- gen_set_cc_nz_f32(s, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 m34 = fpinst_extract_m34(s, false, false); +- +- if (!m34) { +- return DISAS_NORETURN; +- } +- gen_helper_clfdb(o->out, cpu_env, o->in2, m34); +- tcg_temp_free_i32(m34); +- gen_set_cc_nz_f64(s, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 m34 = fpinst_extract_m34(s, false, false); +- +- if (!m34) { +- return DISAS_NORETURN; +- } +- gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34); +- tcg_temp_free_i32(m34); +- gen_set_cc_nz_f128(s, o->in1, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 m34 = fpinst_extract_m34(s, false, false); +- +- if (!m34) { +- return DISAS_NORETURN; +- } +- gen_helper_clgeb(o->out, cpu_env, o->in2, m34); +- tcg_temp_free_i32(m34); +- gen_set_cc_nz_f32(s, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 m34 = fpinst_extract_m34(s, false, false); +- +- if (!m34) { +- return DISAS_NORETURN; +- } +- gen_helper_clgdb(o->out, cpu_env, o->in2, m34); +- 
+-    tcg_temp_free_i32(m34);
+-    gen_set_cc_nz_f64(s, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    gen_set_cc_nz_f128(s, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    return_low128(o->out2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    return_low128(o->out2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
+-{
+-    int r2 = get_field(s, r2);
+-    TCGv_i64 len = tcg_temp_new_i64();
+-
+-    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
+-    set_cc_static(s);
+-    return_low128(o->out);
+-
+-    tcg_gen_add_i64(regs[r2], regs[r2], len);
+-    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
+-    tcg_temp_free_i64(len);
+-
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
+-{
+-    int l = get_field(s, l1);
+-    TCGv_i32 vl;
+-
+-    switch (l + 1) {
+-    case 1:
+-        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
+-        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
+-        break;
+-    case 2:
+-        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
+-        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
+-        break;
+-    case 4:
+-        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
+-        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
+-        break;
+-    case 8:
+-        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
+-        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
+-        break;
+-    default:
+-        vl = tcg_const_i32(l);
+-        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
+-        tcg_temp_free_i32(vl);
+-        set_cc_static(s);
+-        return DISAS_NEXT;
+-    }
+-    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    int r2 = get_field(s, r2);
+-    TCGv_i32 t1, t2;
+-
+-    /* r1 and r2 must be even. */
+-    if (r1 & 1 || r2 & 1) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-
+-    t1 = tcg_const_i32(r1);
+-    t2 = tcg_const_i32(r2);
+-    gen_helper_clcl(cc_op, cpu_env, t1, t2);
+-    tcg_temp_free_i32(t1);
+-    tcg_temp_free_i32(t2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    int r3 = get_field(s, r3);
+-    TCGv_i32 t1, t3;
+-
+-    /* r1 and r3 must be even. */
+-    if (r1 & 1 || r3 & 1) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-
+-    t1 = tcg_const_i32(r1);
+-    t3 = tcg_const_i32(r3);
+-    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
+-    tcg_temp_free_i32(t1);
+-    tcg_temp_free_i32(t3);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    int r3 = get_field(s, r3);
+-    TCGv_i32 t1, t3;
+-
+-    /* r1 and r3 must be even. */
+-    if (r1 & 1 || r3 & 1) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-
+-    t1 = tcg_const_i32(r1);
+-    t3 = tcg_const_i32(r3);
+-    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
+-    tcg_temp_free_i32(t1);
+-    tcg_temp_free_i32(t3);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
+-    TCGv_i32 t1 = tcg_temp_new_i32();
+-    tcg_gen_extrl_i64_i32(t1, o->in1);
+-    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
+-    set_cc_static(s);
+-    tcg_temp_free_i32(t1);
+-    tcg_temp_free_i32(m3);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
+-    set_cc_static(s);
+-    return_low128(o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 t = tcg_temp_new_i64();
+-    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
+-    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
+-    tcg_gen_or_i64(o->out, o->out, t);
+-    tcg_temp_free_i64(t);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
+-{
+-    int d2 = get_field(s, d2);
+-    int b2 = get_field(s, b2);
+-    TCGv_i64 addr, cc;
+-
+-    /* Note that in1 = R3 (new value) and
+-       in2 = (zero-extended) R1 (expected value). */
+-
+-    addr = get_address(s, 0, b2, d2);
+-    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
+-                               get_mem_index(s), s->insn->data | MO_ALIGN);
+-    tcg_temp_free_i64(addr);
+-
+-    /* Are the memory and expected values (un)equal? Note that this setcond
+-       produces the output CC value, thus the NE sense of the test. */
+-    cc = tcg_temp_new_i64();
+-    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
+-    tcg_gen_extrl_i64_i32(cc_op, cc);
+-    tcg_temp_free_i64(cc);
+-    set_cc_static(s);
+-
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    int r3 = get_field(s, r3);
+-    int d2 = get_field(s, d2);
+-    int b2 = get_field(s, b2);
+-    DisasJumpType ret = DISAS_NEXT;
+-    TCGv_i64 addr;
+-    TCGv_i32 t_r1, t_r3;
+-
+-    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
+-    addr = get_address(s, 0, b2, d2);
+-    t_r1 = tcg_const_i32(r1);
+-    t_r3 = tcg_const_i32(r3);
+-    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+-        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
+-    } else if (HAVE_CMPXCHG128) {
+-        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
+-    } else {
+-        gen_helper_exit_atomic(cpu_env);
+-        ret = DISAS_NORETURN;
+-    }
+-    tcg_temp_free_i64(addr);
+-    tcg_temp_free_i32(t_r1);
+-    tcg_temp_free_i32(t_r3);
+-
+-    set_cc_static(s);
+-    return ret;
+-}
+-
+-static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
+-{
+-    int r3 = get_field(s, r3);
+-    TCGv_i32 t_r3 = tcg_const_i32(r3);
+-
+-    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+-        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
+-    } else {
+-        gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
+-    }
+-    tcg_temp_free_i32(t_r3);
+-
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
+-{
+-    MemOp mop = s->insn->data;
+-    TCGv_i64 addr, old, cc;
+-    TCGLabel *lab = gen_new_label();
+-
+-    /* Note that in1 = R1 (zero-extended expected value),
+-       out = R1 (original reg), out2 = R1+1 (new value). */
+-
+-    addr = tcg_temp_new_i64();
+-    old = tcg_temp_new_i64();
+-    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
+-    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
+-                               get_mem_index(s), mop | MO_ALIGN);
+-    tcg_temp_free_i64(addr);
+-
+-    /* Are the memory and expected values (un)equal? */
+-    cc = tcg_temp_new_i64();
+-    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
+-    tcg_gen_extrl_i64_i32(cc_op, cc);
+-
+-    /* Write back the output now, so that it happens before the
+-       following branch, so that we don't need local temps. */
+-    if ((mop & MO_SIZE) == MO_32) {
+-        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
+-    } else {
+-        tcg_gen_mov_i64(o->out, old);
+-    }
+-    tcg_temp_free_i64(old);
+-
+-    /* If the comparison was equal, and the LSB of R2 was set,
+-       then we need to flush the TLB (for all cpus). */
+-    tcg_gen_xori_i64(cc, cc, 1);
+-    tcg_gen_and_i64(cc, cc, o->in2);
+-    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
+-    tcg_temp_free_i64(cc);
+-
+-    gen_helper_purge(cpu_env);
+-    gen_set_label(lab);
+-
+-    return DISAS_NEXT;
+-}
+-#endif
+-
+-static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 t1 = tcg_temp_new_i64();
+-    TCGv_i32 t2 = tcg_temp_new_i32();
+-    tcg_gen_extrl_i64_i32(t2, o->in1);
+-    gen_helper_cvd(t1, t2);
+-    tcg_temp_free_i32(t2);
+-    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
+-    tcg_temp_free_i64(t1);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
+-{
+-    int m3 = get_field(s, m3);
+-    TCGLabel *lab = gen_new_label();
+-    TCGCond c;
+-
+-    c = tcg_invert_cond(ltgt_cond[m3]);
+-    if (s->insn->data) {
+-        c = tcg_unsigned_cond(c);
+-    }
+-    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
+-
+-    /* Trap. */
+-    gen_trap(s);
+-
+-    gen_set_label(lab);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
+-{
+-    int m3 = get_field(s, m3);
+-    int r1 = get_field(s, r1);
+-    int r2 = get_field(s, r2);
+-    TCGv_i32 tr1, tr2, chk;
+-
+-    /* R1 and R2 must both be even. */
+-    if ((r1 | r2) & 1) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
+-        m3 = 0;
+-    }
+-
+-    tr1 = tcg_const_i32(r1);
+-    tr2 = tcg_const_i32(r2);
+-    chk = tcg_const_i32(m3);
+-
+-    switch (s->insn->data) {
+-    case 12:
+-        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
+-        break;
+-    case 14:
+-        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
+-        break;
+-    case 21:
+-        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
+-        break;
+-    case 24:
+-        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
+-        break;
+-    case 41:
+-        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
+-        break;
+-    case 42:
+-        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
+-        break;
+-    default:
+-        g_assert_not_reached();
+-    }
+-
+-    tcg_temp_free_i32(tr1);
+-    tcg_temp_free_i32(tr2);
+-    tcg_temp_free_i32(chk);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+-    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+-    TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
+-
+-    gen_helper_diag(cpu_env, r1, r3, func_code);
+-
+-    tcg_temp_free_i32(func_code);
+-    tcg_temp_free_i32(r3);
+-    tcg_temp_free_i32(r1);
+-    return DISAS_NEXT;
+-}
+-#endif
+-
+-static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
+-    return_low128(o->out);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
+-    return_low128(o->out);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
+-    return_low128(o->out);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
+-    return_low128(o->out);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+-    return_low128(o->out2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
+-{
+-    int r2 = get_field(s, r2);
+-    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
+-{
+-    /* No cache information provided. */
+-    tcg_gen_movi_i64(o->out, -1);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    int r2 = get_field(s, r2);
+-    TCGv_i64 t = tcg_temp_new_i64();
+-
+-    /* Note the "subsequently" in the PoO, which implies a defined result
+-       if r1 == r2. Thus we cannot defer these writes to an output hook. */
+-    tcg_gen_shri_i64(t, psw_mask, 32);
+-    store_reg32_i64(r1, t);
+-    if (r2 != 0) {
+-        store_reg32_i64(r2, psw_mask);
+-    }
+-
+-    tcg_temp_free_i64(t);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    TCGv_i32 ilen;
+-    TCGv_i64 v1;
+-
+-    /* Nested EXECUTE is not allowed. */
+-    if (unlikely(s->ex_value)) {
+-        gen_program_exception(s, PGM_EXECUTE);
+-        return DISAS_NORETURN;
+-    }
+-
+-    update_psw_addr(s);
+-    update_cc_op(s);
+-
+-    if (r1 == 0) {
+-        v1 = tcg_const_i64(0);
+-    } else {
+-        v1 = regs[r1];
+-    }
+-
+-    ilen = tcg_const_i32(s->ilen);
+-    gen_helper_ex(cpu_env, ilen, v1, o->in2);
+-    tcg_temp_free_i32(ilen);
+-
+-    if (r1 == 0) {
+-        tcg_temp_free_i64(v1);
+-    }
+-
+-    return DISAS_PC_CC_UPDATED;
+-}
+-
+-static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
+-    return_low128(o->out2);
+-    tcg_temp_free_i32(m34);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
+-{
+-    /* We'll use the original input for cc computation, since we get to
+-       compare that against 0, which ought to be better than comparing
+-       the real output against 64. It also lets cc_dst be a convenient
+-       temporary during our computation. */
+-    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
+-
+-    /* R1 = IN ? CLZ(IN) : 64. */
+-    tcg_gen_clzi_i64(o->out, o->in2, 64);
+-
+-    /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
+-       value by 64, which is undefined. But since the shift is 64 iff the
+-       input is zero, we still get the correct result after and'ing. */
+-    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
+-    tcg_gen_shr_i64(o->out2, o->out2, o->out);
+-    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
+-{
+-    int m3 = get_field(s, m3);
+-    int pos, len, base = s->insn->data;
+-    TCGv_i64 tmp = tcg_temp_new_i64();
+-    uint64_t ccm;
+-
+-    switch (m3) {
+-    case 0xf:
+-        /* Effectively a 32-bit load. */
+-        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
+-        len = 32;
+-        goto one_insert;
+-
+-    case 0xc:
+-    case 0x6:
+-    case 0x3:
+-        /* Effectively a 16-bit load. */
+-        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
+-        len = 16;
+-        goto one_insert;
+-
+-    case 0x8:
+-    case 0x4:
+-    case 0x2:
+-    case 0x1:
+-        /* Effectively an 8-bit load. */
+-        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+-        len = 8;
+-        goto one_insert;
+-
+-    one_insert:
+-        pos = base + ctz32(m3) * 8;
+-        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
+-        ccm = ((1ull << len) - 1) << pos;
+-        break;
+-
+-    default:
+-        /* This is going to be a sequence of loads and inserts. */
+-        pos = base + 32 - 8;
+-        ccm = 0;
+-        while (m3) {
+-            if (m3 & 0x8) {
+-                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+-                tcg_gen_addi_i64(o->in2, o->in2, 1);
+-                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
+-                ccm |= 0xff << pos;
+-            }
+-            m3 = (m3 << 1) & 0xf;
+-            pos -= 8;
+-        }
+-        break;
+-    }
+-
+-    tcg_gen_movi_i64(tmp, ccm);
+-    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
+-    tcg_temp_free_i64(tmp);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
+-{
+-    int shift = s->insn->data & 0xff;
+-    int size = s->insn->data >> 8;
+-    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 t1, t2;
+-
+-    gen_op_calc_cc(s);
+-    t1 = tcg_temp_new_i64();
+-    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
+-    t2 = tcg_temp_new_i64();
+-    tcg_gen_extu_i32_i64(t2, cc_op);
+-    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
+-    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
+-    tcg_temp_free_i64(t1);
+-    tcg_temp_free_i64(t2);
+-    return DISAS_NEXT;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m4;
+-
+-    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
+-        m4 = tcg_const_i32(get_field(s, m4));
+-    } else {
+-        m4 = tcg_const_i32(0);
+-    }
+-    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
+-    tcg_temp_free_i32(m4);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m4;
+-
+-    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
+-        m4 = tcg_const_i32(get_field(s, m4));
+-    } else {
+-        m4 = tcg_const_i32(0);
+-    }
+-    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
+-    tcg_temp_free_i32(m4);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_iske(o->out, cpu_env, o->in2);
+-    return DISAS_NEXT;
+-}
+-#endif
+-
+-static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
+-    int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
+-    int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
+-    TCGv_i32 t_r1, t_r2, t_r3, type;
+-
+-    switch (s->insn->data) {
+-    case S390_FEAT_TYPE_KMA:
+-        if (r3 == r1 || r3 == r2) {
+-            gen_program_exception(s, PGM_SPECIFICATION);
+-            return DISAS_NORETURN;
+-        }
+-        /* FALL THROUGH */
+-    case S390_FEAT_TYPE_KMCTR:
+-        if (r3 & 1 || !r3) {
+-            gen_program_exception(s, PGM_SPECIFICATION);
+-            return DISAS_NORETURN;
+-        }
+-        /* FALL THROUGH */
+-    case S390_FEAT_TYPE_PPNO:
+-    case S390_FEAT_TYPE_KMF:
+-    case S390_FEAT_TYPE_KMC:
+-    case S390_FEAT_TYPE_KMO:
+-    case S390_FEAT_TYPE_KM:
+-        if (r1 & 1 || !r1) {
+-            gen_program_exception(s, PGM_SPECIFICATION);
+-            return DISAS_NORETURN;
+-        }
+-        /* FALL THROUGH */
+-    case S390_FEAT_TYPE_KMAC:
+-    case S390_FEAT_TYPE_KIMD:
+-    case S390_FEAT_TYPE_KLMD:
+-        if (r2 & 1 || !r2) {
+-            gen_program_exception(s, PGM_SPECIFICATION);
+-            return DISAS_NORETURN;
+-        }
+-        /* FALL THROUGH */
+-    case S390_FEAT_TYPE_PCKMO:
+-    case S390_FEAT_TYPE_PCC:
+-        break;
+-    default:
+-        g_assert_not_reached();
+-    };
+-
+-    t_r1 = tcg_const_i32(r1);
+-    t_r2 = tcg_const_i32(r2);
+-    t_r3 = tcg_const_i32(r3);
+-    type = tcg_const_i32(s->insn->data);
+-    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
+-    set_cc_static(s);
+-    tcg_temp_free_i32(t_r1);
+-    tcg_temp_free_i32(t_r2);
+-    tcg_temp_free_i32(t_r3);
+-    tcg_temp_free_i32(type);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
+-{
+-    /* The real output is indeed the original value in memory;
+-       recompute the addition for the computation of CC. */
+-    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+-                                 s->insn->data | MO_ALIGN);
+-    /* However, we need to recompute the addition for setting CC. */
+-    tcg_gen_add_i64(o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
+-{
+-    /* The real output is indeed the original value in memory;
+-       recompute the addition for the computation of CC. */
+-    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+-                                 s->insn->data | MO_ALIGN);
+-    /* However, we need to recompute the operation for setting CC. */
+-    tcg_gen_and_i64(o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
+-{
+-    /* The real output is indeed the original value in memory;
+-       recompute the addition for the computation of CC. */
+-    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+-                                s->insn->data | MO_ALIGN);
+-    /* However, we need to recompute the operation for setting CC. */
+-    tcg_gen_or_i64(o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
+-{
+-    /* The real output is indeed the original value in memory;
+-       recompute the addition for the computation of CC. */
+-    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
+-                                 s->insn->data | MO_ALIGN);
+-    /* However, we need to recompute the operation for setting CC. */
+-    tcg_gen_xor_i64(o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_ldeb(o->out, cpu_env, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
+-
+-    if (!m34) {
+-        return DISAS_NORETURN;
+-    }
+-    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
+-    tcg_temp_free_i32(m34);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_lxdb(o->out, cpu_env, o->in2);
+-    return_low128(o->out2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_lxeb(o->out, cpu_env, o->in2);
+-    return_low128(o->out2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_shli_i64(o->out, o->in2, 32);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
+-{
+-    TCGLabel *lab = gen_new_label();
+-    store_reg32_i64(get_field(s, r1), o->in2);
+-    /* The value is stored even in case of trap. */
+-    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
+-    gen_trap(s);
+-    gen_set_label(lab);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
+-{
+-    TCGLabel *lab = gen_new_label();
+-    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+-    /* The value is stored even in case of trap. */
+-    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+-    gen_trap(s);
+-    gen_set_label(lab);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
+-{
+-    TCGLabel *lab = gen_new_label();
+-    store_reg32h_i64(get_field(s, r1), o->in2);
+-    /* The value is stored even in case of trap. */
+-    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
+-    gen_trap(s);
+-    gen_set_label(lab);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
+-{
+-    TCGLabel *lab = gen_new_label();
+-    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+-    /* The value is stored even in case of trap. */
+-    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+-    gen_trap(s);
+-    gen_set_label(lab);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
+-{
+-    TCGLabel *lab = gen_new_label();
+-    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
+-    /* The value is stored even in case of trap. */
+-    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+-    gen_trap(s);
+-    gen_set_label(lab);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
+-{
+-    DisasCompare c;
+-
+-    disas_jcc(s, &c, get_field(s, m3));
+-
+-    if (c.is_64) {
+-        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
+-                            o->in2, o->in1);
+-        free_compare(&c);
+-    } else {
+-        TCGv_i32 t32 = tcg_temp_new_i32();
+-        TCGv_i64 t, z;
+-
+-        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
+-        free_compare(&c);
+-
+-        t = tcg_temp_new_i64();
+-        tcg_gen_extu_i32_i64(t, t32);
+-        tcg_temp_free_i32(t32);
+-
+-        z = tcg_const_i64(0);
+-        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
+-        tcg_temp_free_i64(t);
+-        tcg_temp_free_i64(z);
+-    }
+-
+-    return DISAS_NEXT;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+-    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+-    gen_helper_lctl(cpu_env, r1, o->in2, r3);
+-    tcg_temp_free_i32(r1);
+-    tcg_temp_free_i32(r3);
+-    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
+-    return DISAS_PC_STALE_NOCHAIN;
+-}
+-
+-static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+-    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+-    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
+-    tcg_temp_free_i32(r1);
+-    tcg_temp_free_i32(r3);
+-    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
+-    return DISAS_PC_STALE_NOCHAIN;
+-}
+-
+-static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_lra(o->out, cpu_env, o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 t1, t2;
+-
+-    per_breaking_event(s);
+-
+-    t1 = tcg_temp_new_i64();
+-    t2 = tcg_temp_new_i64();
+-    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
+-                        MO_TEUL | MO_ALIGN_8);
+-    tcg_gen_addi_i64(o->in2, o->in2, 4);
+-    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
+-    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
+-    tcg_gen_shli_i64(t1, t1, 32);
+-    gen_helper_load_psw(cpu_env, t1, t2);
+-    tcg_temp_free_i64(t1);
+-    tcg_temp_free_i64(t2);
+-    return DISAS_NORETURN;
+-}
+-
+-static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 t1, t2;
+-
+-    per_breaking_event(s);
+-
+-    t1 = tcg_temp_new_i64();
+-    t2 = tcg_temp_new_i64();
+-    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
+-                        MO_TEQ | MO_ALIGN_8);
+-    tcg_gen_addi_i64(o->in2, o->in2, 8);
+-    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
+-    gen_helper_load_psw(cpu_env, t1, t2);
+-    tcg_temp_free_i64(t1);
+-    tcg_temp_free_i64(t2);
+-    return DISAS_NORETURN;
+-}
+-#endif
+-
+-static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+-    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+-    gen_helper_lam(cpu_env, r1, o->in2, r3);
+-    tcg_temp_free_i32(r1);
+-    tcg_temp_free_i32(r3);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    int r3 = get_field(s, r3);
+-    TCGv_i64 t1, t2;
+-
+-    /* Only one register to read. */
+-    t1 = tcg_temp_new_i64();
+-    if (unlikely(r1 == r3)) {
+-        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+-        store_reg32_i64(r1, t1);
+-        tcg_temp_free(t1);
+-        return DISAS_NEXT;
+-    }
+-
+-    /* First load the values of the first and last registers to trigger
+-       possible page faults. */
+-    t2 = tcg_temp_new_i64();
+-    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+-    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
+-    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+-    store_reg32_i64(r1, t1);
+-    store_reg32_i64(r3, t2);
+-
+-    /* Only two registers to read. */
+-    if (((r1 + 1) & 15) == r3) {
+-        tcg_temp_free(t2);
+-        tcg_temp_free(t1);
+-        return DISAS_NEXT;
+-    }
+-
+-    /* Then load the remaining registers. Page fault can't occur. */
+-    r3 = (r3 - 1) & 15;
+-    tcg_gen_movi_i64(t2, 4);
+-    while (r1 != r3) {
+-        r1 = (r1 + 1) & 15;
+-        tcg_gen_add_i64(o->in2, o->in2, t2);
+-        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+-        store_reg32_i64(r1, t1);
+-    }
+-    tcg_temp_free(t2);
+-    tcg_temp_free(t1);
+-
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    int r3 = get_field(s, r3);
+-    TCGv_i64 t1, t2;
+-
+-    /* Only one register to read. */
+-    t1 = tcg_temp_new_i64();
+-    if (unlikely(r1 == r3)) {
+-        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+-        store_reg32h_i64(r1, t1);
+-        tcg_temp_free(t1);
+-        return DISAS_NEXT;
+-    }
+-
+-    /* First load the values of the first and last registers to trigger
+-       possible page faults. */
+-    t2 = tcg_temp_new_i64();
+-    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+-    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
+-    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+-    store_reg32h_i64(r1, t1);
+-    store_reg32h_i64(r3, t2);
+-
+-    /* Only two registers to read. */
+-    if (((r1 + 1) & 15) == r3) {
+-        tcg_temp_free(t2);
+-        tcg_temp_free(t1);
+-        return DISAS_NEXT;
+-    }
+-
+-    /* Then load the remaining registers. Page fault can't occur. */
+-    r3 = (r3 - 1) & 15;
+-    tcg_gen_movi_i64(t2, 4);
+-    while (r1 != r3) {
+-        r1 = (r1 + 1) & 15;
+-        tcg_gen_add_i64(o->in2, o->in2, t2);
+-        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+-        store_reg32h_i64(r1, t1);
+-    }
+-    tcg_temp_free(t2);
+-    tcg_temp_free(t1);
+-
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    int r3 = get_field(s, r3);
+-    TCGv_i64 t1, t2;
+-
+-    /* Only one register to read. */
+-    if (unlikely(r1 == r3)) {
+-        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+-        return DISAS_NEXT;
+-    }
+-
+-    /* First load the values of the first and last registers to trigger
+-       possible page faults. */
+-    t1 = tcg_temp_new_i64();
+-    t2 = tcg_temp_new_i64();
+-    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
+-    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
+-    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
+-    tcg_gen_mov_i64(regs[r1], t1);
+-    tcg_temp_free(t2);
+-
+-    /* Only two registers to read. */
+-    if (((r1 + 1) & 15) == r3) {
+-        tcg_temp_free(t1);
+-        return DISAS_NEXT;
+-    }
+-
+-    /* Then load the remaining registers. Page fault can't occur. */
+-    r3 = (r3 - 1) & 15;
+-    tcg_gen_movi_i64(t1, 8);
+-    while (r1 != r3) {
+-        r1 = (r1 + 1) & 15;
+-        tcg_gen_add_i64(o->in2, o->in2, t1);
+-        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+-    }
+-    tcg_temp_free(t1);
+-
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 a1, a2;
+-    MemOp mop = s->insn->data;
+-
+-    /* In a parallel context, stop the world and single step. */
+-    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
+-        update_psw_addr(s);
+-        update_cc_op(s);
+-        gen_exception(EXCP_ATOMIC);
+-        return DISAS_NORETURN;
+-    }
+-
+-    /* In a serial context, perform the two loads ... */
+-    a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
+-    a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
+-    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
+-    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
+-    tcg_temp_free_i64(a1);
+-    tcg_temp_free_i64(a2);
+-
+-    /* ... and indicate that we performed them while interlocked. */
+-    gen_op_movi_cc(s, 0);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
+-{
+-    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
+-        gen_helper_lpq(o->out, cpu_env, o->in2);
+-    } else if (HAVE_ATOMIC128) {
+-        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
+-    } else {
+-        gen_helper_exit_atomic(cpu_env);
+-        return DISAS_NORETURN;
+-    }
+-    return_low128(o->out2);
+-    return DISAS_NEXT;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
+-    return DISAS_NEXT;
+-}
+-#endif
+-
+-static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_andi_i64(o->out, o->in2, -256);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
+-{
+-    const int64_t block_size = (1ull << (get_field(s, m3) + 6));
+-
+-    if (get_field(s, m3) > 6) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-
+-    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
+-    tcg_gen_neg_i64(o->addr1, o->addr1);
+-    tcg_gen_movi_i64(o->out, 16);
+-    tcg_gen_umin_i64(o->out, o->out, o->addr1);
+-    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
+-{
+-#if !defined(CONFIG_USER_ONLY)
+-    TCGv_i32 i2;
+-#endif
+-    const uint16_t monitor_class = get_field(s, i2);
+-
+-    if (monitor_class & 0xff00) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-
+-#if !defined(CONFIG_USER_ONLY)
+-    i2 = tcg_const_i32(monitor_class);
+-    gen_helper_monitor_call(cpu_env, o->addr1, i2);
+-    tcg_temp_free_i32(i2);
+-#endif
+-    /* Defaults to a NOP. */
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
+-{
+-    o->out = o->in2;
+-    o->g_out = o->g_in2;
+-    o->in2 = NULL;
+-    o->g_in2 = false;
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
+-{
+-    int b2 = get_field(s, b2);
+-    TCGv ar1 = tcg_temp_new_i64();
+-
+-    o->out = o->in2;
+-    o->g_out = o->g_in2;
+-    o->in2 = NULL;
+-    o->g_in2 = false;
+-
+-    switch (s->base.tb->flags & FLAG_MASK_ASC) {
+-    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
+-        tcg_gen_movi_i64(ar1, 0);
+-        break;
+-    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
+-        tcg_gen_movi_i64(ar1, 1);
+-        break;
+-    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
+-        if (b2) {
+-            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
+-        } else {
+-            tcg_gen_movi_i64(ar1, 0);
+-        }
+-        break;
+-    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
+-        tcg_gen_movi_i64(ar1, 2);
+-        break;
+-    }
+-
+-    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
+-    tcg_temp_free_i64(ar1);
+-
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
+-{
+-    o->out = o->in1;
+-    o->out2 = o->in2;
+-    o->g_out = o->g_in1;
+-    o->g_out2 = o->g_in2;
+-    o->in1 = NULL;
+-    o->in2 = NULL;
+-    o->g_in1 = o->g_in2 = false;
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+-    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
+-    tcg_temp_free_i32(l);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+-    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
+-    tcg_temp_free_i32(l);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    int r2 = get_field(s, r2);
+-    TCGv_i32 t1, t2;
+-
+-    /* r1 and r2 must be even. */
+-    if (r1 & 1 || r2 & 1) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-
+-    t1 = tcg_const_i32(r1);
+-    t2 = tcg_const_i32(r2);
+-    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
+-    tcg_temp_free_i32(t1);
+-    tcg_temp_free_i32(t2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    int r3 = get_field(s, r3);
+-    TCGv_i32 t1, t3;
+-
+-    /* r1 and r3 must be even. */
+-    if (r1 & 1 || r3 & 1) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-
+-    t1 = tcg_const_i32(r1);
+-    t3 = tcg_const_i32(r3);
+-    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
+-    tcg_temp_free_i32(t1);
+-    tcg_temp_free_i32(t3);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    int r3 = get_field(s, r3);
+-    TCGv_i32 t1, t3;
+-
+-    /* r1 and r3 must be even. */
+-    if (r1 & 1 || r3 & 1) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-
+-    t1 = tcg_const_i32(r1);
+-    t3 = tcg_const_i32(r3);
+-    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
+-    tcg_temp_free_i32(t1);
+-    tcg_temp_free_i32(t3);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
+-{
+-    int r3 = get_field(s, r3);
+-    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, l1);
+-    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, l1);
+-    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-#endif
+-
+-static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+-    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
+-    tcg_temp_free_i32(l);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+-    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
+-    tcg_temp_free_i32(l);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
+-    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
+-
+-    gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
+-    tcg_temp_free_i32(t1);
+-    tcg_temp_free_i32(t2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
+-    TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
+-
+-    gen_helper_mvst(cc_op, cpu_env, t1, t2);
+-    tcg_temp_free_i32(t1);
+-    tcg_temp_free_i32(t2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+-    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
+-    tcg_temp_free_i32(l);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_mul_i64(o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+-    return_low128(o->out2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
+-    return_low128(o->out2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
+-    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
+-    tcg_temp_free_i64(r3);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 r3 = load_freg(get_field(s, r3));
+-    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
+-    tcg_temp_free_i64(r3);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
+-    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
+-    tcg_temp_free_i64(r3);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 r3 = load_freg(get_field(s, r3));
+-    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
+-    tcg_temp_free_i64(r3);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 z, n;
+-    z = tcg_const_i64(0);
+-    n = tcg_temp_new_i64();
+-    tcg_gen_neg_i64(n, o->in2);
+-    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
+-    tcg_temp_free_i64(n);
+-    tcg_temp_free_i64(z);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
+-    tcg_gen_mov_i64(o->out2, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+-    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
+-    tcg_temp_free_i32(l);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_neg_i64(o->out, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
+-    tcg_gen_mov_i64(o->out2, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+-    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
+-    tcg_temp_free_i32(l);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_or(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_or_i64(o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
+-{
+-    int shift = s->insn->data & 0xff;
+-    int size = s->insn->data >> 8;
+-    uint64_t mask = ((1ull << size) - 1) << shift;
+-
+-    assert(!o->g_in2);
+-    tcg_gen_shli_i64(o->in2, o->in2, shift);
+-    tcg_gen_or_i64(o->out, o->in1, o->in2);
+-
+-    /* Produce the CC from only the bits manipulated. */
+-    tcg_gen_andi_i64(cc_dst, o->out, mask);
+-    set_cc_nz_u64(s, cc_dst);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
+-{
+-    o->in1 = tcg_temp_new_i64();
+-
+-    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+-        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+-    } else {
+-        /* Perform the atomic operation in memory. */
+-        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
+-                                    s->insn->data);
+-    }
+-
+-    /* Recompute also for atomic case: needed for setting CC. */
+-    tcg_gen_or_i64(o->out, o->in1, o->in2);
+-
+-    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+-        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+-    }
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 l = tcg_const_i32(get_field(s, l1));
+-    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
+-    tcg_temp_free_i32(l);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
+-{
+-    int l2 = get_field(s, l2) + 1;
+-    TCGv_i32 l;
+-
+-    /* The length must not exceed 32 bytes. */
+-    if (l2 > 32) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-    l = tcg_const_i32(l2);
+-    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
+-    tcg_temp_free_i32(l);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
+-{
+-    int l2 = get_field(s, l2) + 1;
+-    TCGv_i32 l;
+-
+-    /* The length must be even and should not exceed 64 bytes. */
+-    if ((l2 & 1) || (l2 > 64)) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-    l = tcg_const_i32(l2);
+-    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
+-    tcg_temp_free_i32(l);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_popcnt(o->out, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_ptlb(cpu_env);
+-    return DISAS_NEXT;
+-}
+-#endif
+-
+-static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
+-{
+-    int i3 = get_field(s, i3);
+-    int i4 = get_field(s, i4);
+-    int i5 = get_field(s, i5);
+-    int do_zero = i4 & 0x80;
+-    uint64_t mask, imask, pmask;
+-    int pos, len, rot;
+-
+-    /* Adjust the arguments for the specific insn. */
+-    switch (s->fields.op2) {
+-    case 0x55: /* risbg */
+-    case 0x59: /* risbgn */
+-        i3 &= 63;
+-        i4 &= 63;
+-        pmask = ~0;
+-        break;
+-    case 0x5d: /* risbhg */
+-        i3 &= 31;
+-        i4 &= 31;
+-        pmask = 0xffffffff00000000ull;
+-        break;
+-    case 0x51: /* risblg */
+-        i3 = (i3 & 31) + 32;
+-        i4 = (i4 & 31) + 32;
+-        pmask = 0x00000000ffffffffull;
+-        break;
+-    default:
+-        g_assert_not_reached();
+-    }
+-
+-    /* MASK is the set of bits to be inserted from R2. */
+-    if (i3 <= i4) {
+-        /* [0...i3---i4...63] */
+-        mask = (-1ull >> i3) & (-1ull << (63 - i4));
+-    } else {
+-        /* [0---i4...i3---63] */
+-        mask = (-1ull >> i3) | (-1ull << (63 - i4));
+-    }
+-    /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
+-    mask &= pmask;
+-
+-    /* IMASK is the set of bits to be kept from R1. In the case of the high/low
+-       insns, we need to keep the other half of the register. */
+-    imask = ~mask | ~pmask;
+-    if (do_zero) {
+-        imask = ~pmask;
+-    }
+-
+-    len = i4 - i3 + 1;
+-    pos = 63 - i4;
+-    rot = i5 & 63;
+-
+-    /* In some cases we can implement this with extract. */
+-    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
+-        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
+-        return DISAS_NEXT;
+-    }
+-
+-    /* In some cases we can implement this with deposit. */
+-    if (len > 0 && (imask == 0 || ~mask == imask)) {
+-        /* Note that we rotate the bits to be inserted to the lsb, not to
+-           the position as described in the PoO. */
+-        rot = (rot - pos) & 63;
+-    } else {
+-        pos = -1;
+-    }
+-
+-    /* Rotate the input as necessary. */
+-    tcg_gen_rotli_i64(o->in2, o->in2, rot);
+-
+-    /* Insert the selected bits into the output. */
+-    if (pos >= 0) {
+-        if (imask == 0) {
+-            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
+-        } else {
+-            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
+-        }
+-    } else if (imask == 0) {
+-        tcg_gen_andi_i64(o->out, o->in2, mask);
+-    } else {
+-        tcg_gen_andi_i64(o->in2, o->in2, mask);
+-        tcg_gen_andi_i64(o->out, o->out, imask);
+-        tcg_gen_or_i64(o->out, o->out, o->in2);
+-    }
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
+-{
+-    int i3 = get_field(s, i3);
+-    int i4 = get_field(s, i4);
+-    int i5 = get_field(s, i5);
+-    uint64_t mask;
+-
+-    /* If this is a test-only form, arrange to discard the result. */
+-    if (i3 & 0x80) {
+-        o->out = tcg_temp_new_i64();
+-        o->g_out = false;
+-    }
+-
+-    i3 &= 63;
+-    i4 &= 63;
+-    i5 &= 63;
+-
+-    /* MASK is the set of bits to be operated on from R2.
+-       Take care for I3/I4 wraparound. */
+-    mask = ~0ull >> i3;
+-    if (i3 <= i4) {
+-        mask ^= ~0ull >> i4 >> 1;
+-    } else {
+-        mask |= ~(~0ull >> i4 >> 1);
+-    }
+-
+-    /* Rotate the input as necessary. */
+-    tcg_gen_rotli_i64(o->in2, o->in2, i5);
+-
+-    /* Operate. */
+-    switch (s->fields.op2) {
+-    case 0x54: /* AND */
+-        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
+-        tcg_gen_and_i64(o->out, o->out, o->in2);
+-        break;
+-    case 0x56: /* OR */
+-        tcg_gen_andi_i64(o->in2, o->in2, mask);
+-        tcg_gen_or_i64(o->out, o->out, o->in2);
+-        break;
+-    case 0x57: /* XOR */
+-        tcg_gen_andi_i64(o->in2, o->in2, mask);
+-        tcg_gen_xor_i64(o->out, o->out, o->in2);
+-        break;
+-    default:
+-        abort();
+-    }
+-
+-    /* Set the CC. */
+-    tcg_gen_andi_i64(cc_dst, o->out, mask);
+-    set_cc_nz_u64(s, cc_dst);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_bswap64_i64(o->out, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 t1 = tcg_temp_new_i32();
+-    TCGv_i32 t2 = tcg_temp_new_i32();
+-    TCGv_i32 to = tcg_temp_new_i32();
+-    tcg_gen_extrl_i64_i32(t1, o->in1);
+-    tcg_gen_extrl_i64_i32(t2, o->in2);
+-    tcg_gen_rotl_i32(to, t1, t2);
+-    tcg_gen_extu_i32_i64(o->out, to);
+-    tcg_temp_free_i32(t1);
+-    tcg_temp_free_i32(t2);
+-    tcg_temp_free_i32(to);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_rrbe(cc_op, cpu_env, o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sacf(cpu_env, o->in2);
+-    /* Addressing mode has changed, so end the block. */
+-    return DISAS_PC_STALE;
+-}
+-#endif
+-
+-static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
+-{
+-    int sam = s->insn->data;
+-    TCGv_i64 tsam;
+-    uint64_t mask;
+-
+-    switch (sam) {
+-    case 0:
+-        mask = 0xffffff;
+-        break;
+-    case 1:
+-        mask = 0x7fffffff;
+-        break;
+-    default:
+-        mask = -1;
+-        break;
+-    }
+-
+-    /* Bizarre but true, we check the address of the current insn for the
+-       specification exception, not the next to be executed. Thus the PoO
+-       documents that Bad Things Happen two bytes before the end. */
+-    if (s->base.pc_next & ~mask) {
+-        gen_program_exception(s, PGM_SPECIFICATION);
+-        return DISAS_NORETURN;
+-    }
+-    s->pc_tmp &= mask;
+-
+-    tsam = tcg_const_i64(sam);
+-    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
+-    tcg_temp_free_i64(tsam);
+-
+-    /* Always exit the TB, since we (may have) changed execution mode. */
+-    return DISAS_PC_STALE;
+-}
+-
+-static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
+-{
+-    int r1 = get_field(s, r1);
+-    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+-    return_low128(o->out2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sqeb(o->out, cpu_env, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sqdb(o->out, cpu_env, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
+-    return_low128(o->out2);
+-    return DISAS_NEXT;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+-    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+-    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
+-    set_cc_static(s);
+-    tcg_temp_free_i32(r1);
+-    tcg_temp_free_i32(r3);
+-    return DISAS_NEXT;
+-}
+-#endif
+-
+-static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
+-{
+-    DisasCompare c;
+-    TCGv_i64 a, h;
+-    TCGLabel *lab;
+-    int r1;
+-
+-    disas_jcc(s, &c, get_field(s, m3));
+-
+-    /* We want to store when the condition is fulfilled, so branch
+-       out when it's not */
+-    c.cond = tcg_invert_cond(c.cond);
+-
+-    lab = gen_new_label();
+-    if (c.is_64) {
+-        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
+-    } else {
+-        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
+-    }
+-    free_compare(&c);
+-
+-    r1 = get_field(s, r1);
+-    a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
+-    switch (s->insn->data) {
+-    case 1: /* STOCG */
+-        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
+-        break;
+-    case 0: /* STOC */
+-        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
+-        break;
+-    case 2: /* STOCFH */
+-        h = tcg_temp_new_i64();
+-        tcg_gen_shri_i64(h, regs[r1], 32);
+-        tcg_gen_qemu_st32(h, a, get_mem_index(s));
+-        tcg_temp_free_i64(h);
+-        break;
+-    default:
+-        g_assert_not_reached();
+-    }
+-    tcg_temp_free_i64(a);
+-
+-    gen_set_label(lab);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
+-{
+-    uint64_t sign = 1ull << s->insn->data;
+-    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
+-    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
+-    tcg_gen_shl_i64(o->out, o->in1, o->in2);
+-    /* The arithmetic left shift is curious in that it does not affect
+-       the sign bit. Copy that over from the source unchanged. */
+-    tcg_gen_andi_i64(o->out, o->out, ~sign);
+-    tcg_gen_andi_i64(o->in1, o->in1, sign);
+-    tcg_gen_or_i64(o->out, o->out, o->in1);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_shl_i64(o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_sar_i64(o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_shr_i64(o->out, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sfpc(cpu_env, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sfas(cpu_env, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
+-{
+-    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
+-    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
+-    gen_helper_srnm(cpu_env, o->addr1);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
+-{
+-    /* Bits 0-55 are are ignored. */
+-    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
+-    gen_helper_srnm(cpu_env, o->addr1);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 tmp = tcg_temp_new_i64();
+-
+-    /* Bits other than 61-63 are ignored. */
+-    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
+-
+-    /* No need to call a helper, we don't implement dfp */
+-    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
+-    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
+-    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
+-
+-    tcg_temp_free_i64(tmp);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_extrl_i64_i32(cc_op, o->in1);
+-    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
+-    set_cc_static(s);
+-
+-    tcg_gen_shri_i64(o->in1, o->in1, 24);
+-    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
+-{
+-    int b1 = get_field(s, b1);
+-    int d1 = get_field(s, d1);
+-    int b2 = get_field(s, b2);
+-    int d2 = get_field(s, d2);
+-    int r3 = get_field(s, r3);
+-    TCGv_i64 tmp = tcg_temp_new_i64();
+-
+-    /* fetch all operands first */
+-    o->in1 = tcg_temp_new_i64();
+-    tcg_gen_addi_i64(o->in1, regs[b1], d1);
+-    o->in2 = tcg_temp_new_i64();
+-    tcg_gen_addi_i64(o->in2, regs[b2], d2);
+-    o->addr1 = tcg_temp_new_i64();
+-    gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
+-
+-    /* load the third operand into r3 before modifying anything */
+-    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
+-
+-    /* subtract CPU timer from first operand and store in GR0 */
+-    gen_helper_stpt(tmp, cpu_env);
+-    tcg_gen_sub_i64(regs[0], o->in1, tmp);
+-
+-    /* store second operand in GR1 */
+-    tcg_gen_mov_i64(regs[1], o->in2);
+-
+-    tcg_temp_free_i64(tmp);
+-    return DISAS_NEXT;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_shri_i64(o->in2, o->in2, 4);
+-    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sske(cpu_env, o->in1, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
+-    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
+-    return DISAS_PC_STALE_NOCHAIN;
+-}
+-
+-static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
+-    return DISAS_NEXT;
+-}
+-#endif
+-
+-static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_stck(o->out, cpu_env);
+-    /* ??? We don't implement clock states. */
+-    gen_op_movi_cc(s, 0);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i64 c1 = tcg_temp_new_i64();
+-    TCGv_i64 c2 = tcg_temp_new_i64();
+-    TCGv_i64 todpr = tcg_temp_new_i64();
+-    gen_helper_stck(c1, cpu_env);
+-    /* 16 bit value store in an uint32_t (only valid bits set) */
+-    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
+-    /* Shift the 64-bit value into its place as a zero-extended
+-       104-bit value. Note that "bit positions 64-103 are always
+-       non-zero so that they compare differently to STCK"; we set
+-       the least significant bit to 1. */
+-    tcg_gen_shli_i64(c2, c1, 56);
+-    tcg_gen_shri_i64(c1, c1, 8);
+-    tcg_gen_ori_i64(c2, c2, 0x10000);
+-    tcg_gen_or_i64(c2, c2, todpr);
+-    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
+-    tcg_gen_addi_i64(o->in2, o->in2, 8);
+-    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
+-    tcg_temp_free_i64(c1);
+-    tcg_temp_free_i64(c2);
+-    tcg_temp_free_i64(todpr);
+-    /* ??? We don't implement clock states. */
+-    gen_op_movi_cc(s, 0);
+-    return DISAS_NEXT;
+-}
+-
+-#ifndef CONFIG_USER_ONLY
+-static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
+-    gen_helper_sck(cc_op, cpu_env, o->in1);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sckc(cpu_env, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sckpf(cpu_env, regs[0]);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_stckc(o->out, cpu_env);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+-    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+-    gen_helper_stctg(cpu_env, r1, o->in2, r3);
+-    tcg_temp_free_i32(r1);
+-    tcg_temp_free_i32(r3);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+-    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+-    gen_helper_stctl(cpu_env, r1, o->in2, r3);
+-    tcg_temp_free_i32(r1);
+-    tcg_temp_free_i32(r3);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_spt(cpu_env, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_stfl(cpu_env);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_stpt(o->out, cpu_env);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_spx(cpu_env, o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_xsch(cpu_env, regs[1]);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_csch(cpu_env, regs[1]);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_hsch(cpu_env, regs[1]);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_msch(cpu_env, regs[1], o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_rchp(cpu_env, regs[1]);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_rsch(cpu_env, regs[1]);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_sal(cpu_env, regs[1]);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
+-{
+-    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
+-    gen_op_movi_cc(s, 3);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
+-{
+-    /* The instruction is suppressed if not provided. */
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_ssch(cpu_env, regs[1], o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_stsch(cpu_env, regs[1], o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_stcrw(cpu_env, o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_tpi(cc_op, cpu_env, o->addr1);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_tsch(cpu_env, regs[1], o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_chsc(cpu_env, o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
+-    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
+-{
+-    uint64_t i2 = get_field(s, i2);
+-    TCGv_i64 t;
+-
+-    /* It is important to do what the instruction name says: STORE THEN.
+-       If we let the output hook perform the store then if we fault and
+-       restart, we'll have the wrong SYSTEM MASK in place. */
+-    t = tcg_temp_new_i64();
+-    tcg_gen_shri_i64(t, psw_mask, 56);
+-    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
+-    tcg_temp_free_i64(t);
+-
+-    if (s->fields.op == 0xac) {
+-        tcg_gen_andi_i64(psw_mask, psw_mask,
+-                         (i2 << 56) | 0x00ffffffffffffffull);
+-    } else {
+-        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
+-    }
+-
+-    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
+-    return DISAS_PC_STALE_NOCHAIN;
+-}
+-
+-static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
+-
+-    if (s->base.tb->flags & FLAG_MASK_PER) {
+-        update_psw_addr(s);
+-        gen_helper_per_store_real(cpu_env);
+-    }
+-    return DISAS_NEXT;
+-}
+-#endif
+-
+-static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
+-{
+-    gen_helper_stfle(cc_op, cpu_env, o->in2);
+-    set_cc_static(s);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
+-{
+-    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
+-{
+-    TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
+-    TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
+-    gen_helper_stam(cpu_env, r1, o->in2, r3);
+-    tcg_temp_free_i32(r1);
+-    tcg_temp_free_i32(r3);
+-    return DISAS_NEXT;
+-}
+-
+-static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
+-{
+-    int m3 = get_field(s, m3);
+-    int pos, base = s->insn->data;
+-    TCGv_i64 tmp = tcg_temp_new_i64();
+-
+-    pos = base + ctz32(m3) * 8;
+-    switch (m3) {
+-    case 0xf:
+-        /* Effectively a 32-bit store. */
+-        tcg_gen_shri_i64(tmp, o->in1, pos);
+-        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
+-        break;
+-
+-    case 0xc:
+-    case 0x6:
+-    case 0x3:
+-        /* Effectively a 16-bit store. */
+-        tcg_gen_shri_i64(tmp, o->in1, pos);
+-        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
+-        break;
+-
+-    case 0x8:
+-    case 0x4:
+-    case 0x2:
+-    case 0x1:
+-        /* Effectively an 8-bit store. */
+-        tcg_gen_shri_i64(tmp, o->in1, pos);
+-        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
+-        break;
+-
+-    default:
+-        /* This is going to be a sequence of shifts and stores.
*/ +- pos = base + 32 - 8; +- while (m3) { +- if (m3 & 0x8) { +- tcg_gen_shri_i64(tmp, o->in1, pos); +- tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s)); +- tcg_gen_addi_i64(o->in2, o->in2, 1); +- } +- m3 = (m3 << 1) & 0xf; +- pos -= 8; +- } +- break; +- } +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_stm(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- int r3 = get_field(s, r3); +- int size = s->insn->data; +- TCGv_i64 tsize = tcg_const_i64(size); +- +- while (1) { +- if (size == 8) { +- tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s)); +- } else { +- tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s)); +- } +- if (r1 == r3) { +- break; +- } +- tcg_gen_add_i64(o->in2, o->in2, tsize); +- r1 = (r1 + 1) & 15; +- } +- +- tcg_temp_free_i64(tsize); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- int r3 = get_field(s, r3); +- TCGv_i64 t = tcg_temp_new_i64(); +- TCGv_i64 t4 = tcg_const_i64(4); +- TCGv_i64 t32 = tcg_const_i64(32); +- +- while (1) { +- tcg_gen_shl_i64(t, regs[r1], t32); +- tcg_gen_qemu_st32(t, o->in2, get_mem_index(s)); +- if (r1 == r3) { +- break; +- } +- tcg_gen_add_i64(o->in2, o->in2, t4); +- r1 = (r1 + 1) & 15; +- } +- +- tcg_temp_free_i64(t); +- tcg_temp_free_i64(t4); +- tcg_temp_free_i64(t32); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_stpq(DisasContext *s, DisasOps *o) +-{ +- if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { +- gen_helper_stpq(cpu_env, o->in2, o->out2, o->out); +- } else if (HAVE_ATOMIC128) { +- gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out); +- } else { +- gen_helper_exit_atomic(cpu_env); +- return DISAS_NORETURN; +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_srst(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); +- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); +- +- gen_helper_srst(cpu_env, r1, r2); +- +- tcg_temp_free_i32(r1); +- tcg_temp_free_i32(r2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_srstu(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); +- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); +- +- gen_helper_srstu(cpu_env, r1, r2); +- +- tcg_temp_free_i32(r1); +- tcg_temp_free_i32(r2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_sub(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_sub_i64(o->out, o->in1, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_subu64(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_movi_i64(cc_src, 0); +- tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src); +- return DISAS_NEXT; +-} +- +-/* Compute borrow (0, -1) into cc_src. */ +-static void compute_borrow(DisasContext *s) +-{ +- switch (s->cc_op) { +- case CC_OP_SUBU: +- /* The borrow value is already in cc_src (0,-1). */ +- break; +- default: +- gen_op_calc_cc(s); +- /* fall through */ +- case CC_OP_STATIC: +- /* The carry flag is the msb of CC; compute into cc_src. */ +- tcg_gen_extu_i32_i64(cc_src, cc_op); +- tcg_gen_shri_i64(cc_src, cc_src, 1); +- /* fall through */ +- case CC_OP_ADDU: +- /* Convert carry (1,0) to borrow (0,-1). */ +- tcg_gen_subi_i64(cc_src, cc_src, 1); +- break; +- } +-} +- +-static DisasJumpType op_subb32(DisasContext *s, DisasOps *o) +-{ +- compute_borrow(s); +- +- /* Borrow is {0, -1}, so add to subtract. 
*/ +- tcg_gen_add_i64(o->out, o->in1, cc_src); +- tcg_gen_sub_i64(o->out, o->out, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_subb64(DisasContext *s, DisasOps *o) +-{ +- compute_borrow(s); +- +- /* +- * Borrow is {0, -1}, so add to subtract; replicate the +- * borrow input to produce 128-bit -1 for the addition. +- */ +- TCGv_i64 zero = tcg_const_i64(0); +- tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src); +- tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero); +- tcg_temp_free_i64(zero); +- +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_svc(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 t; +- +- update_psw_addr(s); +- update_cc_op(s); +- +- t = tcg_const_i32(get_field(s, i1) & 0xff); +- tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code)); +- tcg_temp_free_i32(t); +- +- t = tcg_const_i32(s->ilen); +- tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen)); +- tcg_temp_free_i32(t); +- +- gen_exception(EXCP_SVC); +- return DISAS_NORETURN; +-} +- +-static DisasJumpType op_tam(DisasContext *s, DisasOps *o) +-{ +- int cc = 0; +- +- cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0; +- cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0; +- gen_op_movi_cc(s, cc); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_tceb(DisasContext *s, DisasOps *o) +-{ +- gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o) +-{ +- gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o) +-{ +- gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-#ifndef CONFIG_USER_ONLY +- +-static DisasJumpType op_testblock(DisasContext *s, DisasOps *o) +-{ +- gen_helper_testblock(cc_op, cpu_env, o->in2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_tprot(DisasContext *s, DisasOps *o) +-{ +- gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-#endif +- +-static DisasJumpType op_tp(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1); +- gen_helper_tp(cc_op, cpu_env, o->addr1, l1); +- tcg_temp_free_i32(l1); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_tr(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 l = tcg_const_i32(get_field(s, l1)); +- gen_helper_tr(cpu_env, l, o->addr1, o->in2); +- tcg_temp_free_i32(l); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_tre(DisasContext *s, DisasOps *o) +-{ +- gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2); +- return_low128(o->out2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_trt(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 l = tcg_const_i32(get_field(s, l1)); +- gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2); +- tcg_temp_free_i32(l); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_trtr(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 l = tcg_const_i32(get_field(s, l1)); +- gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2); +- tcg_temp_free_i32(l); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_trXX(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); +- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); +- TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3); 
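+- /* Descriptive note: op_trXX covers the TROO/TROT/TRTO/TRTT family; the low two opcode bits select the source/destination character sizes, and regs[0] supplies the test character unless M3 bit 1 (with the ETF2-enhancement facility) requests that no test character be used, signalled below by tst = -1. */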
+- TCGv_i32 tst = tcg_temp_new_i32(); +- int m3 = get_field(s, m3); +- +- if (!s390_has_feat(S390_FEAT_ETF2_ENH)) { +- m3 = 0; +- } +- if (m3 & 1) { +- tcg_gen_movi_i32(tst, -1); +- } else { +- tcg_gen_extrl_i64_i32(tst, regs[0]); +- if (s->insn->opc & 3) { +- tcg_gen_ext8u_i32(tst, tst); +- } else { +- tcg_gen_ext16u_i32(tst, tst); +- } +- } +- gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes); +- +- tcg_temp_free_i32(r1); +- tcg_temp_free_i32(r2); +- tcg_temp_free_i32(sizes); +- tcg_temp_free_i32(tst); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_ts(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 t1 = tcg_const_i32(0xff); +- tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB); +- tcg_gen_extract_i32(cc_op, t1, 7, 1); +- tcg_temp_free_i32(t1); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_unpk(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 l = tcg_const_i32(get_field(s, l1)); +- gen_helper_unpk(cpu_env, l, o->addr1, o->in2); +- tcg_temp_free_i32(l); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_unpka(DisasContext *s, DisasOps *o) +-{ +- int l1 = get_field(s, l1) + 1; +- TCGv_i32 l; +- +- /* The length must not exceed 32 bytes. */ +- if (l1 > 32) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- l = tcg_const_i32(l1); +- gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2); +- tcg_temp_free_i32(l); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_unpku(DisasContext *s, DisasOps *o) +-{ +- int l1 = get_field(s, l1) + 1; +- TCGv_i32 l; +- +- /* The length must be even and should not exceed 64 bytes. */ +- if ((l1 & 1) || (l1 > 64)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- l = tcg_const_i32(l1); +- gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2); +- tcg_temp_free_i32(l); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +- +-static DisasJumpType op_xc(DisasContext *s, DisasOps *o) +-{ +- int d1 = get_field(s, d1); +- int d2 = get_field(s, d2); +- int b1 = get_field(s, b1); +- int b2 = get_field(s, b2); +- int l = get_field(s, l1); +- TCGv_i32 t32; +- +- o->addr1 = get_address(s, 0, b1, d1); +- +- /* If the addresses are identical, this is a store/memset of zero. */ +- if (b1 == b2 && d1 == d2 && (l + 1) <= 32) { +- o->in2 = tcg_const_i64(0); +- +- l++; +- while (l >= 8) { +- tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s)); +- l -= 8; +- if (l > 0) { +- tcg_gen_addi_i64(o->addr1, o->addr1, 8); +- } +- } +- if (l >= 4) { +- tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s)); +- l -= 4; +- if (l > 0) { +- tcg_gen_addi_i64(o->addr1, o->addr1, 4); +- } +- } +- if (l >= 2) { +- tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s)); +- l -= 2; +- if (l > 0) { +- tcg_gen_addi_i64(o->addr1, o->addr1, 2); +- } +- } +- if (l) { +- tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s)); +- } +- gen_op_movi_cc(s, 0); +- return DISAS_NEXT; +- } +- +- /* But in general we'll defer to a helper. 
*/ +- o->in2 = get_address(s, 0, b2, d2); +- t32 = tcg_const_i32(l); +- gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2); +- tcg_temp_free_i32(t32); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_xor(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_xor_i64(o->out, o->in1, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_xori(DisasContext *s, DisasOps *o) +-{ +- int shift = s->insn->data & 0xff; +- int size = s->insn->data >> 8; +- uint64_t mask = ((1ull << size) - 1) << shift; +- +- assert(!o->g_in2); +- tcg_gen_shli_i64(o->in2, o->in2, shift); +- tcg_gen_xor_i64(o->out, o->in1, o->in2); +- +- /* Produce the CC from only the bits manipulated. */ +- tcg_gen_andi_i64(cc_dst, o->out, mask); +- set_cc_nz_u64(s, cc_dst); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_xi(DisasContext *s, DisasOps *o) +-{ +- o->in1 = tcg_temp_new_i64(); +- +- if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { +- tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data); +- } else { +- /* Perform the atomic operation in memory. */ +- tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s), +- s->insn->data); +- } +- +- /* Recompute also for atomic case: needed for setting CC. */ +- tcg_gen_xor_i64(o->out, o->in1, o->in2); +- +- if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) { +- tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data); +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_zero(DisasContext *s, DisasOps *o) +-{ +- o->out = tcg_const_i64(0); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_zero2(DisasContext *s, DisasOps *o) +-{ +- o->out = tcg_const_i64(0); +- o->out2 = o->out; +- o->g_out2 = true; +- return DISAS_NEXT; +-} +- +-#ifndef CONFIG_USER_ONLY +-static DisasJumpType op_clp(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); +- +- gen_helper_clp(cpu_env, r2); +- tcg_temp_free_i32(r2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); +- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); +- +- gen_helper_pcilg(cpu_env, r1, r2); +- tcg_temp_free_i32(r1); +- tcg_temp_free_i32(r2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); +- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); +- +- gen_helper_pcistg(cpu_env, r1, r2); +- tcg_temp_free_i32(r1); +- tcg_temp_free_i32(r2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); +- TCGv_i32 ar = tcg_const_i32(get_field(s, b2)); +- +- gen_helper_stpcifc(cpu_env, r1, o->addr1, ar); +- tcg_temp_free_i32(ar); +- tcg_temp_free_i32(r1); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_sic(DisasContext *s, DisasOps *o) +-{ +- gen_helper_sic(cpu_env, o->in1, o->in2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); +- TCGv_i32 r2 = tcg_const_i32(get_field(s, r2)); +- +- gen_helper_rpcit(cpu_env, r1, r2); +- tcg_temp_free_i32(r1); +- tcg_temp_free_i32(r2); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); +- TCGv_i32 r3 = 
tcg_const_i32(get_field(s, r3)); +- TCGv_i32 ar = tcg_const_i32(get_field(s, b2)); +- +- gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar); +- tcg_temp_free_i32(ar); +- tcg_temp_free_i32(r1); +- tcg_temp_free_i32(r3); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 r1 = tcg_const_i32(get_field(s, r1)); +- TCGv_i32 ar = tcg_const_i32(get_field(s, b2)); +- +- gen_helper_mpcifc(cpu_env, r1, o->addr1, ar); +- tcg_temp_free_i32(ar); +- tcg_temp_free_i32(r1); +- set_cc_static(s); +- return DISAS_NEXT; +-} +-#endif +- +-#include "translate_vx.c.inc" +- +-/* ====================================================================== */ +-/* The "Cc OUTput" generators. Given the generated output (and in some cases +- the original inputs), update the various cc data structures in order to +- be able to compute the new condition code. */ +- +-static void cout_abs32(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out); +-} +- +-static void cout_abs64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out); +-} +- +-static void cout_adds32(DisasContext *s, DisasOps *o) +-{ +- gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out); +-} +- +-static void cout_adds64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out); +-} +- +-static void cout_addu32(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_shri_i64(cc_src, o->out, 32); +- tcg_gen_ext32u_i64(cc_dst, o->out); +- gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst); +-} +- +-static void cout_addu64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out); +-} +- +-static void cout_cmps32(DisasContext *s, DisasOps *o) +-{ +- gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2); +-} +- +-static void cout_cmps64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2); +-} +- +-static void cout_cmpu32(DisasContext *s, DisasOps *o) +-{ +- gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2); +-} +- +-static void cout_cmpu64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2); +-} +- +-static void cout_f32(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out); +-} +- +-static void cout_f64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out); +-} +- +-static void cout_f128(DisasContext *s, DisasOps *o) +-{ +- gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2); +-} +- +-static void cout_nabs32(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out); +-} +- +-static void cout_nabs64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out); +-} +- +-static void cout_neg32(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out); +-} +- +-static void cout_neg64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out); +-} +- +-static void cout_nz32(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_ext32u_i64(cc_dst, o->out); +- gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst); +-} +- +-static void cout_nz64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, CC_OP_NZ, o->out); +-} +- +-static void cout_s32(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out); +-} +- +-static void cout_s64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, 
CC_OP_LTGT0_64, o->out); +-} +- +-static void cout_subs32(DisasContext *s, DisasOps *o) +-{ +- gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out); +-} +- +-static void cout_subs64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out); +-} +- +-static void cout_subu32(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_sari_i64(cc_src, o->out, 32); +- tcg_gen_ext32u_i64(cc_dst, o->out); +- gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst); +-} +- +-static void cout_subu64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out); +-} +- +-static void cout_tm32(DisasContext *s, DisasOps *o) +-{ +- gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2); +-} +- +-static void cout_tm64(DisasContext *s, DisasOps *o) +-{ +- gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2); +-} +- +-static void cout_muls32(DisasContext *s, DisasOps *o) +-{ +- gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out); +-} +- +-static void cout_muls64(DisasContext *s, DisasOps *o) +-{ +- /* out contains "high" part, out2 contains "low" part of 128 bit result */ +- gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2); +-} +- +-/* ====================================================================== */ +-/* The "PREParation" generators. These initialize the DisasOps.OUT fields +- with the TCG register to which we will write. Used in combination with +- the "wout" generators, in some cases we need a new temporary, and in +- some cases we can write to a TCG global. */ +- +-static void prep_new(DisasContext *s, DisasOps *o) +-{ +- o->out = tcg_temp_new_i64(); +-} +-#define SPEC_prep_new 0 +- +-static void prep_new_P(DisasContext *s, DisasOps *o) +-{ +- o->out = tcg_temp_new_i64(); +- o->out2 = tcg_temp_new_i64(); +-} +-#define SPEC_prep_new_P 0 +- +-static void prep_r1(DisasContext *s, DisasOps *o) +-{ +- o->out = regs[get_field(s, r1)]; +- o->g_out = true; +-} +-#define SPEC_prep_r1 0 +- +-static void prep_r1_P(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- o->out = regs[r1]; +- o->out2 = regs[r1 + 1]; +- o->g_out = o->g_out2 = true; +-} +-#define SPEC_prep_r1_P SPEC_r1_even +- +-/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */ +-static void prep_x1(DisasContext *s, DisasOps *o) +-{ +- o->out = load_freg(get_field(s, r1)); +- o->out2 = load_freg(get_field(s, r1) + 2); +-} +-#define SPEC_prep_x1 SPEC_r1_f128 +- +-/* ====================================================================== */ +-/* The "Write OUTput" generators. These generally perform some non-trivial +- copy of data to TCG globals, or to main memory. The trivial cases are +- generally handled by having a "prep" generator install the TCG global +- as the destination of the operation. 
*/ +- +-static void wout_r1(DisasContext *s, DisasOps *o) +-{ +- store_reg(get_field(s, r1), o->out); +-} +-#define SPEC_wout_r1 0 +- +-static void wout_out2_r1(DisasContext *s, DisasOps *o) +-{ +- store_reg(get_field(s, r1), o->out2); +-} +-#define SPEC_wout_out2_r1 0 +- +-static void wout_r1_8(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8); +-} +-#define SPEC_wout_r1_8 0 +- +-static void wout_r1_16(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16); +-} +-#define SPEC_wout_r1_16 0 +- +-static void wout_r1_32(DisasContext *s, DisasOps *o) +-{ +- store_reg32_i64(get_field(s, r1), o->out); +-} +-#define SPEC_wout_r1_32 0 +- +-static void wout_r1_32h(DisasContext *s, DisasOps *o) +-{ +- store_reg32h_i64(get_field(s, r1), o->out); +-} +-#define SPEC_wout_r1_32h 0 +- +-static void wout_r1_P32(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- store_reg32_i64(r1, o->out); +- store_reg32_i64(r1 + 1, o->out2); +-} +-#define SPEC_wout_r1_P32 SPEC_r1_even +- +-static void wout_r1_D32(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- store_reg32_i64(r1 + 1, o->out); +- tcg_gen_shri_i64(o->out, o->out, 32); +- store_reg32_i64(r1, o->out); +-} +-#define SPEC_wout_r1_D32 SPEC_r1_even +- +-static void wout_r3_P32(DisasContext *s, DisasOps *o) +-{ +- int r3 = get_field(s, r3); +- store_reg32_i64(r3, o->out); +- store_reg32_i64(r3 + 1, o->out2); +-} +-#define SPEC_wout_r3_P32 SPEC_r3_even +- +-static void wout_r3_P64(DisasContext *s, DisasOps *o) +-{ +- int r3 = get_field(s, r3); +- store_reg(r3, o->out); +- store_reg(r3 + 1, o->out2); +-} +-#define SPEC_wout_r3_P64 SPEC_r3_even +- +-static void wout_e1(DisasContext *s, DisasOps *o) +-{ +- store_freg32_i64(get_field(s, r1), o->out); +-} +-#define SPEC_wout_e1 0 +- +-static void wout_f1(DisasContext *s, DisasOps *o) +-{ +- store_freg(get_field(s, r1), o->out); +-} +-#define SPEC_wout_f1 0 +- +-static void wout_x1(DisasContext *s, DisasOps *o) +-{ +- int f1 = get_field(s, r1); +- store_freg(f1, o->out); +- store_freg(f1 + 2, o->out2); +-} +-#define SPEC_wout_x1 SPEC_r1_f128 +- +-static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o) +-{ +- if (get_field(s, r1) != get_field(s, r2)) { +- store_reg32_i64(get_field(s, r1), o->out); +- } +-} +-#define SPEC_wout_cond_r1r2_32 0 +- +-static void wout_cond_e1e2(DisasContext *s, DisasOps *o) +-{ +- if (get_field(s, r1) != get_field(s, r2)) { +- store_freg32_i64(get_field(s, r1), o->out); +- } +-} +-#define SPEC_wout_cond_e1e2 0 +- +-static void wout_m1_8(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s)); +-} +-#define SPEC_wout_m1_8 0 +- +-static void wout_m1_16(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s)); +-} +-#define SPEC_wout_m1_16 0 +- +-#ifndef CONFIG_USER_ONLY +-static void wout_m1_16a(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN); +-} +-#define SPEC_wout_m1_16a 0 +-#endif +- +-static void wout_m1_32(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s)); +-} +-#define SPEC_wout_m1_32 0 +- +-#ifndef CONFIG_USER_ONLY +-static void wout_m1_32a(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN); +-} +-#define SPEC_wout_m1_32a 0 +-#endif +- +-static void wout_m1_64(DisasContext *s, 
DisasOps *o) +-{ +- tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s)); +-} +-#define SPEC_wout_m1_64 0 +- +-#ifndef CONFIG_USER_ONLY +-static void wout_m1_64a(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); +-} +-#define SPEC_wout_m1_64a 0 +-#endif +- +-static void wout_m2_32(DisasContext *s, DisasOps *o) +-{ +- tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s)); +-} +-#define SPEC_wout_m2_32 0 +- +-static void wout_in2_r1(DisasContext *s, DisasOps *o) +-{ +- store_reg(get_field(s, r1), o->in2); +-} +-#define SPEC_wout_in2_r1 0 +- +-static void wout_in2_r1_32(DisasContext *s, DisasOps *o) +-{ +- store_reg32_i64(get_field(s, r1), o->in2); +-} +-#define SPEC_wout_in2_r1_32 0 +- +-/* ====================================================================== */ +-/* The "INput 1" generators. These load the first operand to an insn. */ +- +-static void in1_r1(DisasContext *s, DisasOps *o) +-{ +- o->in1 = load_reg(get_field(s, r1)); +-} +-#define SPEC_in1_r1 0 +- +-static void in1_r1_o(DisasContext *s, DisasOps *o) +-{ +- o->in1 = regs[get_field(s, r1)]; +- o->g_in1 = true; +-} +-#define SPEC_in1_r1_o 0 +- +-static void in1_r1_32s(DisasContext *s, DisasOps *o) +-{ +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]); +-} +-#define SPEC_in1_r1_32s 0 +- +-static void in1_r1_32u(DisasContext *s, DisasOps *o) +-{ +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]); +-} +-#define SPEC_in1_r1_32u 0 +- +-static void in1_r1_sr32(DisasContext *s, DisasOps *o) +-{ +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32); +-} +-#define SPEC_in1_r1_sr32 0 +- +-static void in1_r1p1(DisasContext *s, DisasOps *o) +-{ +- o->in1 = load_reg(get_field(s, r1) + 1); +-} +-#define SPEC_in1_r1p1 SPEC_r1_even +- +-static void in1_r1p1_o(DisasContext *s, DisasOps *o) +-{ +- o->in1 = regs[get_field(s, r1) + 1]; +- o->g_in1 = true; +-} +-#define SPEC_in1_r1p1_o SPEC_r1_even +- +-static void in1_r1p1_32s(DisasContext *s, DisasOps *o) +-{ +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]); +-} +-#define SPEC_in1_r1p1_32s SPEC_r1_even +- +-static void in1_r1p1_32u(DisasContext *s, DisasOps *o) +-{ +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]); +-} +-#define SPEC_in1_r1p1_32u SPEC_r1_even +- +-static void in1_r1_D32(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]); +-} +-#define SPEC_in1_r1_D32 SPEC_r1_even +- +-static void in1_r2(DisasContext *s, DisasOps *o) +-{ +- o->in1 = load_reg(get_field(s, r2)); +-} +-#define SPEC_in1_r2 0 +- +-static void in1_r2_sr32(DisasContext *s, DisasOps *o) +-{ +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32); +-} +-#define SPEC_in1_r2_sr32 0 +- +-static void in1_r2_32u(DisasContext *s, DisasOps *o) +-{ +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]); +-} +-#define SPEC_in1_r2_32u 0 +- +-static void in1_r3(DisasContext *s, DisasOps *o) +-{ +- o->in1 = load_reg(get_field(s, r3)); +-} +-#define SPEC_in1_r3 0 +- +-static void in1_r3_o(DisasContext *s, DisasOps *o) +-{ +- o->in1 = regs[get_field(s, r3)]; +- o->g_in1 = true; +-} +-#define SPEC_in1_r3_o 0 +- +-static void in1_r3_32s(DisasContext *s, DisasOps *o) +-{ +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_ext32s_i64(o->in1, 
regs[get_field(s, r3)]); +-} +-#define SPEC_in1_r3_32s 0 +- +-static void in1_r3_32u(DisasContext *s, DisasOps *o) +-{ +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]); +-} +-#define SPEC_in1_r3_32u 0 +- +-static void in1_r3_D32(DisasContext *s, DisasOps *o) +-{ +- int r3 = get_field(s, r3); +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]); +-} +-#define SPEC_in1_r3_D32 SPEC_r3_even +- +-static void in1_e1(DisasContext *s, DisasOps *o) +-{ +- o->in1 = load_freg32_i64(get_field(s, r1)); +-} +-#define SPEC_in1_e1 0 +- +-static void in1_f1(DisasContext *s, DisasOps *o) +-{ +- o->in1 = load_freg(get_field(s, r1)); +-} +-#define SPEC_in1_f1 0 +- +-/* Load the high double word of an extended (128-bit) format FP number */ +-static void in1_x2h(DisasContext *s, DisasOps *o) +-{ +- o->in1 = load_freg(get_field(s, r2)); +-} +-#define SPEC_in1_x2h SPEC_r2_f128 +- +-static void in1_f3(DisasContext *s, DisasOps *o) +-{ +- o->in1 = load_freg(get_field(s, r3)); +-} +-#define SPEC_in1_f3 0 +- +-static void in1_la1(DisasContext *s, DisasOps *o) +-{ +- o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1)); +-} +-#define SPEC_in1_la1 0 +- +-static void in1_la2(DisasContext *s, DisasOps *o) +-{ +- int x2 = have_field(s, x2) ? get_field(s, x2) : 0; +- o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); +-} +-#define SPEC_in1_la2 0 +- +-static void in1_m1_8u(DisasContext *s, DisasOps *o) +-{ +- in1_la1(s, o); +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s)); +-} +-#define SPEC_in1_m1_8u 0 +- +-static void in1_m1_16s(DisasContext *s, DisasOps *o) +-{ +- in1_la1(s, o); +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s)); +-} +-#define SPEC_in1_m1_16s 0 +- +-static void in1_m1_16u(DisasContext *s, DisasOps *o) +-{ +- in1_la1(s, o); +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s)); +-} +-#define SPEC_in1_m1_16u 0 +- +-static void in1_m1_32s(DisasContext *s, DisasOps *o) +-{ +- in1_la1(s, o); +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s)); +-} +-#define SPEC_in1_m1_32s 0 +- +-static void in1_m1_32u(DisasContext *s, DisasOps *o) +-{ +- in1_la1(s, o); +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s)); +-} +-#define SPEC_in1_m1_32u 0 +- +-static void in1_m1_64(DisasContext *s, DisasOps *o) +-{ +- in1_la1(s, o); +- o->in1 = tcg_temp_new_i64(); +- tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s)); +-} +-#define SPEC_in1_m1_64 0 +- +-/* ====================================================================== */ +-/* The "INput 2" generators. These load the second operand to an insn. 
*/ +- +-static void in2_r1_o(DisasContext *s, DisasOps *o) +-{ +- o->in2 = regs[get_field(s, r1)]; +- o->g_in2 = true; +-} +-#define SPEC_in2_r1_o 0 +- +-static void in2_r1_16u(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]); +-} +-#define SPEC_in2_r1_16u 0 +- +-static void in2_r1_32u(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]); +-} +-#define SPEC_in2_r1_32u 0 +- +-static void in2_r1_D32(DisasContext *s, DisasOps *o) +-{ +- int r1 = get_field(s, r1); +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]); +-} +-#define SPEC_in2_r1_D32 SPEC_r1_even +- +-static void in2_r2(DisasContext *s, DisasOps *o) +-{ +- o->in2 = load_reg(get_field(s, r2)); +-} +-#define SPEC_in2_r2 0 +- +-static void in2_r2_o(DisasContext *s, DisasOps *o) +-{ +- o->in2 = regs[get_field(s, r2)]; +- o->g_in2 = true; +-} +-#define SPEC_in2_r2_o 0 +- +-static void in2_r2_nz(DisasContext *s, DisasOps *o) +-{ +- int r2 = get_field(s, r2); +- if (r2 != 0) { +- o->in2 = load_reg(r2); +- } +-} +-#define SPEC_in2_r2_nz 0 +- +-static void in2_r2_8s(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]); +-} +-#define SPEC_in2_r2_8s 0 +- +-static void in2_r2_8u(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]); +-} +-#define SPEC_in2_r2_8u 0 +- +-static void in2_r2_16s(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]); +-} +-#define SPEC_in2_r2_16s 0 +- +-static void in2_r2_16u(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]); +-} +-#define SPEC_in2_r2_16u 0 +- +-static void in2_r3(DisasContext *s, DisasOps *o) +-{ +- o->in2 = load_reg(get_field(s, r3)); +-} +-#define SPEC_in2_r3 0 +- +-static void in2_r3_sr32(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32); +-} +-#define SPEC_in2_r3_sr32 0 +- +-static void in2_r3_32u(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]); +-} +-#define SPEC_in2_r3_32u 0 +- +-static void in2_r2_32s(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]); +-} +-#define SPEC_in2_r2_32s 0 +- +-static void in2_r2_32u(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]); +-} +-#define SPEC_in2_r2_32u 0 +- +-static void in2_r2_sr32(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_temp_new_i64(); +- tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32); +-} +-#define SPEC_in2_r2_sr32 0 +- +-static void in2_e2(DisasContext *s, DisasOps *o) +-{ +- o->in2 = load_freg32_i64(get_field(s, r2)); +-} +-#define SPEC_in2_e2 0 +- +-static void in2_f2(DisasContext *s, DisasOps *o) +-{ +- o->in2 = load_freg(get_field(s, r2)); +-} +-#define SPEC_in2_f2 0 +- +-/* Load the low double word of an extended (128-bit) format FP number */ +-static void in2_x2l(DisasContext *s, DisasOps *o) +-{ +- o->in2 = load_freg(get_field(s, r2) + 2); +-} +-#define SPEC_in2_x2l SPEC_r2_f128 +- +-static void in2_ra2(DisasContext *s, DisasOps *o) +-{ +- int r2 = get_field(s, r2); +- +- /* Note: *don't* treat !r2 as 0, use the reg 
value. */ +- o->in2 = tcg_temp_new_i64(); +- gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0); +-} +-#define SPEC_in2_ra2 0 +- +-static void in2_a2(DisasContext *s, DisasOps *o) +-{ +- int x2 = have_field(s, x2) ? get_field(s, x2) : 0; +- o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2)); +-} +-#define SPEC_in2_a2 0 +- +-static void in2_ri2(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2); +-} +-#define SPEC_in2_ri2 0 +- +-static void in2_sh32(DisasContext *s, DisasOps *o) +-{ +- help_l2_shift(s, o, 31); +-} +-#define SPEC_in2_sh32 0 +- +-static void in2_sh64(DisasContext *s, DisasOps *o) +-{ +- help_l2_shift(s, o, 63); +-} +-#define SPEC_in2_sh64 0 +- +-static void in2_m2_8u(DisasContext *s, DisasOps *o) +-{ +- in2_a2(s, o); +- tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s)); +-} +-#define SPEC_in2_m2_8u 0 +- +-static void in2_m2_16s(DisasContext *s, DisasOps *o) +-{ +- in2_a2(s, o); +- tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s)); +-} +-#define SPEC_in2_m2_16s 0 +- +-static void in2_m2_16u(DisasContext *s, DisasOps *o) +-{ +- in2_a2(s, o); +- tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); +-} +-#define SPEC_in2_m2_16u 0 +- +-static void in2_m2_32s(DisasContext *s, DisasOps *o) +-{ +- in2_a2(s, o); +- tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); +-} +-#define SPEC_in2_m2_32s 0 +- +-static void in2_m2_32u(DisasContext *s, DisasOps *o) +-{ +- in2_a2(s, o); +- tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); +-} +-#define SPEC_in2_m2_32u 0 +- +-#ifndef CONFIG_USER_ONLY +-static void in2_m2_32ua(DisasContext *s, DisasOps *o) +-{ +- in2_a2(s, o); +- tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN); +-} +-#define SPEC_in2_m2_32ua 0 +-#endif +- +-static void in2_m2_64(DisasContext *s, DisasOps *o) +-{ +- in2_a2(s, o); +- tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); +-} +-#define SPEC_in2_m2_64 0 +- +-static void in2_m2_64w(DisasContext *s, DisasOps *o) +-{ +- in2_a2(s, o); +- tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); +- gen_addi_and_wrap_i64(s, o->in2, o->in2, 0); +-} +-#define SPEC_in2_m2_64w 0 +- +-#ifndef CONFIG_USER_ONLY +-static void in2_m2_64a(DisasContext *s, DisasOps *o) +-{ +- in2_a2(s, o); +- tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN); +-} +-#define SPEC_in2_m2_64a 0 +-#endif +- +-static void in2_mri2_16u(DisasContext *s, DisasOps *o) +-{ +- in2_ri2(s, o); +- tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); +-} +-#define SPEC_in2_mri2_16u 0 +- +-static void in2_mri2_32s(DisasContext *s, DisasOps *o) +-{ +- in2_ri2(s, o); +- tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); +-} +-#define SPEC_in2_mri2_32s 0 +- +-static void in2_mri2_32u(DisasContext *s, DisasOps *o) +-{ +- in2_ri2(s, o); +- tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); +-} +-#define SPEC_in2_mri2_32u 0 +- +-static void in2_mri2_64(DisasContext *s, DisasOps *o) +-{ +- in2_ri2(s, o); +- tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); +-} +-#define SPEC_in2_mri2_64 0 +- +-static void in2_i2(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_const_i64(get_field(s, i2)); +-} +-#define SPEC_in2_i2 0 +- +-static void in2_i2_8u(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_const_i64((uint8_t)get_field(s, i2)); +-} +-#define SPEC_in2_i2_8u 0 +- +-static void in2_i2_16u(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_const_i64((uint16_t)get_field(s, i2)); +-} +-#define SPEC_in2_i2_16u 0 +- +-static void in2_i2_32u(DisasContext 
*s, DisasOps *o) +-{ +- o->in2 = tcg_const_i64((uint32_t)get_field(s, i2)); +-} +-#define SPEC_in2_i2_32u 0 +- +-static void in2_i2_16u_shl(DisasContext *s, DisasOps *o) +-{ +- uint64_t i2 = (uint16_t)get_field(s, i2); +- o->in2 = tcg_const_i64(i2 << s->insn->data); +-} +-#define SPEC_in2_i2_16u_shl 0 +- +-static void in2_i2_32u_shl(DisasContext *s, DisasOps *o) +-{ +- uint64_t i2 = (uint32_t)get_field(s, i2); +- o->in2 = tcg_const_i64(i2 << s->insn->data); +-} +-#define SPEC_in2_i2_32u_shl 0 +- +-#ifndef CONFIG_USER_ONLY +-static void in2_insn(DisasContext *s, DisasOps *o) +-{ +- o->in2 = tcg_const_i64(s->fields.raw_insn); +-} +-#define SPEC_in2_insn 0 +-#endif +- +-/* ====================================================================== */ +- +-/* Find opc within the table of insns. This is formulated as a switch +- statement so that (1) we get compile-time notice of cut-paste errors +- for duplicated opcodes, and (2) the compiler generates the binary +- search tree, rather than us having to post-process the table. */ +- +-#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \ +- E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0) +- +-#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \ +- E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0) +- +-#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \ +- E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL) +- +-#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM, +- +-enum DisasInsnEnum { +-#include "insn-data.def" +-}; +- +-#undef E +-#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \ +- .opc = OPC, \ +- .flags = FL, \ +- .fmt = FMT_##FT, \ +- .fac = FAC_##FC, \ +- .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \ +- .name = #NM, \ +- .help_in1 = in1_##I1, \ +- .help_in2 = in2_##I2, \ +- .help_prep = prep_##P, \ +- .help_wout = wout_##W, \ +- .help_cout = cout_##CC, \ +- .help_op = op_##OP, \ +- .data = D \ +- }, +- +-/* Allow 0 to be used for NULL in the table below. */ +-#define in1_0 NULL +-#define in2_0 NULL +-#define prep_0 NULL +-#define wout_0 NULL +-#define cout_0 NULL +-#define op_0 NULL +- +-#define SPEC_in1_0 0 +-#define SPEC_in2_0 0 +-#define SPEC_prep_0 0 +-#define SPEC_wout_0 0 +- +-/* Give smaller names to the various facilities. 
*/ +-#define FAC_Z S390_FEAT_ZARCH +-#define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE +-#define FAC_DFP S390_FEAT_DFP +-#define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */ +-#define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */ +-#define FAC_EE S390_FEAT_EXECUTE_EXT +-#define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE +-#define FAC_FPE S390_FEAT_FLOATING_POINT_EXT +-#define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */ +-#define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */ +-#define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT +-#define FAC_HFP_MA S390_FEAT_HFP_MADDSUB +-#define FAC_HW S390_FEAT_STFLE_45 /* high-word */ +-#define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */ +-#define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */ +-#define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */ +-#define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */ +-#define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */ +-#define FAC_LD S390_FEAT_LONG_DISPLACEMENT +-#define FAC_PC S390_FEAT_STFLE_45 /* population count */ +-#define FAC_SCF S390_FEAT_STORE_CLOCK_FAST +-#define FAC_SFLE S390_FEAT_STFLE +-#define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */ +-#define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC +-#define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */ +-#define FAC_DAT_ENH S390_FEAT_DAT_ENH +-#define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2 +-#define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */ +-#define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */ +-#define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */ +-#define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3 +-#define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */ +-#define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */ +-#define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */ +-#define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */ +-#define FAC_MSA8 S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */ +-#define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME +-#define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */ +-#define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION +-#define FAC_V S390_FEAT_VECTOR /* vector facility */ +-#define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */ +-#define FAC_MIE2 S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */ +- +-static const DisasInsn insn_info[] = { +-#include "insn-data.def" +-}; +- +-#undef E +-#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \ +- case OPC: return &insn_info[insn_ ## NM]; +- +-static const DisasInsn *lookup_opc(uint16_t opc) +-{ +- switch (opc) { +-#include "insn-data.def" +- default: +- return NULL; +- } +-} +- +-#undef F +-#undef E +-#undef D +-#undef C +- +-/* Extract a field from the insn. The INSN should be left-aligned in +- the uint64_t so that we can more easily utilize the big-bit-endian +- definitions we extract from the Principles of Operation. */ +- +-static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn) +-{ +- uint32_t r, m; +- +- if (f->size == 0) { +- return; +- } +- +- /* Zero extract the field from the insn. */ +- r = (insn << f->beg) >> (64 - f->size); +- +- /* Sign-extend, or un-swap the field as necessary. 
*/ +- switch (f->type) { +- case 0: /* unsigned */ +- break; +- case 1: /* signed */ +- assert(f->size <= 32); +- m = 1u << (f->size - 1); +- r = (r ^ m) - m; +- break; +- case 2: /* dl+dh split, signed 20 bit. */ +- r = ((int8_t)r << 12) | (r >> 8); +- break; +- case 3: /* MSB stored in RXB */ +- g_assert(f->size == 4); +- switch (f->beg) { +- case 8: +- r |= extract64(insn, 63 - 36, 1) << 4; +- break; +- case 12: +- r |= extract64(insn, 63 - 37, 1) << 4; +- break; +- case 16: +- r |= extract64(insn, 63 - 38, 1) << 4; +- break; +- case 32: +- r |= extract64(insn, 63 - 39, 1) << 4; +- break; +- default: +- g_assert_not_reached(); +- } +- break; +- default: +- abort(); +- } +- +- /* +- * Validate that the "compressed" encoding we selected above is valid. +- * I.e. we haven't made two different original fields overlap. +- */ +- assert(((o->presentC >> f->indexC) & 1) == 0); +- o->presentC |= 1 << f->indexC; +- o->presentO |= 1 << f->indexO; +- +- o->c[f->indexC] = r; +-} +- +-/* Lookup the insn at the current PC, extracting the operands into O and +- returning the info struct for the insn. Returns NULL for invalid insn. */ +- +-static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s) +-{ +- uint64_t insn, pc = s->base.pc_next; +- int op, op2, ilen; +- const DisasInsn *info; +- +- if (unlikely(s->ex_value)) { +- /* Drop the EX data now, so that it's clear on exception paths. */ +- TCGv_i64 zero = tcg_const_i64(0); +- tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value)); +- tcg_temp_free_i64(zero); +- +- /* Extract the values saved by EXECUTE. */ +- insn = s->ex_value & 0xffffffffffff0000ull; +- ilen = s->ex_value & 0xf; +- op = insn >> 56; +- } else { +- insn = ld_code2(env, pc); +- op = (insn >> 8) & 0xff; +- ilen = get_ilen(op); +- switch (ilen) { +- case 2: +- insn = insn << 48; +- break; +- case 4: +- insn = ld_code4(env, pc) << 32; +- break; +- case 6: +- insn = (insn << 48) | (ld_code4(env, pc + 2) << 16); +- break; +- default: +- g_assert_not_reached(); +- } +- } +- s->pc_tmp = s->base.pc_next + ilen; +- s->ilen = ilen; +- +- /* We can't actually determine the insn format until we've looked up +- the full insn opcode. Which we can't do without locating the +- secondary opcode. Assume by default that OP2 is at bit 40; for +- those smaller insns that don't actually have a secondary opcode +- this will correctly result in OP2 = 0. */ +- switch (op) { +- case 0x01: /* E */ +- case 0x80: /* S */ +- case 0x82: /* S */ +- case 0x93: /* S */ +- case 0xb2: /* S, RRF, RRE, IE */ +- case 0xb3: /* RRE, RRD, RRF */ +- case 0xb9: /* RRE, RRF */ +- case 0xe5: /* SSE, SIL */ +- op2 = (insn << 8) >> 56; +- break; +- case 0xa5: /* RI */ +- case 0xa7: /* RI */ +- case 0xc0: /* RIL */ +- case 0xc2: /* RIL */ +- case 0xc4: /* RIL */ +- case 0xc6: /* RIL */ +- case 0xc8: /* SSF */ +- case 0xcc: /* RIL */ +- op2 = (insn << 12) >> 60; +- break; +- case 0xc5: /* MII */ +- case 0xc7: /* SMI */ +- case 0xd0 ... 0xdf: /* SS */ +- case 0xe1: /* SS */ +- case 0xe2: /* SS */ +- case 0xe8: /* SS */ +- case 0xe9: /* SS */ +- case 0xea: /* SS */ +- case 0xee ... 0xf3: /* SS */ +- case 0xf8 ... 0xfd: /* SS */ +- op2 = 0; +- break; +- default: +- op2 = (insn << 40) >> 56; +- break; +- } +- +- memset(&s->fields, 0, sizeof(s->fields)); +- s->fields.raw_insn = insn; +- s->fields.op = op; +- s->fields.op2 = op2; +- +- /* Lookup the instruction. */ +- info = lookup_opc(op << 8 | op2); +- s->insn = info; +- +- /* If we found it, extract the operands. 
*/ +- if (info != NULL) { +- DisasFormat fmt = info->fmt; +- int i; +- +- for (i = 0; i < NUM_C_FIELD; ++i) { +- extract_field(&s->fields, &format_info[fmt].op[i], insn); +- } +- } +- return info; +-} +- +-static bool is_afp_reg(int reg) +-{ +- return reg % 2 || reg > 6; +-} +- +-static bool is_fp_pair(int reg) +-{ +- /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */ +- return !(reg & 0x2); +-} +- +-static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) +-{ +- const DisasInsn *insn; +- DisasJumpType ret = DISAS_NEXT; +- DisasOps o = {}; +- bool icount = false; +- +- /* Search for the insn in the table. */ +- insn = extract_insn(env, s); +- +- /* Emit insn_start now that we know the ILEN. */ +- tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen); +- +- /* Not found means unimplemented/illegal opcode. */ +- if (insn == NULL) { +- qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n", +- s->fields.op, s->fields.op2); +- gen_illegal_opcode(s); +- ret = DISAS_NORETURN; +- goto out; +- } +- +-#ifndef CONFIG_USER_ONLY +- if (s->base.tb->flags & FLAG_MASK_PER) { +- TCGv_i64 addr = tcg_const_i64(s->base.pc_next); +- gen_helper_per_ifetch(cpu_env, addr); +- tcg_temp_free_i64(addr); +- } +-#endif +- +- /* process flags */ +- if (insn->flags) { +- /* privileged instruction */ +- if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) { +- gen_program_exception(s, PGM_PRIVILEGED); +- ret = DISAS_NORETURN; +- goto out; +- } +- +- /* if AFP is not enabled, instructions and registers are forbidden */ +- if (!(s->base.tb->flags & FLAG_MASK_AFP)) { +- uint8_t dxc = 0; +- +- if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) { +- dxc = 1; +- } +- if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) { +- dxc = 1; +- } +- if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) { +- dxc = 1; +- } +- if (insn->flags & IF_BFP) { +- dxc = 2; +- } +- if (insn->flags & IF_DFP) { +- dxc = 3; +- } +- if (insn->flags & IF_VEC) { +- dxc = 0xfe; +- } +- if (dxc) { +- gen_data_exception(dxc); +- ret = DISAS_NORETURN; +- goto out; +- } +- } +- +- /* if vector instructions not enabled, executing them is forbidden */ +- if (insn->flags & IF_VEC) { +- if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) { +- gen_data_exception(0xfe); +- ret = DISAS_NORETURN; +- goto out; +- } +- } +- +- /* input/output is the special case for icount mode */ +- if (unlikely(insn->flags & IF_IO)) { +- icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT; +- if (icount) { +- gen_io_start(); +- } +- } +- } +- +- /* Check for insn specification exceptions. */ +- if (insn->spec) { +- if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) || +- (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) || +- (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) || +- (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) || +- (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) { +- gen_program_exception(s, PGM_SPECIFICATION); +- ret = DISAS_NORETURN; +- goto out; +- } +- } +- +- /* Implement the instruction. */ +- if (insn->help_in1) { +- insn->help_in1(s, &o); +- } +- if (insn->help_in2) { +- insn->help_in2(s, &o); +- } +- if (insn->help_prep) { +- insn->help_prep(s, &o); +- } +- if (insn->help_op) { +- ret = insn->help_op(s, &o); +- } +- if (ret != DISAS_NORETURN) { +- if (insn->help_wout) { +- insn->help_wout(s, &o); +- } +- if (insn->help_cout) { +- insn->help_cout(s, &o); +- } +- } +- +- /* Free any temporaries created by the helpers. 
*/ +- if (o.out && !o.g_out) { +- tcg_temp_free_i64(o.out); +- } +- if (o.out2 && !o.g_out2) { +- tcg_temp_free_i64(o.out2); +- } +- if (o.in1 && !o.g_in1) { +- tcg_temp_free_i64(o.in1); +- } +- if (o.in2 && !o.g_in2) { +- tcg_temp_free_i64(o.in2); +- } +- if (o.addr1) { +- tcg_temp_free_i64(o.addr1); +- } +- +- /* io should be the last instruction in tb when icount is enabled */ +- if (unlikely(icount && ret == DISAS_NEXT)) { +- ret = DISAS_PC_STALE; +- } +- +-#ifndef CONFIG_USER_ONLY +- if (s->base.tb->flags & FLAG_MASK_PER) { +- /* An exception might be triggered, save PSW if not already done. */ +- if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) { +- tcg_gen_movi_i64(psw_addr, s->pc_tmp); +- } +- +- /* Call the helper to check for a possible PER exception. */ +- gen_helper_per_check_exception(cpu_env); +- } +-#endif +- +-out: +- /* Advance to the next instruction. */ +- s->base.pc_next = s->pc_tmp; +- return ret; +-} +- +-static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) +-{ +- DisasContext *dc = container_of(dcbase, DisasContext, base); +- +- /* 31-bit mode */ +- if (!(dc->base.tb->flags & FLAG_MASK_64)) { +- dc->base.pc_first &= 0x7fffffff; +- dc->base.pc_next = dc->base.pc_first; +- } +- +- dc->cc_op = CC_OP_DYNAMIC; +- dc->ex_value = dc->base.tb->cs_base; +- dc->do_debug = dc->base.singlestep_enabled; +-} +- +-static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs) +-{ +-} +- +-static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) +-{ +-} +- +-static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs, +- const CPUBreakpoint *bp) +-{ +- DisasContext *dc = container_of(dcbase, DisasContext, base); +- +- /* +- * Emit an insn_start to accompany the breakpoint exception. +- * The ILEN value is a dummy, since this does not result in +- * an s390x exception, but an internal qemu exception which +- * brings us back to interact with the gdbstub. +- */ +- tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 2); +- +- dc->base.is_jmp = DISAS_PC_STALE; +- dc->do_debug = true; +- /* The address covered by the breakpoint must be included in +- [tb->pc, tb->pc + tb->size) in order for it to be +- properly cleared -- thus we increment the PC here so that +- the logic setting tb->size does the right thing. */ +- dc->base.pc_next += 2; +- return true; +-} +- +-static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) +-{ +- CPUS390XState *env = cs->env_ptr; +- DisasContext *dc = container_of(dcbase, DisasContext, base); +- +- dc->base.is_jmp = translate_one(env, dc); +- if (dc->base.is_jmp == DISAS_NEXT) { +- uint64_t page_start; +- +- page_start = dc->base.pc_first & TARGET_PAGE_MASK; +- if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) { +- dc->base.is_jmp = DISAS_TOO_MANY; +- } +- } +-} +- +-static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) +-{ +- DisasContext *dc = container_of(dcbase, DisasContext, base); +- +- switch (dc->base.is_jmp) { +- case DISAS_GOTO_TB: +- case DISAS_NORETURN: +- break; +- case DISAS_TOO_MANY: +- case DISAS_PC_STALE: +- case DISAS_PC_STALE_NOCHAIN: +- update_psw_addr(dc); +- /* FALLTHRU */ +- case DISAS_PC_UPDATED: +- /* Next TB starts off with CC_OP_DYNAMIC, so make sure the +- cc op type is in env */ +- update_cc_op(dc); +- /* FALLTHRU */ +- case DISAS_PC_CC_UPDATED: +- /* Exit the TB, either by raising a debug exception or by return. 
*/ +- if (dc->do_debug) { +- gen_exception(EXCP_DEBUG); +- } else if ((dc->base.tb->flags & FLAG_MASK_PER) || +- dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) { +- tcg_gen_exit_tb(NULL, 0); +- } else { +- tcg_gen_lookup_and_goto_ptr(); +- } +- break; +- default: +- g_assert_not_reached(); +- } +-} +- +-static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) +-{ +- DisasContext *dc = container_of(dcbase, DisasContext, base); +- +- if (unlikely(dc->ex_value)) { +- /* ??? Unfortunately log_target_disas can't use host memory. */ +- qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value); +- } else { +- qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first)); +- log_target_disas(cs, dc->base.pc_first, dc->base.tb->size); +- } +-} +- +-static const TranslatorOps s390x_tr_ops = { +- .init_disas_context = s390x_tr_init_disas_context, +- .tb_start = s390x_tr_tb_start, +- .insn_start = s390x_tr_insn_start, +- .breakpoint_check = s390x_tr_breakpoint_check, +- .translate_insn = s390x_tr_translate_insn, +- .tb_stop = s390x_tr_tb_stop, +- .disas_log = s390x_tr_disas_log, +-}; +- +-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +-{ +- DisasContext dc; +- +- translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns); +-} +- +-void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, +- target_ulong *data) +-{ +- int cc_op = data[1]; +- +- env->psw.addr = data[0]; +- +- /* Update the CC opcode if it is not already up-to-date. */ +- if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) { +- env->cc_op = cc_op; +- } +- +- /* Record ILEN. */ +- env->int_pgm_ilen = data[2]; +-} +diff --git a/target/s390x/translate_vx.c.inc b/target/s390x/translate_vx.c.inc +deleted file mode 100644 +index a9d51b1f4c..0000000000 +--- a/target/s390x/translate_vx.c.inc ++++ /dev/null +@@ -1,3109 +0,0 @@ +-/* +- * QEMU TCG support -- s390x vector instruction translation functions +- * +- * Copyright (C) 2019 Red Hat Inc +- * +- * Authors: +- * David Hildenbrand +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. +- */ +- +-/* +- * For most instructions that use the same element size for reads and +- * writes, we can use real gvec vector expansion, which potentially uses +- * real host vector instructions. As they only work up to 64 bit elements, +- * 128 bit elements (vector is a single element) have to be handled +- * differently. Operations that are too complicated to encode via TCG ops +- * are handled via gvec ool (out-of-line) handlers. +- * +- * As soon as instructions use different element sizes for reads and writes +- * or access elements "out of their element scope" we expand them manually +- * in fancy loops, as gvec expansion does not deal with actual element +- * numbers and also does not support access to other elements. +- * +- * 128 bit elements: +- * As we only have i32/i64, such elements have to be loaded into two +- * i64 values and can then be processed e.g. by tcg_gen_add2_i64. +- * +- * Sizes: +- * On s390x, the operand size (oprsz) and the maximum size (maxsz) are +- * always 16 (128 bit). What gvec code calls "vece", s390x calls "es", +- * a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only +- * 128 bit element size has to be treated in a special way (MO_64 + 1). +- * We will use ES_* instead of MO_* for this reason in this file.
+- * +- * CC handling: +- * As gvec ool-helpers currently cannot return values (besides via +- * pointers like vectors or cpu_env), whenever we have to set the CC and +- * can't conclude the value from the result vector, we will directly +- * set it in "env->cc_op" and mark it as static via set_cc_static(). +- * Whenever this is done, the helper writes globals (cc_op). +- */ +- +-#define NUM_VEC_ELEMENT_BYTES(es) (1 << (es)) +-#define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es)) +-#define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE) +- +-#define ES_8 MO_8 +-#define ES_16 MO_16 +-#define ES_32 MO_32 +-#define ES_64 MO_64 +-#define ES_128 4 +- +-/* Floating-Point Format */ +-#define FPF_SHORT 2 +-#define FPF_LONG 3 +-#define FPF_EXT 4 +- +-static inline bool valid_vec_element(uint8_t enr, MemOp es) +-{ +- return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1)); +-} +- +-static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr, +- MemOp memop) +-{ +- const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); +- +- switch (memop) { +- case ES_8: +- tcg_gen_ld8u_i64(dst, cpu_env, offs); +- break; +- case ES_16: +- tcg_gen_ld16u_i64(dst, cpu_env, offs); +- break; +- case ES_32: +- tcg_gen_ld32u_i64(dst, cpu_env, offs); +- break; +- case ES_8 | MO_SIGN: +- tcg_gen_ld8s_i64(dst, cpu_env, offs); +- break; +- case ES_16 | MO_SIGN: +- tcg_gen_ld16s_i64(dst, cpu_env, offs); +- break; +- case ES_32 | MO_SIGN: +- tcg_gen_ld32s_i64(dst, cpu_env, offs); +- break; +- case ES_64: +- case ES_64 | MO_SIGN: +- tcg_gen_ld_i64(dst, cpu_env, offs); +- break; +- default: +- g_assert_not_reached(); +- } +-} +- +-static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr, +- MemOp memop) +-{ +- const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); +- +- switch (memop) { +- case ES_8: +- tcg_gen_ld8u_i32(dst, cpu_env, offs); +- break; +- case ES_16: +- tcg_gen_ld16u_i32(dst, cpu_env, offs); +- break; +- case ES_8 | MO_SIGN: +- tcg_gen_ld8s_i32(dst, cpu_env, offs); +- break; +- case ES_16 | MO_SIGN: +- tcg_gen_ld16s_i32(dst, cpu_env, offs); +- break; +- case ES_32: +- case ES_32 | MO_SIGN: +- tcg_gen_ld_i32(dst, cpu_env, offs); +- break; +- default: +- g_assert_not_reached(); +- } +-} +- +-static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr, +- MemOp memop) +-{ +- const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); +- +- switch (memop) { +- case ES_8: +- tcg_gen_st8_i64(src, cpu_env, offs); +- break; +- case ES_16: +- tcg_gen_st16_i64(src, cpu_env, offs); +- break; +- case ES_32: +- tcg_gen_st32_i64(src, cpu_env, offs); +- break; +- case ES_64: +- tcg_gen_st_i64(src, cpu_env, offs); +- break; +- default: +- g_assert_not_reached(); +- } +-} +- +-static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr, +- MemOp memop) +-{ +- const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); +- +- switch (memop) { +- case ES_8: +- tcg_gen_st8_i32(src, cpu_env, offs); +- break; +- case ES_16: +- tcg_gen_st16_i32(src, cpu_env, offs); +- break; +- case ES_32: +- tcg_gen_st_i32(src, cpu_env, offs); +- break; +- default: +- g_assert_not_reached(); +- } +-} +- +-static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr, +- uint8_t es) +-{ +- TCGv_i64 tmp = tcg_temp_new_i64(); +- +- /* mask off invalid parts from the element nr */ +- tcg_gen_andi_i64(tmp, enr, NUM_VEC_ELEMENTS(es) - 1); +- +- /* convert it to an element offset relative to cpu_env (vec_reg_offset()) */ +- tcg_gen_shli_i64(tmp, tmp, es); +-#ifndef 
HOST_WORDS_BIGENDIAN +- tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es)); +-#endif +- tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg)); +- +- /* generate the final ptr by adding cpu_env */ +- tcg_gen_trunc_i64_ptr(ptr, tmp); +- tcg_gen_add_ptr(ptr, ptr, cpu_env); +- +- tcg_temp_free_i64(tmp); +-} +- +-#define gen_gvec_2(v1, v2, gen) \ +- tcg_gen_gvec_2(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- 16, 16, gen) +-#define gen_gvec_2s(v1, v2, c, gen) \ +- tcg_gen_gvec_2s(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- 16, 16, c, gen) +-#define gen_gvec_2_ool(v1, v2, data, fn) \ +- tcg_gen_gvec_2_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- 16, 16, data, fn) +-#define gen_gvec_2i_ool(v1, v2, c, data, fn) \ +- tcg_gen_gvec_2i_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- c, 16, 16, data, fn) +-#define gen_gvec_2_ptr(v1, v2, ptr, data, fn) \ +- tcg_gen_gvec_2_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- ptr, 16, 16, data, fn) +-#define gen_gvec_3(v1, v2, v3, gen) \ +- tcg_gen_gvec_3(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- vec_full_reg_offset(v3), 16, 16, gen) +-#define gen_gvec_3_ool(v1, v2, v3, data, fn) \ +- tcg_gen_gvec_3_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- vec_full_reg_offset(v3), 16, 16, data, fn) +-#define gen_gvec_3_ptr(v1, v2, v3, ptr, data, fn) \ +- tcg_gen_gvec_3_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- vec_full_reg_offset(v3), ptr, 16, 16, data, fn) +-#define gen_gvec_3i(v1, v2, v3, c, gen) \ +- tcg_gen_gvec_3i(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- vec_full_reg_offset(v3), 16, 16, c, gen) +-#define gen_gvec_4(v1, v2, v3, v4, gen) \ +- tcg_gen_gvec_4(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ +- 16, 16, gen) +-#define gen_gvec_4_ool(v1, v2, v3, v4, data, fn) \ +- tcg_gen_gvec_4_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ +- 16, 16, data, fn) +-#define gen_gvec_4_ptr(v1, v2, v3, v4, ptr, data, fn) \ +- tcg_gen_gvec_4_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- vec_full_reg_offset(v3), vec_full_reg_offset(v4), \ +- ptr, 16, 16, data, fn) +-#define gen_gvec_dup_i64(es, v1, c) \ +- tcg_gen_gvec_dup_i64(es, vec_full_reg_offset(v1), 16, 16, c) +-#define gen_gvec_mov(v1, v2) \ +- tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \ +- 16) +-#define gen_gvec_dup_imm(es, v1, c) \ +- tcg_gen_gvec_dup_imm(es, vec_full_reg_offset(v1), 16, 16, c); +-#define gen_gvec_fn_2(fn, es, v1, v2) \ +- tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- 16, 16) +-#define gen_gvec_fn_2i(fn, es, v1, v2, c) \ +- tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- c, 16, 16) +-#define gen_gvec_fn_2s(fn, es, v1, v2, s) \ +- tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- s, 16, 16) +-#define gen_gvec_fn_3(fn, es, v1, v2, v3) \ +- tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- vec_full_reg_offset(v3), 16, 16) +-#define gen_gvec_fn_4(fn, es, v1, v2, v3, v4) \ +- tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \ +- vec_full_reg_offset(v3), vec_full_reg_offset(v4), 16, 16) +- +-/* +- * Helper to carry out a 128 bit vector computation using 2 i64 values per +- * vector. 
+- */ +-typedef void (*gen_gvec128_3_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, +- TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh); +-static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a, +- uint8_t b) +-{ +- TCGv_i64 dh = tcg_temp_new_i64(); +- TCGv_i64 dl = tcg_temp_new_i64(); +- TCGv_i64 ah = tcg_temp_new_i64(); +- TCGv_i64 al = tcg_temp_new_i64(); +- TCGv_i64 bh = tcg_temp_new_i64(); +- TCGv_i64 bl = tcg_temp_new_i64(); +- +- read_vec_element_i64(ah, a, 0, ES_64); +- read_vec_element_i64(al, a, 1, ES_64); +- read_vec_element_i64(bh, b, 0, ES_64); +- read_vec_element_i64(bl, b, 1, ES_64); +- fn(dl, dh, al, ah, bl, bh); +- write_vec_element_i64(dh, d, 0, ES_64); +- write_vec_element_i64(dl, d, 1, ES_64); +- +- tcg_temp_free_i64(dh); +- tcg_temp_free_i64(dl); +- tcg_temp_free_i64(ah); +- tcg_temp_free_i64(al); +- tcg_temp_free_i64(bh); +- tcg_temp_free_i64(bl); +-} +- +-typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, +- TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh, +- TCGv_i64 cl, TCGv_i64 ch); +-static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a, +- uint8_t b, uint8_t c) +-{ +- TCGv_i64 dh = tcg_temp_new_i64(); +- TCGv_i64 dl = tcg_temp_new_i64(); +- TCGv_i64 ah = tcg_temp_new_i64(); +- TCGv_i64 al = tcg_temp_new_i64(); +- TCGv_i64 bh = tcg_temp_new_i64(); +- TCGv_i64 bl = tcg_temp_new_i64(); +- TCGv_i64 ch = tcg_temp_new_i64(); +- TCGv_i64 cl = tcg_temp_new_i64(); +- +- read_vec_element_i64(ah, a, 0, ES_64); +- read_vec_element_i64(al, a, 1, ES_64); +- read_vec_element_i64(bh, b, 0, ES_64); +- read_vec_element_i64(bl, b, 1, ES_64); +- read_vec_element_i64(ch, c, 0, ES_64); +- read_vec_element_i64(cl, c, 1, ES_64); +- fn(dl, dh, al, ah, bl, bh, cl, ch); +- write_vec_element_i64(dh, d, 0, ES_64); +- write_vec_element_i64(dl, d, 1, ES_64); +- +- tcg_temp_free_i64(dh); +- tcg_temp_free_i64(dl); +- tcg_temp_free_i64(ah); +- tcg_temp_free_i64(al); +- tcg_temp_free_i64(bh); +- tcg_temp_free_i64(bl); +- tcg_temp_free_i64(ch); +- tcg_temp_free_i64(cl); +-} +- +-static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, +- uint64_t b) +-{ +- TCGv_i64 bl = tcg_const_i64(b); +- TCGv_i64 bh = tcg_const_i64(0); +- +- tcg_gen_add2_i64(dl, dh, al, ah, bl, bh); +- tcg_temp_free_i64(bl); +- tcg_temp_free_i64(bh); +-} +- +-static DisasJumpType op_vbperm(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), get_field(s, v3), 0, +- gen_helper_gvec_vbperm); +- +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vge(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = s->insn->data; +- const uint8_t enr = get_field(s, m3); +- TCGv_i64 tmp; +- +- if (!valid_vec_element(enr, es)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- tmp = tcg_temp_new_i64(); +- read_vec_element_i64(tmp, get_field(s, v2), enr, es); +- tcg_gen_add_i64(o->addr1, o->addr1, tmp); +- gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0); +- +- tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); +- write_vec_element_i64(tmp, get_field(s, v1), enr, es); +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static uint64_t generate_byte_mask(uint8_t mask) +-{ +- uint64_t r = 0; +- int i; +- +- for (i = 0; i < 8; i++) { +- if ((mask >> i) & 1) { +- r |= 0xffull << (i * 8); +- } +- } +- return r; +-} +- +-static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o) +-{ +- const uint16_t i2 = get_field(s, i2); +- +- if (i2 == (i2 & 0xff) * 0x0101) { +- /* +- * Masks for both 
64 bit elements of the vector are the same. +- * Trust tcg to produce good code for loading the constant. +- */ +- gen_gvec_dup_imm(ES_64, get_field(s, v1), +- generate_byte_mask(i2 & 0xff)); +- } else { +- TCGv_i64 t = tcg_temp_new_i64(); +- +- tcg_gen_movi_i64(t, generate_byte_mask(i2 >> 8)); +- write_vec_element_i64(t, get_field(s, v1), 0, ES_64); +- tcg_gen_movi_i64(t, generate_byte_mask(i2)); +- write_vec_element_i64(t, get_field(s, v1), 1, ES_64); +- tcg_temp_free_i64(t); +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vgm(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- const uint8_t bits = NUM_VEC_ELEMENT_BITS(es); +- const uint8_t i2 = get_field(s, i2) & (bits - 1); +- const uint8_t i3 = get_field(s, i3) & (bits - 1); +- uint64_t mask = 0; +- int i; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- /* generate the mask - take care of wrapping */ +- for (i = i2; ; i = (i + 1) % bits) { +- mask |= 1ull << (bits - i - 1); +- if (i == i3) { +- break; +- } +- } +- +- gen_gvec_dup_imm(es, get_field(s, v1), mask); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vl(DisasContext *s, DisasOps *o) +-{ +- TCGv_i64 t0 = tcg_temp_new_i64(); +- TCGv_i64 t1 = tcg_temp_new_i64(); +- +- tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ); +- gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); +- tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ); +- write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); +- write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); +- tcg_temp_free(t0); +- tcg_temp_free(t1); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vlr(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_mov(get_field(s, v1), get_field(s, v2)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m3); +- TCGv_i64 tmp; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- tmp = tcg_temp_new_i64(); +- tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); +- gen_gvec_dup_i64(es, get_field(s, v1), tmp); +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vle(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = s->insn->data; +- const uint8_t enr = get_field(s, m3); +- TCGv_i64 tmp; +- +- if (!valid_vec_element(enr, es)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- tmp = tcg_temp_new_i64(); +- tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); +- write_vec_element_i64(tmp, get_field(s, v1), enr, es); +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vlei(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = s->insn->data; +- const uint8_t enr = get_field(s, m3); +- TCGv_i64 tmp; +- +- if (!valid_vec_element(enr, es)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- tmp = tcg_const_i64((int16_t)get_field(s, i2)); +- write_vec_element_i64(tmp, get_field(s, v1), enr, es); +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- TCGv_ptr ptr; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- /* fast path if we don't need the register content */ +- if (!get_field(s, b2)) { +- uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1);
+- +- read_vec_element_i64(o->out, get_field(s, v3), enr, es); +- return DISAS_NEXT; +- } +- +- ptr = tcg_temp_new_ptr(); +- get_vec_element_ptr_i64(ptr, get_field(s, v3), o->addr1, es); +- switch (es) { +- case ES_8: +- tcg_gen_ld8u_i64(o->out, ptr, 0); +- break; +- case ES_16: +- tcg_gen_ld16u_i64(o->out, ptr, 0); +- break; +- case ES_32: +- tcg_gen_ld32u_i64(o->out, ptr, 0); +- break; +- case ES_64: +- tcg_gen_ld_i64(o->out, ptr, 0); +- break; +- default: +- g_assert_not_reached(); +- } +- tcg_temp_free_ptr(ptr); +- +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vllez(DisasContext *s, DisasOps *o) +-{ +- uint8_t es = get_field(s, m3); +- uint8_t enr; +- TCGv_i64 t; +- +- switch (es) { +- /* rightmost sub-element of leftmost doubleword */ +- case ES_8: +- enr = 7; +- break; +- case ES_16: +- enr = 3; +- break; +- case ES_32: +- enr = 1; +- break; +- case ES_64: +- enr = 0; +- break; +- /* leftmost sub-element of leftmost doubleword */ +- case 6: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- es = ES_32; +- enr = 0; +- break; +- } +- /* fallthrough */ +- default: +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- t = tcg_temp_new_i64(); +- tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es); +- gen_gvec_dup_imm(es, get_field(s, v1), 0); +- write_vec_element_i64(t, get_field(s, v1), enr, es); +- tcg_temp_free_i64(t); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vlm(DisasContext *s, DisasOps *o) +-{ +- const uint8_t v3 = get_field(s, v3); +- uint8_t v1 = get_field(s, v1); +- TCGv_i64 t0, t1; +- +- if (v3 < v1 || (v3 - v1 + 1) > 16) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- /* +- * Check for possible access exceptions by trying to load the last +- * element. The first element is checked next, by the loop below.
+- */ +- t0 = tcg_temp_new_i64(); +- t1 = tcg_temp_new_i64(); +- gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8); +- tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ); +- +- for (;; v1++) { +- tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ); +- write_vec_element_i64(t1, v1, 0, ES_64); +- if (v1 == v3) { +- break; +- } +- gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); +- tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ); +- write_vec_element_i64(t1, v1, 1, ES_64); +- gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); +- } +- +- /* Store the last element, loaded first */ +- write_vec_element_i64(t0, v1, 1, ES_64); +- +- tcg_temp_free_i64(t0); +- tcg_temp_free_i64(t1); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o) +-{ +- const int64_t block_size = (1ull << (get_field(s, m3) + 6)); +- const int v1_offs = vec_full_reg_offset(get_field(s, v1)); +- TCGv_ptr a0; +- TCGv_i64 bytes; +- +- if (get_field(s, m3) > 6) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- bytes = tcg_temp_new_i64(); +- a0 = tcg_temp_new_ptr(); +- /* calculate the number of bytes until the next block boundary */ +- tcg_gen_ori_i64(bytes, o->addr1, -block_size); +- tcg_gen_neg_i64(bytes, bytes); +- +- tcg_gen_addi_ptr(a0, cpu_env, v1_offs); +- gen_helper_vll(cpu_env, a0, o->addr1, bytes); +- tcg_temp_free_i64(bytes); +- tcg_temp_free_ptr(a0); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- TCGv_ptr ptr; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- /* fast path if we don't need the register content */ +- if (!get_field(s, b2)) { +- uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1); +- +- write_vec_element_i64(o->in2, get_field(s, v1), enr, es); +- return DISAS_NEXT; +- } +- +- ptr = tcg_temp_new_ptr(); +- get_vec_element_ptr_i64(ptr, get_field(s, v1), o->addr1, es); +- switch (es) { +- case ES_8: +- tcg_gen_st8_i64(o->in2, ptr, 0); +- break; +- case ES_16: +- tcg_gen_st16_i64(o->in2, ptr, 0); +- break; +- case ES_32: +- tcg_gen_st32_i64(o->in2, ptr, 0); +- break; +- case ES_64: +- tcg_gen_st_i64(o->in2, ptr, 0); +- break; +- default: +- g_assert_not_reached(); +- } +- tcg_temp_free_ptr(ptr); +- +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o) +-{ +- write_vec_element_i64(o->in1, get_field(s, v1), 0, ES_64); +- write_vec_element_i64(o->in2, get_field(s, v1), 1, ES_64); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vll(DisasContext *s, DisasOps *o) +-{ +- const int v1_offs = vec_full_reg_offset(get_field(s, v1)); +- TCGv_ptr a0 = tcg_temp_new_ptr(); +- +- /* convert highest index into an actual length */ +- tcg_gen_addi_i64(o->in2, o->in2, 1); +- tcg_gen_addi_ptr(a0, cpu_env, v1_offs); +- gen_helper_vll(cpu_env, a0, o->addr1, o->in2); +- tcg_temp_free_ptr(a0); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vmr(DisasContext *s, DisasOps *o) +-{ +- const uint8_t v1 = get_field(s, v1); +- const uint8_t v2 = get_field(s, v2); +- const uint8_t v3 = get_field(s, v3); +- const uint8_t es = get_field(s, m4); +- int dst_idx, src_idx; +- TCGv_i64 tmp; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- tmp = tcg_temp_new_i64(); +- if (s->fields.op2 == 0x61) { +- /* iterate backwards to avoid overwriting data we might need later */ +- for (dst_idx = 
NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) { +- src_idx = dst_idx / 2; +- if (dst_idx % 2 == 0) { +- read_vec_element_i64(tmp, v2, src_idx, es); +- } else { +- read_vec_element_i64(tmp, v3, src_idx, es); +- } +- write_vec_element_i64(tmp, v1, dst_idx, es); +- } +- } else { +- /* iterate forward to avoid overwriting data we might need later */ +- for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(es); dst_idx++) { +- src_idx = (dst_idx + NUM_VEC_ELEMENTS(es)) / 2; +- if (dst_idx % 2 == 0) { +- read_vec_element_i64(tmp, v2, src_idx, es); +- } else { +- read_vec_element_i64(tmp, v3, src_idx, es); +- } +- write_vec_element_i64(tmp, v1, dst_idx, es); +- } +- } +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) +-{ +- const uint8_t v1 = get_field(s, v1); +- const uint8_t v2 = get_field(s, v2); +- const uint8_t v3 = get_field(s, v3); +- const uint8_t es = get_field(s, m4); +- static gen_helper_gvec_3 * const vpk[3] = { +- gen_helper_gvec_vpk16, +- gen_helper_gvec_vpk32, +- gen_helper_gvec_vpk64, +- }; +- static gen_helper_gvec_3 * const vpks[3] = { +- gen_helper_gvec_vpks16, +- gen_helper_gvec_vpks32, +- gen_helper_gvec_vpks64, +- }; +- static gen_helper_gvec_3_ptr * const vpks_cc[3] = { +- gen_helper_gvec_vpks_cc16, +- gen_helper_gvec_vpks_cc32, +- gen_helper_gvec_vpks_cc64, +- }; +- static gen_helper_gvec_3 * const vpkls[3] = { +- gen_helper_gvec_vpkls16, +- gen_helper_gvec_vpkls32, +- gen_helper_gvec_vpkls64, +- }; +- static gen_helper_gvec_3_ptr * const vpkls_cc[3] = { +- gen_helper_gvec_vpkls_cc16, +- gen_helper_gvec_vpkls_cc32, +- gen_helper_gvec_vpkls_cc64, +- }; +- +- if (es == ES_8 || es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- switch (s->fields.op2) { +- case 0x97: +- if (get_field(s, m5) & 0x1) { +- gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpks_cc[es - 1]); +- set_cc_static(s); +- } else { +- gen_gvec_3_ool(v1, v2, v3, 0, vpks[es - 1]); +- } +- break; +- case 0x95: +- if (get_field(s, m5) & 0x1) { +- gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpkls_cc[es - 1]); +- set_cc_static(s); +- } else { +- gen_gvec_3_ool(v1, v2, v3, 0, vpkls[es - 1]); +- } +- break; +- case 0x94: +- /* If sources and destination don't overlap -> fast path */ +- if (v1 != v2 && v1 != v3) { +- const uint8_t src_es = get_field(s, m4); +- const uint8_t dst_es = src_es - 1; +- TCGv_i64 tmp = tcg_temp_new_i64(); +- int dst_idx, src_idx; +- +- for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) { +- src_idx = dst_idx; +- if (src_idx < NUM_VEC_ELEMENTS(src_es)) { +- read_vec_element_i64(tmp, v2, src_idx, src_es); +- } else { +- src_idx -= NUM_VEC_ELEMENTS(src_es); +- read_vec_element_i64(tmp, v3, src_idx, src_es); +- } +- write_vec_element_i64(tmp, v1, dst_idx, dst_es); +- } +- tcg_temp_free_i64(tmp); +- } else { +- gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]); +- } +- break; +- default: +- g_assert_not_reached(); +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vperm(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_4_ool(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), get_field(s, v4), +- 0, gen_helper_gvec_vperm); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o) +-{ +- const uint8_t i2 = extract32(get_field(s, m4), 2, 1); +- const uint8_t i3 = extract32(get_field(s, m4), 0, 1); +- TCGv_i64 t0 = tcg_temp_new_i64(); +- TCGv_i64 t1 = tcg_temp_new_i64(); +- +- read_vec_element_i64(t0, get_field(s, v2), i2, ES_64); +- 
read_vec_element_i64(t1, get_field(s, v3), i3, ES_64); +- write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); +- write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); +- tcg_temp_free_i64(t0); +- tcg_temp_free_i64(t1); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vrep(DisasContext *s, DisasOps *o) +-{ +- const uint8_t enr = get_field(s, i2); +- const uint8_t es = get_field(s, m4); +- +- if (es > ES_64 || !valid_vec_element(enr, es)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- tcg_gen_gvec_dup_mem(es, vec_full_reg_offset(get_field(s, v1)), +- vec_reg_offset(get_field(s, v3), enr, es), +- 16, 16); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o) +-{ +- const int64_t data = (int16_t)get_field(s, i2); +- const uint8_t es = get_field(s, m3); +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_dup_imm(es, get_field(s, v1), data); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vsce(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = s->insn->data; +- const uint8_t enr = get_field(s, m3); +- TCGv_i64 tmp; +- +- if (!valid_vec_element(enr, es)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- tmp = tcg_temp_new_i64(); +- read_vec_element_i64(tmp, get_field(s, v2), enr, es); +- tcg_gen_add_i64(o->addr1, o->addr1, tmp); +- gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0); +- +- read_vec_element_i64(tmp, get_field(s, v1), enr, es); +- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vsel(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_fn_4(bitsel, ES_8, get_field(s, v1), +- get_field(s, v4), get_field(s, v2), +- get_field(s, v3)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vseg(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m3); +- int idx1, idx2; +- TCGv_i64 tmp; +- +- switch (es) { +- case ES_8: +- idx1 = 7; +- idx2 = 15; +- break; +- case ES_16: +- idx1 = 3; +- idx2 = 7; +- break; +- case ES_32: +- idx1 = 1; +- idx2 = 3; +- break; +- default: +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- tmp = tcg_temp_new_i64(); +- read_vec_element_i64(tmp, get_field(s, v2), idx1, es | MO_SIGN); +- write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); +- read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN); +- write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64); +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vst(DisasContext *s, DisasOps *o) +-{ +- TCGv_i64 tmp = tcg_const_i64(16); +- +- /* Probe write access before actually modifying memory */ +- gen_helper_probe_write_access(cpu_env, o->addr1, tmp); +- +- read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64); +- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); +- gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); +- read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64); +- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vste(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = s->insn->data; +- const uint8_t enr = get_field(s, m3); +- TCGv_i64 tmp; +- +- if (!valid_vec_element(enr, es)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- tmp = tcg_temp_new_i64(); +- read_vec_element_i64(tmp, 
get_field(s, v1), enr, es); +- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es); +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vstm(DisasContext *s, DisasOps *o) +-{ +- const uint8_t v3 = get_field(s, v3); +- uint8_t v1 = get_field(s, v1); +- TCGv_i64 tmp; +- +- if (v3 < v1 || (v3 - v1 + 1) > 16) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- /* Probe write access before actually modifying memory */ +- tmp = tcg_const_i64((v3 - v1 + 1) * 16); +- gen_helper_probe_write_access(cpu_env, o->addr1, tmp); +- +- for (;; v1++) { +- read_vec_element_i64(tmp, v1, 0, ES_64); +- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); +- gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); +- read_vec_element_i64(tmp, v1, 1, ES_64); +- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ); +- if (v1 == v3) { +- break; +- } +- gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8); +- } +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vstl(DisasContext *s, DisasOps *o) +-{ +- const int v1_offs = vec_full_reg_offset(get_field(s, v1)); +- TCGv_ptr a0 = tcg_temp_new_ptr(); +- +- /* convert highest index into an actual length */ +- tcg_gen_addi_i64(o->in2, o->in2, 1); +- tcg_gen_addi_ptr(a0, cpu_env, v1_offs); +- gen_helper_vstl(cpu_env, a0, o->addr1, o->in2); +- tcg_temp_free_ptr(a0); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vup(DisasContext *s, DisasOps *o) +-{ +- const bool logical = s->fields.op2 == 0xd4 || s->fields.op2 == 0xd5; +- const uint8_t v1 = get_field(s, v1); +- const uint8_t v2 = get_field(s, v2); +- const uint8_t src_es = get_field(s, m3); +- const uint8_t dst_es = src_es + 1; +- int dst_idx, src_idx; +- TCGv_i64 tmp; +- +- if (src_es > ES_32) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- tmp = tcg_temp_new_i64(); +- if (s->fields.op2 == 0xd7 || s->fields.op2 == 0xd5) { +- /* iterate backwards to avoid overwriting data we might need later */ +- for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) { +- src_idx = dst_idx; +- read_vec_element_i64(tmp, v2, src_idx, +- src_es | (logical ? 0 : MO_SIGN)); +- write_vec_element_i64(tmp, v1, dst_idx, dst_es); +- } +- +- } else { +- /* iterate forward to avoid overwriting data we might need later */ +- for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) { +- src_idx = dst_idx + NUM_VEC_ELEMENTS(src_es) / 2; +- read_vec_element_i64(tmp, v2, src_idx, +- src_es | (logical ?
0 : MO_SIGN)); +- write_vec_element_i64(tmp, v1, dst_idx, dst_es); +- } +- } +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_va(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- +- if (es > ES_128) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } else if (es == ES_128) { +- gen_gvec128_3_i64(tcg_gen_add2_i64, get_field(s, v1), +- get_field(s, v2), get_field(s, v3)); +- return DISAS_NEXT; +- } +- gen_gvec_fn_3(add, es, get_field(s, v1), get_field(s, v2), +- get_field(s, v3)); +- return DISAS_NEXT; +-} +- +-static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es) +-{ +- const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1; +- TCGv_i64 msb_mask = tcg_const_i64(dup_const(es, 1ull << msb_bit_nr)); +- TCGv_i64 t1 = tcg_temp_new_i64(); +- TCGv_i64 t2 = tcg_temp_new_i64(); +- TCGv_i64 t3 = tcg_temp_new_i64(); +- +- /* Calculate the carry into the MSB, ignoring the old MSBs */ +- tcg_gen_andc_i64(t1, a, msb_mask); +- tcg_gen_andc_i64(t2, b, msb_mask); +- tcg_gen_add_i64(t1, t1, t2); +- /* Calculate the MSB without any carry into it */ +- tcg_gen_xor_i64(t3, a, b); +- /* Calculate the carry out of the MSB in the MSB bit position */ +- tcg_gen_and_i64(d, a, b); +- tcg_gen_and_i64(t1, t1, t3); +- tcg_gen_or_i64(d, d, t1); +- /* Isolate and shift the carry into position */ +- tcg_gen_and_i64(d, d, msb_mask); +- tcg_gen_shri_i64(d, d, msb_bit_nr); +- +- tcg_temp_free_i64(t1); +- tcg_temp_free_i64(t2); +- tcg_temp_free_i64(t3); +-} +- +-static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +-{ +- gen_acc(d, a, b, ES_8); +-} +- +-static void gen_acc16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +-{ +- gen_acc(d, a, b, ES_16); +-} +- +-static void gen_acc_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +-{ +- TCGv_i32 t = tcg_temp_new_i32(); +- +- tcg_gen_add_i32(t, a, b); +- tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b); +- tcg_temp_free_i32(t); +-} +- +-static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +-{ +- TCGv_i64 t = tcg_temp_new_i64(); +- +- tcg_gen_add_i64(t, a, b); +- tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b); +- tcg_temp_free_i64(t); +-} +- +-static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, +- TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) +-{ +- TCGv_i64 th = tcg_temp_new_i64(); +- TCGv_i64 tl = tcg_temp_new_i64(); +- TCGv_i64 zero = tcg_const_i64(0); +- +- tcg_gen_add2_i64(tl, th, al, zero, bl, zero); +- tcg_gen_add2_i64(tl, th, th, zero, ah, zero); +- tcg_gen_add2_i64(tl, dl, tl, th, bh, zero); +- tcg_gen_mov_i64(dh, zero); +- +- tcg_temp_free_i64(th); +- tcg_temp_free_i64(tl); +- tcg_temp_free_i64(zero); +-} +- +-static DisasJumpType op_vacc(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- static const GVecGen3 g[4] = { +- { .fni8 = gen_acc8_i64, }, +- { .fni8 = gen_acc16_i64, }, +- { .fni4 = gen_acc_i32, }, +- { .fni8 = gen_acc_i64, }, +- }; +- +- if (es > ES_128) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } else if (es == ES_128) { +- gen_gvec128_3_i64(gen_acc2_i64, get_field(s, v1), +- get_field(s, v2), get_field(s, v3)); +- return DISAS_NEXT; +- } +- gen_gvec_3(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), &g[es]); +- return DISAS_NEXT; +-} +- +-static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, +- TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) +-{ +- TCGv_i64 tl = tcg_temp_new_i64(); +- TCGv_i64 th = tcg_const_i64(0); +- +- /* extract the carry only */ +- 
tcg_gen_extract_i64(tl, cl, 0, 1); +- tcg_gen_add2_i64(dl, dh, al, ah, bl, bh); +- tcg_gen_add2_i64(dl, dh, dl, dh, tl, th); +- +- tcg_temp_free_i64(tl); +- tcg_temp_free_i64(th); +-} +- +-static DisasJumpType op_vac(DisasContext *s, DisasOps *o) +-{ +- if (get_field(s, m5) != ES_128) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec128_4_i64(gen_ac2_i64, get_field(s, v1), +- get_field(s, v2), get_field(s, v3), +- get_field(s, v4)); +- return DISAS_NEXT; +-} +- +-static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, +- TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) +-{ +- TCGv_i64 tl = tcg_temp_new_i64(); +- TCGv_i64 th = tcg_temp_new_i64(); +- TCGv_i64 zero = tcg_const_i64(0); +- +- tcg_gen_andi_i64(tl, cl, 1); +- tcg_gen_add2_i64(tl, th, tl, zero, al, zero); +- tcg_gen_add2_i64(tl, th, tl, th, bl, zero); +- tcg_gen_add2_i64(tl, th, th, zero, ah, zero); +- tcg_gen_add2_i64(tl, dl, tl, th, bh, zero); +- tcg_gen_mov_i64(dh, zero); +- +- tcg_temp_free_i64(tl); +- tcg_temp_free_i64(th); +- tcg_temp_free_i64(zero); +-} +- +-static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o) +-{ +- if (get_field(s, m5) != ES_128) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec128_4_i64(gen_accc2_i64, get_field(s, v1), +- get_field(s, v2), get_field(s, v3), +- get_field(s, v4)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vn(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_fn_3(and, ES_8, get_field(s, v1), get_field(s, v2), +- get_field(s, v3)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vnc(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_fn_3(andc, ES_8, get_field(s, v1), +- get_field(s, v2), get_field(s, v3)); +- return DISAS_NEXT; +-} +- +-static void gen_avg_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +-{ +- TCGv_i64 t0 = tcg_temp_new_i64(); +- TCGv_i64 t1 = tcg_temp_new_i64(); +- +- tcg_gen_ext_i32_i64(t0, a); +- tcg_gen_ext_i32_i64(t1, b); +- tcg_gen_add_i64(t0, t0, t1); +- tcg_gen_addi_i64(t0, t0, 1); +- tcg_gen_shri_i64(t0, t0, 1); +- tcg_gen_extrl_i64_i32(d, t0); +- +- tcg_temp_free(t0); +- tcg_temp_free(t1); +-} +- +-static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) +-{ +- TCGv_i64 dh = tcg_temp_new_i64(); +- TCGv_i64 ah = tcg_temp_new_i64(); +- TCGv_i64 bh = tcg_temp_new_i64(); +- +- /* extending the sign by one bit is sufficient */ +- tcg_gen_extract_i64(ah, al, 63, 1); +- tcg_gen_extract_i64(bh, bl, 63, 1); +- tcg_gen_add2_i64(dl, dh, al, ah, bl, bh); +- gen_addi2_i64(dl, dh, dl, dh, 1); +- tcg_gen_extract2_i64(dl, dl, dh, 1); +- +- tcg_temp_free_i64(dh); +- tcg_temp_free_i64(ah); +- tcg_temp_free_i64(bh); +-} +- +-static DisasJumpType op_vavg(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- static const GVecGen3 g[4] = { +- { .fno = gen_helper_gvec_vavg8, }, +- { .fno = gen_helper_gvec_vavg16, }, +- { .fni4 = gen_avg_i32, }, +- { .fni8 = gen_avg_i64, }, +- }; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- gen_gvec_3(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), &g[es]); +- return DISAS_NEXT; +-} +- +-static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +-{ +- TCGv_i64 t0 = tcg_temp_new_i64(); +- TCGv_i64 t1 = tcg_temp_new_i64(); +- +- tcg_gen_extu_i32_i64(t0, a); +- tcg_gen_extu_i32_i64(t1, b); +- tcg_gen_add_i64(t0, t0, t1); +- tcg_gen_addi_i64(t0, t0, 1); +- tcg_gen_shri_i64(t0, t0, 1); +- tcg_gen_extrl_i64_i32(d, t0); +- +- 
tcg_temp_free(t0); +- tcg_temp_free(t1); +-} +- +-static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl) +-{ +- TCGv_i64 dh = tcg_temp_new_i64(); +- TCGv_i64 zero = tcg_const_i64(0); +- +- tcg_gen_add2_i64(dl, dh, al, zero, bl, zero); +- gen_addi2_i64(dl, dh, dl, dh, 1); +- tcg_gen_extract2_i64(dl, dl, dh, 1); +- +- tcg_temp_free_i64(dh); +- tcg_temp_free_i64(zero); +-} +- +-static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- static const GVecGen3 g[4] = { +- { .fno = gen_helper_gvec_vavgl8, }, +- { .fno = gen_helper_gvec_vavgl16, }, +- { .fni4 = gen_avgl_i32, }, +- { .fni8 = gen_avgl_i64, }, +- }; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- gen_gvec_3(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), &g[es]); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o) +-{ +- TCGv_i32 tmp = tcg_temp_new_i32(); +- TCGv_i32 sum = tcg_temp_new_i32(); +- int i; +- +- read_vec_element_i32(sum, get_field(s, v3), 1, ES_32); +- for (i = 0; i < 4; i++) { +- read_vec_element_i32(tmp, get_field(s, v2), i, ES_32); +- tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp); +- } +- gen_gvec_dup_imm(ES_32, get_field(s, v1), 0); +- write_vec_element_i32(sum, get_field(s, v1), 1, ES_32); +- +- tcg_temp_free_i32(tmp); +- tcg_temp_free_i32(sum); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vec(DisasContext *s, DisasOps *o) +-{ +- uint8_t es = get_field(s, m3); +- const uint8_t enr = NUM_VEC_ELEMENTS(es) / 2 - 1; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- if (s->fields.op2 == 0xdb) { +- es |= MO_SIGN; +- } +- +- o->in1 = tcg_temp_new_i64(); +- o->in2 = tcg_temp_new_i64(); +- read_vec_element_i64(o->in1, get_field(s, v1), enr, es); +- read_vec_element_i64(o->in2, get_field(s, v2), enr, es); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vc(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- TCGCond cond = s->insn->data; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- tcg_gen_gvec_cmp(cond, es, +- vec_full_reg_offset(get_field(s, v1)), +- vec_full_reg_offset(get_field(s, v2)), +- vec_full_reg_offset(get_field(s, v3)), 16, 16); +- if (get_field(s, m5) & 0x1) { +- TCGv_i64 low = tcg_temp_new_i64(); +- TCGv_i64 high = tcg_temp_new_i64(); +- +- read_vec_element_i64(high, get_field(s, v1), 0, ES_64); +- read_vec_element_i64(low, get_field(s, v1), 1, ES_64); +- gen_op_update2_cc_i64(s, CC_OP_VC, low, high); +- +- tcg_temp_free_i64(low); +- tcg_temp_free_i64(high); +- } +- return DISAS_NEXT; +-} +- +-static void gen_clz_i32(TCGv_i32 d, TCGv_i32 a) +-{ +- tcg_gen_clzi_i32(d, a, 32); +-} +- +-static void gen_clz_i64(TCGv_i64 d, TCGv_i64 a) +-{ +- tcg_gen_clzi_i64(d, a, 64); +-} +- +-static DisasJumpType op_vclz(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m3); +- static const GVecGen2 g[4] = { +- { .fno = gen_helper_gvec_vclz8, }, +- { .fno = gen_helper_gvec_vclz16, }, +- { .fni4 = gen_clz_i32, }, +- { .fni8 = gen_clz_i64, }, +- }; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]); +- return DISAS_NEXT; +-} +- +-static void gen_ctz_i32(TCGv_i32 d, TCGv_i32 a) +-{ +- tcg_gen_ctzi_i32(d, a, 32); +-} +- +-static void gen_ctz_i64(TCGv_i64 d, TCGv_i64 a) +-{ +- 
tcg_gen_ctzi_i64(d, a, 64); +-} +- +-static DisasJumpType op_vctz(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m3); +- static const GVecGen2 g[4] = { +- { .fno = gen_helper_gvec_vctz8, }, +- { .fno = gen_helper_gvec_vctz16, }, +- { .fni4 = gen_ctz_i32, }, +- { .fni8 = gen_ctz_i64, }, +- }; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vx(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_fn_3(xor, ES_8, get_field(s, v1), get_field(s, v2), +- get_field(s, v3)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- static const GVecGen3 g[4] = { +- { .fno = gen_helper_gvec_vgfm8, }, +- { .fno = gen_helper_gvec_vgfm16, }, +- { .fno = gen_helper_gvec_vgfm32, }, +- { .fno = gen_helper_gvec_vgfm64, }, +- }; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- gen_gvec_3(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), &g[es]); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m5); +- static const GVecGen4 g[4] = { +- { .fno = gen_helper_gvec_vgfma8, }, +- { .fno = gen_helper_gvec_vgfma16, }, +- { .fno = gen_helper_gvec_vgfma32, }, +- { .fno = gen_helper_gvec_vgfma64, }, +- }; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- gen_gvec_4(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), get_field(s, v4), &g[es]); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vlc(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m3); +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_fn_2(neg, es, get_field(s, v1), get_field(s, v2)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vlp(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m3); +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_fn_2(abs, es, get_field(s, v1), get_field(s, v2)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vmx(DisasContext *s, DisasOps *o) +-{ +- const uint8_t v1 = get_field(s, v1); +- const uint8_t v2 = get_field(s, v2); +- const uint8_t v3 = get_field(s, v3); +- const uint8_t es = get_field(s, m4); +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- switch (s->fields.op2) { +- case 0xff: +- gen_gvec_fn_3(smax, es, v1, v2, v3); +- break; +- case 0xfd: +- gen_gvec_fn_3(umax, es, v1, v2, v3); +- break; +- case 0xfe: +- gen_gvec_fn_3(smin, es, v1, v2, v3); +- break; +- case 0xfc: +- gen_gvec_fn_3(umin, es, v1, v2, v3); +- break; +- default: +- g_assert_not_reached(); +- } +- return DISAS_NEXT; +-} +- +-static void gen_mal_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) +-{ +- TCGv_i32 t0 = tcg_temp_new_i32(); +- +- tcg_gen_mul_i32(t0, a, b); +- tcg_gen_add_i32(d, t0, c); +- +- tcg_temp_free_i32(t0); +-} +- +-static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) +-{ +- TCGv_i64 t0 = tcg_temp_new_i64(); +- TCGv_i64 t1 = tcg_temp_new_i64(); +- TCGv_i64 t2 = tcg_temp_new_i64(); +- +- tcg_gen_ext_i32_i64(t0, a); +- tcg_gen_ext_i32_i64(t1, b); +- tcg_gen_ext_i32_i64(t2, c); +- tcg_gen_mul_i64(t0, t0, t1); 
+- tcg_gen_add_i64(t0, t0, t2); +- tcg_gen_extrh_i64_i32(d, t0); +- +- tcg_temp_free(t0); +- tcg_temp_free(t1); +- tcg_temp_free(t2); +-} +- +-static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c) +-{ +- TCGv_i64 t0 = tcg_temp_new_i64(); +- TCGv_i64 t1 = tcg_temp_new_i64(); +- TCGv_i64 t2 = tcg_temp_new_i64(); +- +- tcg_gen_extu_i32_i64(t0, a); +- tcg_gen_extu_i32_i64(t1, b); +- tcg_gen_extu_i32_i64(t2, c); +- tcg_gen_mul_i64(t0, t0, t1); +- tcg_gen_add_i64(t0, t0, t2); +- tcg_gen_extrh_i64_i32(d, t0); +- +- tcg_temp_free(t0); +- tcg_temp_free(t1); +- tcg_temp_free(t2); +-} +- +-static DisasJumpType op_vma(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m5); +- static const GVecGen4 g_vmal[3] = { +- { .fno = gen_helper_gvec_vmal8, }, +- { .fno = gen_helper_gvec_vmal16, }, +- { .fni4 = gen_mal_i32, }, +- }; +- static const GVecGen4 g_vmah[3] = { +- { .fno = gen_helper_gvec_vmah8, }, +- { .fno = gen_helper_gvec_vmah16, }, +- { .fni4 = gen_mah_i32, }, +- }; +- static const GVecGen4 g_vmalh[3] = { +- { .fno = gen_helper_gvec_vmalh8, }, +- { .fno = gen_helper_gvec_vmalh16, }, +- { .fni4 = gen_malh_i32, }, +- }; +- static const GVecGen4 g_vmae[3] = { +- { .fno = gen_helper_gvec_vmae8, }, +- { .fno = gen_helper_gvec_vmae16, }, +- { .fno = gen_helper_gvec_vmae32, }, +- }; +- static const GVecGen4 g_vmale[3] = { +- { .fno = gen_helper_gvec_vmale8, }, +- { .fno = gen_helper_gvec_vmale16, }, +- { .fno = gen_helper_gvec_vmale32, }, +- }; +- static const GVecGen4 g_vmao[3] = { +- { .fno = gen_helper_gvec_vmao8, }, +- { .fno = gen_helper_gvec_vmao16, }, +- { .fno = gen_helper_gvec_vmao32, }, +- }; +- static const GVecGen4 g_vmalo[3] = { +- { .fno = gen_helper_gvec_vmalo8, }, +- { .fno = gen_helper_gvec_vmalo16, }, +- { .fno = gen_helper_gvec_vmalo32, }, +- }; +- const GVecGen4 *fn; +- +- if (es > ES_32) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- switch (s->fields.op2) { +- case 0xaa: +- fn = &g_vmal[es]; +- break; +- case 0xab: +- fn = &g_vmah[es]; +- break; +- case 0xa9: +- fn = &g_vmalh[es]; +- break; +- case 0xae: +- fn = &g_vmae[es]; +- break; +- case 0xac: +- fn = &g_vmale[es]; +- break; +- case 0xaf: +- fn = &g_vmao[es]; +- break; +- case 0xad: +- fn = &g_vmalo[es]; +- break; +- default: +- g_assert_not_reached(); +- } +- +- gen_gvec_4(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), get_field(s, v4), fn); +- return DISAS_NEXT; +-} +- +-static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +-{ +- TCGv_i32 t = tcg_temp_new_i32(); +- +- tcg_gen_muls2_i32(t, d, a, b); +- tcg_temp_free_i32(t); +-} +- +-static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +-{ +- TCGv_i32 t = tcg_temp_new_i32(); +- +- tcg_gen_mulu2_i32(t, d, a, b); +- tcg_temp_free_i32(t); +-} +- +-static DisasJumpType op_vm(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- static const GVecGen3 g_vmh[3] = { +- { .fno = gen_helper_gvec_vmh8, }, +- { .fno = gen_helper_gvec_vmh16, }, +- { .fni4 = gen_mh_i32, }, +- }; +- static const GVecGen3 g_vmlh[3] = { +- { .fno = gen_helper_gvec_vmlh8, }, +- { .fno = gen_helper_gvec_vmlh16, }, +- { .fni4 = gen_mlh_i32, }, +- }; +- static const GVecGen3 g_vme[3] = { +- { .fno = gen_helper_gvec_vme8, }, +- { .fno = gen_helper_gvec_vme16, }, +- { .fno = gen_helper_gvec_vme32, }, +- }; +- static const GVecGen3 g_vmle[3] = { +- { .fno = gen_helper_gvec_vmle8, }, +- { .fno = gen_helper_gvec_vmle16, }, +- { .fno = gen_helper_gvec_vmle32, }, +- }; +- static const GVecGen3 g_vmo[3] 
= { +- { .fno = gen_helper_gvec_vmo8, }, +- { .fno = gen_helper_gvec_vmo16, }, +- { .fno = gen_helper_gvec_vmo32, }, +- }; +- static const GVecGen3 g_vmlo[3] = { +- { .fno = gen_helper_gvec_vmlo8, }, +- { .fno = gen_helper_gvec_vmlo16, }, +- { .fno = gen_helper_gvec_vmlo32, }, +- }; +- const GVecGen3 *fn; +- +- if (es > ES_32) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- switch (s->fields.op2) { +- case 0xa2: +- gen_gvec_fn_3(mul, es, get_field(s, v1), +- get_field(s, v2), get_field(s, v3)); +- return DISAS_NEXT; +- case 0xa3: +- fn = &g_vmh[es]; +- break; +- case 0xa1: +- fn = &g_vmlh[es]; +- break; +- case 0xa6: +- fn = &g_vme[es]; +- break; +- case 0xa4: +- fn = &g_vmle[es]; +- break; +- case 0xa7: +- fn = &g_vmo[es]; +- break; +- case 0xa5: +- fn = &g_vmlo[es]; +- break; +- default: +- g_assert_not_reached(); +- } +- +- gen_gvec_3(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), fn); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o) +-{ +- TCGv_i64 l1, h1, l2, h2; +- +- if (get_field(s, m4) != ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- l1 = tcg_temp_new_i64(); +- h1 = tcg_temp_new_i64(); +- l2 = tcg_temp_new_i64(); +- h2 = tcg_temp_new_i64(); +- +- /* Multiply both even elements from v2 and v3 */ +- read_vec_element_i64(l1, get_field(s, v2), 0, ES_64); +- read_vec_element_i64(h1, get_field(s, v3), 0, ES_64); +- tcg_gen_mulu2_i64(l1, h1, l1, h1); +- /* Shift result left by one (x2) if requested */ +- if (extract32(get_field(s, m6), 3, 1)) { +- tcg_gen_add2_i64(l1, h1, l1, h1, l1, h1); +- } +- +- /* Multiply both odd elements from v2 and v3 */ +- read_vec_element_i64(l2, get_field(s, v2), 1, ES_64); +- read_vec_element_i64(h2, get_field(s, v3), 1, ES_64); +- tcg_gen_mulu2_i64(l2, h2, l2, h2); +- /* Shift result left by one (x2) if requested */ +- if (extract32(get_field(s, m6), 2, 1)) { +- tcg_gen_add2_i64(l2, h2, l2, h2, l2, h2); +- } +- +- /* Add both intermediate results */ +- tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2); +- /* Add whole v4 */ +- read_vec_element_i64(h2, get_field(s, v4), 0, ES_64); +- read_vec_element_i64(l2, get_field(s, v4), 1, ES_64); +- tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2); +- +- /* Store final result into v1.
*/ +- write_vec_element_i64(h1, get_field(s, v1), 0, ES_64); +- write_vec_element_i64(l1, get_field(s, v1), 1, ES_64); +- +- tcg_temp_free_i64(l1); +- tcg_temp_free_i64(h1); +- tcg_temp_free_i64(l2); +- tcg_temp_free_i64(h2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vnn(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_fn_3(nand, ES_8, get_field(s, v1), +- get_field(s, v2), get_field(s, v3)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vno(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_fn_3(nor, ES_8, get_field(s, v1), get_field(s, v2), +- get_field(s, v3)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vnx(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_fn_3(eqv, ES_8, get_field(s, v1), get_field(s, v2), +- get_field(s, v3)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vo(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_fn_3(or, ES_8, get_field(s, v1), get_field(s, v2), +- get_field(s, v3)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_voc(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_fn_3(orc, ES_8, get_field(s, v1), get_field(s, v2), +- get_field(s, v3)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m3); +- static const GVecGen2 g[4] = { +- { .fno = gen_helper_gvec_vpopct8, }, +- { .fno = gen_helper_gvec_vpopct16, }, +- { .fni4 = tcg_gen_ctpop_i32, }, +- { .fni8 = tcg_gen_ctpop_i64, }, +- }; +- +- if (es > ES_64 || (es != ES_8 && !s390_has_feat(S390_FEAT_VECTOR_ENH))) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]); +- return DISAS_NEXT; +-} +- +-static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c) +-{ +- TCGv_i32 t = tcg_temp_new_i32(); +- +- tcg_gen_rotli_i32(t, a, c & 31); +- tcg_gen_and_i32(t, t, b); +- tcg_gen_andc_i32(d, d, b); +- tcg_gen_or_i32(d, d, t); +- +- tcg_temp_free_i32(t); +-} +- +-static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c) +-{ +- TCGv_i64 t = tcg_temp_new_i64(); +- +- tcg_gen_rotli_i64(t, a, c & 63); +- tcg_gen_and_i64(t, t, b); +- tcg_gen_andc_i64(d, d, b); +- tcg_gen_or_i64(d, d, t); +- +- tcg_temp_free_i64(t); +-} +- +-static DisasJumpType op_verim(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m5); +- const uint8_t i4 = get_field(s, i4) & +- (NUM_VEC_ELEMENT_BITS(es) - 1); +- static const GVecGen3i g[4] = { +- { .fno = gen_helper_gvec_verim8, }, +- { .fno = gen_helper_gvec_verim16, }, +- { .fni4 = gen_rim_i32, +- .load_dest = true, }, +- { .fni8 = gen_rim_i64, +- .load_dest = true, }, +- }; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_3i(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), i4, &g[es]); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vesv(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- const uint8_t v1 = get_field(s, v1); +- const uint8_t v2 = get_field(s, v2); +- const uint8_t v3 = get_field(s, v3); +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- switch (s->fields.op2) { +- case 0x70: +- gen_gvec_fn_3(shlv, es, v1, v2, v3); +- break; +- case 0x73: +- gen_gvec_fn_3(rotlv, es, v1, v2, v3); +- break; +- case 0x7a: +- gen_gvec_fn_3(sarv, es, v1, v2, v3); +- break; +- case 0x78: +- gen_gvec_fn_3(shrv, es, v1, v2, v3); +- break; +- default: +- g_assert_not_reached(); +- } +- return DISAS_NEXT; +-} 
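(Illustration, not part of the patch: the file-header comment above notes that gvec expansion only works up to 64 bit elements, so ES_128 operations such as VA go through gen_gvec128_3_i64(), which splits the single 128 bit element into two i64 halves and recombines them with tcg_gen_add2_i64. A minimal plain-C sketch of that carry propagation, using a hypothetical helper name add2_u64:

    #include <stdint.h>

    /* Models tcg_gen_add2_i64 semantics: a 128 bit add on (high, low) pairs. */
    static void add2_u64(uint64_t *dl, uint64_t *dh,
                         uint64_t al, uint64_t ah,
                         uint64_t bl, uint64_t bh)
    {
        uint64_t lo = al + bl;

        /* Unsigned wraparound (lo < al) signals a carry out of the low half. */
        *dh = ah + bh + (lo < al);
        *dl = lo;
    }

The same pattern generalizes to the 4-operand variant, gen_gvec128_4_i64(), used by VAC and VACCC above.)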
+- +-static DisasJumpType op_ves(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- const uint8_t d2 = get_field(s, d2) & +- (NUM_VEC_ELEMENT_BITS(es) - 1); +- const uint8_t v1 = get_field(s, v1); +- const uint8_t v3 = get_field(s, v3); +- TCGv_i32 shift; +- +- if (es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- if (likely(!get_field(s, b2))) { +- switch (s->fields.op2) { +- case 0x30: +- gen_gvec_fn_2i(shli, es, v1, v3, d2); +- break; +- case 0x33: +- gen_gvec_fn_2i(rotli, es, v1, v3, d2); +- break; +- case 0x3a: +- gen_gvec_fn_2i(sari, es, v1, v3, d2); +- break; +- case 0x38: +- gen_gvec_fn_2i(shri, es, v1, v3, d2); +- break; +- default: +- g_assert_not_reached(); +- } +- } else { +- shift = tcg_temp_new_i32(); +- tcg_gen_extrl_i64_i32(shift, o->addr1); +- tcg_gen_andi_i32(shift, shift, NUM_VEC_ELEMENT_BITS(es) - 1); +- switch (s->fields.op2) { +- case 0x30: +- gen_gvec_fn_2s(shls, es, v1, v3, shift); +- break; +- case 0x33: +- gen_gvec_fn_2s(rotls, es, v1, v3, shift); +- break; +- case 0x3a: +- gen_gvec_fn_2s(sars, es, v1, v3, shift); +- break; +- case 0x38: +- gen_gvec_fn_2s(shrs, es, v1, v3, shift); +- break; +- default: +- g_assert_not_reached(); +- } +- tcg_temp_free_i32(shift); +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vsl(DisasContext *s, DisasOps *o) +-{ +- TCGv_i64 shift = tcg_temp_new_i64(); +- +- read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); +- if (s->fields.op2 == 0x74) { +- tcg_gen_andi_i64(shift, shift, 0x7); +- } else { +- tcg_gen_andi_i64(shift, shift, 0x78); +- } +- +- gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), +- shift, 0, gen_helper_gvec_vsl); +- tcg_temp_free_i64(shift); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o) +-{ +- const uint8_t i4 = get_field(s, i4) & 0xf; +- const int left_shift = (i4 & 7) * 8; +- const int right_shift = 64 - left_shift; +- TCGv_i64 t0 = tcg_temp_new_i64(); +- TCGv_i64 t1 = tcg_temp_new_i64(); +- TCGv_i64 t2 = tcg_temp_new_i64(); +- +- if ((i4 & 8) == 0) { +- read_vec_element_i64(t0, get_field(s, v2), 0, ES_64); +- read_vec_element_i64(t1, get_field(s, v2), 1, ES_64); +- read_vec_element_i64(t2, get_field(s, v3), 0, ES_64); +- } else { +- read_vec_element_i64(t0, get_field(s, v2), 1, ES_64); +- read_vec_element_i64(t1, get_field(s, v3), 0, ES_64); +- read_vec_element_i64(t2, get_field(s, v3), 1, ES_64); +- } +- tcg_gen_extract2_i64(t0, t1, t0, right_shift); +- tcg_gen_extract2_i64(t1, t2, t1, right_shift); +- write_vec_element_i64(t0, get_field(s, v1), 0, ES_64); +- write_vec_element_i64(t1, get_field(s, v1), 1, ES_64); +- +- tcg_temp_free(t0); +- tcg_temp_free(t1); +- tcg_temp_free(t2); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vsra(DisasContext *s, DisasOps *o) +-{ +- TCGv_i64 shift = tcg_temp_new_i64(); +- +- read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); +- if (s->fields.op2 == 0x7e) { +- tcg_gen_andi_i64(shift, shift, 0x7); +- } else { +- tcg_gen_andi_i64(shift, shift, 0x78); +- } +- +- gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), +- shift, 0, gen_helper_gvec_vsra); +- tcg_temp_free_i64(shift); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o) +-{ +- TCGv_i64 shift = tcg_temp_new_i64(); +- +- read_vec_element_i64(shift, get_field(s, v3), 7, ES_8); +- if (s->fields.op2 == 0x7c) { +- tcg_gen_andi_i64(shift, shift, 0x7); +- } else { +- tcg_gen_andi_i64(shift, shift, 0x78); +- } +- +- gen_gvec_2i_ool(get_field(s, 
v1), get_field(s, v2), +- shift, 0, gen_helper_gvec_vsrl); +- tcg_temp_free_i64(shift); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vs(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- +- if (es > ES_128) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } else if (es == ES_128) { +- gen_gvec128_3_i64(tcg_gen_sub2_i64, get_field(s, v1), +- get_field(s, v2), get_field(s, v3)); +- return DISAS_NEXT; +- } +- gen_gvec_fn_3(sub, es, get_field(s, v1), get_field(s, v2), +- get_field(s, v3)); +- return DISAS_NEXT; +-} +- +-static void gen_scbi_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +-{ +- tcg_gen_setcond_i32(TCG_COND_GEU, d, a, b); +-} +- +-static void gen_scbi_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +-{ +- tcg_gen_setcond_i64(TCG_COND_GEU, d, a, b); +-} +- +-static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, +- TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) +-{ +- TCGv_i64 th = tcg_temp_new_i64(); +- TCGv_i64 tl = tcg_temp_new_i64(); +- TCGv_i64 zero = tcg_const_i64(0); +- +- tcg_gen_sub2_i64(tl, th, al, zero, bl, zero); +- tcg_gen_andi_i64(th, th, 1); +- tcg_gen_sub2_i64(tl, th, ah, zero, th, zero); +- tcg_gen_sub2_i64(tl, th, tl, th, bh, zero); +- /* "invert" the result: -1 -> 0; 0 -> 1 */ +- tcg_gen_addi_i64(dl, th, 1); +- tcg_gen_mov_i64(dh, zero); +- +- tcg_temp_free_i64(th); +- tcg_temp_free_i64(tl); +- tcg_temp_free_i64(zero); +-} +- +-static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- static const GVecGen3 g[4] = { +- { .fno = gen_helper_gvec_vscbi8, }, +- { .fno = gen_helper_gvec_vscbi16, }, +- { .fni4 = gen_scbi_i32, }, +- { .fni8 = gen_scbi_i64, }, +- }; +- +- if (es > ES_128) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } else if (es == ES_128) { +- gen_gvec128_3_i64(gen_scbi2_i64, get_field(s, v1), +- get_field(s, v2), get_field(s, v3)); +- return DISAS_NEXT; +- } +- gen_gvec_3(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), &g[es]); +- return DISAS_NEXT; +-} +- +-static void gen_sbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, +- TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) +-{ +- TCGv_i64 tl = tcg_temp_new_i64(); +- TCGv_i64 th = tcg_temp_new_i64(); +- +- tcg_gen_not_i64(tl, bl); +- tcg_gen_not_i64(th, bh); +- gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch); +- tcg_temp_free_i64(tl); +- tcg_temp_free_i64(th); +-} +- +-static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o) +-{ +- if (get_field(s, m5) != ES_128) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec128_4_i64(gen_sbi2_i64, get_field(s, v1), +- get_field(s, v2), get_field(s, v3), +- get_field(s, v4)); +- return DISAS_NEXT; +-} +- +-static void gen_sbcbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah, +- TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch) +-{ +- TCGv_i64 th = tcg_temp_new_i64(); +- TCGv_i64 tl = tcg_temp_new_i64(); +- +- tcg_gen_not_i64(tl, bl); +- tcg_gen_not_i64(th, bh); +- gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch); +- +- tcg_temp_free_i64(tl); +- tcg_temp_free_i64(th); +-} +- +-static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o) +-{ +- if (get_field(s, m5) != ES_128) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec128_4_i64(gen_sbcbi2_i64, get_field(s, v1), +- get_field(s, v2), get_field(s, v3), +- get_field(s, v4)); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vsumg(DisasContext *s, DisasOps 
*o) +-{ +- const uint8_t es = get_field(s, m4); +- TCGv_i64 sum, tmp; +- uint8_t dst_idx; +- +- if (es == ES_8 || es > ES_32) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- sum = tcg_temp_new_i64(); +- tmp = tcg_temp_new_i64(); +- for (dst_idx = 0; dst_idx < 2; dst_idx++) { +- uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 2; +- const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 2 - 1; +- +- read_vec_element_i64(sum, get_field(s, v3), max_idx, es); +- for (; idx <= max_idx; idx++) { +- read_vec_element_i64(tmp, get_field(s, v2), idx, es); +- tcg_gen_add_i64(sum, sum, tmp); +- } +- write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64); +- } +- tcg_temp_free_i64(sum); +- tcg_temp_free_i64(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- const uint8_t max_idx = NUM_VEC_ELEMENTS(es) - 1; +- TCGv_i64 sumh, suml, zero, tmpl; +- uint8_t idx; +- +- if (es < ES_32 || es > ES_64) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- sumh = tcg_const_i64(0); +- suml = tcg_temp_new_i64(); +- zero = tcg_const_i64(0); +- tmpl = tcg_temp_new_i64(); +- +- read_vec_element_i64(suml, get_field(s, v3), max_idx, es); +- for (idx = 0; idx <= max_idx; idx++) { +- read_vec_element_i64(tmpl, get_field(s, v2), idx, es); +- tcg_gen_add2_i64(suml, sumh, suml, sumh, tmpl, zero); +- } +- write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64); +- write_vec_element_i64(suml, get_field(s, v1), 1, ES_64); +- +- tcg_temp_free_i64(sumh); +- tcg_temp_free_i64(suml); +- tcg_temp_free_i64(zero); +- tcg_temp_free_i64(tmpl); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vsum(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- TCGv_i32 sum, tmp; +- uint8_t dst_idx; +- +- if (es > ES_16) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- sum = tcg_temp_new_i32(); +- tmp = tcg_temp_new_i32(); +- for (dst_idx = 0; dst_idx < 4; dst_idx++) { +- uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 4; +- const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 4 - 1; +- +- read_vec_element_i32(sum, get_field(s, v3), max_idx, es); +- for (; idx <= max_idx; idx++) { +- read_vec_element_i32(tmp, get_field(s, v2), idx, es); +- tcg_gen_add_i32(sum, sum, tmp); +- } +- write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32); +- } +- tcg_temp_free_i32(sum); +- tcg_temp_free_i32(tmp); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vtm(DisasContext *s, DisasOps *o) +-{ +- gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), +- cpu_env, 0, gen_helper_gvec_vtm); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vfae(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- const uint8_t m5 = get_field(s, m5); +- static gen_helper_gvec_3 * const g[3] = { +- gen_helper_gvec_vfae8, +- gen_helper_gvec_vfae16, +- gen_helper_gvec_vfae32, +- }; +- static gen_helper_gvec_3_ptr * const g_cc[3] = { +- gen_helper_gvec_vfae_cc8, +- gen_helper_gvec_vfae_cc16, +- gen_helper_gvec_vfae_cc32, +- }; +- if (es > ES_32) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- if (extract32(m5, 0, 1)) { +- gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), cpu_env, m5, g_cc[es]); +- set_cc_static(s); +- } else { +- gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), m5, g[es]); +- } +- return DISAS_NEXT; +-} +- 
+-static DisasJumpType op_vfee(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- const uint8_t m5 = get_field(s, m5); +- static gen_helper_gvec_3 * const g[3] = { +- gen_helper_gvec_vfee8, +- gen_helper_gvec_vfee16, +- gen_helper_gvec_vfee32, +- }; +- static gen_helper_gvec_3_ptr * const g_cc[3] = { +- gen_helper_gvec_vfee_cc8, +- gen_helper_gvec_vfee_cc16, +- gen_helper_gvec_vfee_cc32, +- }; +- +- if (es > ES_32 || m5 & ~0x3) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- if (extract32(m5, 0, 1)) { +- gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), cpu_env, m5, g_cc[es]); +- set_cc_static(s); +- } else { +- gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), m5, g[es]); +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vfene(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- const uint8_t m5 = get_field(s, m5); +- static gen_helper_gvec_3 * const g[3] = { +- gen_helper_gvec_vfene8, +- gen_helper_gvec_vfene16, +- gen_helper_gvec_vfene32, +- }; +- static gen_helper_gvec_3_ptr * const g_cc[3] = { +- gen_helper_gvec_vfene_cc8, +- gen_helper_gvec_vfene_cc16, +- gen_helper_gvec_vfene_cc32, +- }; +- +- if (es > ES_32 || m5 & ~0x3) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- if (extract32(m5, 0, 1)) { +- gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), cpu_env, m5, g_cc[es]); +- set_cc_static(s); +- } else { +- gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), m5, g[es]); +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vistr(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m4); +- const uint8_t m5 = get_field(s, m5); +- static gen_helper_gvec_2 * const g[3] = { +- gen_helper_gvec_vistr8, +- gen_helper_gvec_vistr16, +- gen_helper_gvec_vistr32, +- }; +- static gen_helper_gvec_2_ptr * const g_cc[3] = { +- gen_helper_gvec_vistr_cc8, +- gen_helper_gvec_vistr_cc16, +- gen_helper_gvec_vistr_cc32, +- }; +- +- if (es > ES_32 || m5 & ~0x1) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- if (extract32(m5, 0, 1)) { +- gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), +- cpu_env, 0, g_cc[es]); +- set_cc_static(s); +- } else { +- gen_gvec_2_ool(get_field(s, v1), get_field(s, v2), 0, +- g[es]); +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o) +-{ +- const uint8_t es = get_field(s, m5); +- const uint8_t m6 = get_field(s, m6); +- static gen_helper_gvec_4 * const g[3] = { +- gen_helper_gvec_vstrc8, +- gen_helper_gvec_vstrc16, +- gen_helper_gvec_vstrc32, +- }; +- static gen_helper_gvec_4 * const g_rt[3] = { +- gen_helper_gvec_vstrc_rt8, +- gen_helper_gvec_vstrc_rt16, +- gen_helper_gvec_vstrc_rt32, +- }; +- static gen_helper_gvec_4_ptr * const g_cc[3] = { +- gen_helper_gvec_vstrc_cc8, +- gen_helper_gvec_vstrc_cc16, +- gen_helper_gvec_vstrc_cc32, +- }; +- static gen_helper_gvec_4_ptr * const g_cc_rt[3] = { +- gen_helper_gvec_vstrc_cc_rt8, +- gen_helper_gvec_vstrc_cc_rt16, +- gen_helper_gvec_vstrc_cc_rt32, +- }; +- +- if (es > ES_32) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- if (extract32(m6, 0, 1)) { +- if (extract32(m6, 2, 1)) { +- gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), get_field(s, v4), +- cpu_env, m6, g_cc_rt[es]); +- } else { +- gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2), +- 
get_field(s, v3), get_field(s, v4), +- cpu_env, m6, g_cc[es]); +- } +- set_cc_static(s); +- } else { +- if (extract32(m6, 2, 1)) { +- gen_gvec_4_ool(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), get_field(s, v4), +- m6, g_rt[es]); +- } else { +- gen_gvec_4_ool(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), get_field(s, v4), +- m6, g[es]); +- } +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vfa(DisasContext *s, DisasOps *o) +-{ +- const uint8_t fpf = get_field(s, m4); +- const uint8_t m5 = get_field(s, m5); +- gen_helper_gvec_3_ptr *fn = NULL; +- +- switch (s->fields.op2) { +- case 0xe3: +- switch (fpf) { +- case FPF_SHORT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfa32; +- } +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_vfa64; +- break; +- case FPF_EXT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfa128; +- } +- break; +- default: +- break; +- } +- break; +- case 0xe5: +- switch (fpf) { +- case FPF_SHORT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfd32; +- } +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_vfd64; +- break; +- case FPF_EXT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfd128; +- } +- break; +- default: +- break; +- } +- break; +- case 0xe7: +- switch (fpf) { +- case FPF_SHORT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfm32; +- } +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_vfm64; +- break; +- case FPF_EXT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfm128; +- } +- break; +- default: +- break; +- } +- break; +- case 0xe2: +- switch (fpf) { +- case FPF_SHORT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfs32; +- } +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_vfs64; +- break; +- case FPF_EXT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfs128; +- } +- break; +- default: +- break; +- } +- break; +- default: +- g_assert_not_reached(); +- } +- +- if (!fn || extract32(m5, 0, 3)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), cpu_env, m5, fn); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_wfc(DisasContext *s, DisasOps *o) +-{ +- const uint8_t fpf = get_field(s, m3); +- const uint8_t m4 = get_field(s, m4); +- gen_helper_gvec_2_ptr *fn = NULL; +- +- switch (fpf) { +- case FPF_SHORT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_wfk32; +- if (s->fields.op2 == 0xcb) { +- fn = gen_helper_gvec_wfc32; +- } +- } +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_wfk64; +- if (s->fields.op2 == 0xcb) { +- fn = gen_helper_gvec_wfc64; +- } +- break; +- case FPF_EXT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_wfk128; +- if (s->fields.op2 == 0xcb) { +- fn = gen_helper_gvec_wfc128; +- } +- } +- break; +- default: +- break; +- }; +- +- if (!fn || m4) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, fn); +- set_cc_static(s); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vfc(DisasContext *s, DisasOps *o) +-{ +- const uint8_t fpf = get_field(s, m4); +- const uint8_t m5 = get_field(s, m5); +- const uint8_t m6 = get_field(s, m6); +- const bool cs = extract32(m6, 0, 1); +- const bool sq = extract32(m5, 2, 1); +- gen_helper_gvec_3_ptr *fn = NULL; +- +- switch 
(s->fields.op2) { +- case 0xe8: +- switch (fpf) { +- case FPF_SHORT: +- fn = cs ? gen_helper_gvec_vfce32_cc : gen_helper_gvec_vfce32; +- break; +- case FPF_LONG: +- fn = cs ? gen_helper_gvec_vfce64_cc : gen_helper_gvec_vfce64; +- break; +- case FPF_EXT: +- fn = cs ? gen_helper_gvec_vfce128_cc : gen_helper_gvec_vfce128; +- break; +- default: +- break; +- } +- break; +- case 0xeb: +- switch (fpf) { +- case FPF_SHORT: +- fn = cs ? gen_helper_gvec_vfch32_cc : gen_helper_gvec_vfch32; +- break; +- case FPF_LONG: +- fn = cs ? gen_helper_gvec_vfch64_cc : gen_helper_gvec_vfch64; +- break; +- case FPF_EXT: +- fn = cs ? gen_helper_gvec_vfch128_cc : gen_helper_gvec_vfch128; +- break; +- default: +- break; +- } +- break; +- case 0xea: +- switch (fpf) { +- case FPF_SHORT: +- fn = cs ? gen_helper_gvec_vfche32_cc : gen_helper_gvec_vfche32; +- break; +- case FPF_LONG: +- fn = cs ? gen_helper_gvec_vfche64_cc : gen_helper_gvec_vfche64; +- break; +- case FPF_EXT: +- fn = cs ? gen_helper_gvec_vfche128_cc : gen_helper_gvec_vfche128; +- break; +- default: +- break; +- } +- break; +- default: +- g_assert_not_reached(); +- } +- +- if (!fn || extract32(m5, 0, 2) || extract32(m6, 1, 3) || +- (!s390_has_feat(S390_FEAT_VECTOR_ENH) && (fpf != FPF_LONG || sq))) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3), +- cpu_env, m5, fn); +- if (cs) { +- set_cc_static(s); +- } +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o) +-{ +- const uint8_t fpf = get_field(s, m3); +- const uint8_t m4 = get_field(s, m4); +- const uint8_t erm = get_field(s, m5); +- gen_helper_gvec_2_ptr *fn = NULL; +- +- +- switch (s->fields.op2) { +- case 0xc3: +- if (fpf == FPF_LONG) { +- fn = gen_helper_gvec_vcdg64; +- } +- break; +- case 0xc1: +- if (fpf == FPF_LONG) { +- fn = gen_helper_gvec_vcdlg64; +- } +- break; +- case 0xc2: +- if (fpf == FPF_LONG) { +- fn = gen_helper_gvec_vcgd64; +- } +- break; +- case 0xc0: +- if (fpf == FPF_LONG) { +- fn = gen_helper_gvec_vclgd64; +- } +- break; +- case 0xc7: +- switch (fpf) { +- case FPF_SHORT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfi32; +- } +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_vfi64; +- break; +- case FPF_EXT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfi128; +- } +- break; +- default: +- break; +- } +- break; +- case 0xc5: +- switch (fpf) { +- case FPF_LONG: +- fn = gen_helper_gvec_vflr64; +- break; +- case FPF_EXT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vflr128; +- } +- break; +- default: +- break; +- } +- break; +- default: +- g_assert_not_reached(); +- } +- +- if (!fn || extract32(m4, 0, 2) || erm > 7 || erm == 2) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, +- deposit32(m4, 4, 4, erm), fn); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vfll(DisasContext *s, DisasOps *o) +-{ +- const uint8_t fpf = get_field(s, m3); +- const uint8_t m4 = get_field(s, m4); +- gen_helper_gvec_2_ptr *fn = NULL; +- +- switch (fpf) { +- case FPF_SHORT: +- fn = gen_helper_gvec_vfll32; +- break; +- case FPF_LONG: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfll64; +- } +- break; +- default: +- break; +- } +- +- if (!fn || extract32(m4, 0, 3)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- 
gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vfmax(DisasContext *s, DisasOps *o) +-{ +- const uint8_t fpf = get_field(s, m4); +- const uint8_t m6 = get_field(s, m6); +- const uint8_t m5 = get_field(s, m5); +- gen_helper_gvec_3_ptr *fn; +- +- if (m6 == 5 || m6 == 6 || m6 == 7 || m6 > 13) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- switch (fpf) { +- case FPF_SHORT: +- if (s->fields.op2 == 0xef) { +- fn = gen_helper_gvec_vfmax32; +- } else { +- fn = gen_helper_gvec_vfmin32; +- } +- break; +- case FPF_LONG: +- if (s->fields.op2 == 0xef) { +- fn = gen_helper_gvec_vfmax64; +- } else { +- fn = gen_helper_gvec_vfmin64; +- } +- break; +- case FPF_EXT: +- if (s->fields.op2 == 0xef) { +- fn = gen_helper_gvec_vfmax128; +- } else { +- fn = gen_helper_gvec_vfmin128; +- } +- break; +- default: +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3), +- cpu_env, deposit32(m5, 4, 4, m6), fn); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vfma(DisasContext *s, DisasOps *o) +-{ +- const uint8_t m5 = get_field(s, m5); +- const uint8_t fpf = get_field(s, m6); +- gen_helper_gvec_4_ptr *fn = NULL; +- +- switch (s->fields.op2) { +- case 0x8f: +- switch (fpf) { +- case FPF_SHORT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfma32; +- } +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_vfma64; +- break; +- case FPF_EXT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfma128; +- } +- break; +- default: +- break; +- } +- break; +- case 0x8e: +- switch (fpf) { +- case FPF_SHORT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfms32; +- } +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_vfms64; +- break; +- case FPF_EXT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfms128; +- } +- break; +- default: +- break; +- } +- break; +- case 0x9f: +- switch (fpf) { +- case FPF_SHORT: +- fn = gen_helper_gvec_vfnma32; +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_vfnma64; +- break; +- case FPF_EXT: +- fn = gen_helper_gvec_vfnma128; +- break; +- default: +- break; +- } +- break; +- case 0x9e: +- switch (fpf) { +- case FPF_SHORT: +- fn = gen_helper_gvec_vfnms32; +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_vfnms64; +- break; +- case FPF_EXT: +- fn = gen_helper_gvec_vfnms128; +- break; +- default: +- break; +- } +- break; +- default: +- g_assert_not_reached(); +- } +- +- if (!fn || extract32(m5, 0, 3)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2), +- get_field(s, v3), get_field(s, v4), cpu_env, m5, fn); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o) +-{ +- const uint8_t v1 = get_field(s, v1); +- const uint8_t v2 = get_field(s, v2); +- const uint8_t fpf = get_field(s, m3); +- const uint8_t m4 = get_field(s, m4); +- const uint8_t m5 = get_field(s, m5); +- const bool se = extract32(m4, 3, 1); +- TCGv_i64 tmp; +- +- if ((fpf != FPF_LONG && !s390_has_feat(S390_FEAT_VECTOR_ENH)) || +- extract32(m4, 0, 3) || m5 > 2) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- switch (fpf) { +- case FPF_SHORT: +- if (!se) { +- switch (m5) { +- case 0: +- /* sign bit is inverted (complement) */ +- gen_gvec_fn_2i(xori, ES_32, v1, v2, 1ull << 31); 
+- break; +- case 1: +- /* sign bit is set to one (negative) */ +- gen_gvec_fn_2i(ori, ES_32, v1, v2, 1ull << 31); +- break; +- case 2: +- /* sign bit is set to zero (positive) */ +- gen_gvec_fn_2i(andi, ES_32, v1, v2, (1ull << 31) - 1); +- break; +- } +- return DISAS_NEXT; +- } +- break; +- case FPF_LONG: +- if (!se) { +- switch (m5) { +- case 0: +- /* sign bit is inverted (complement) */ +- gen_gvec_fn_2i(xori, ES_64, v1, v2, 1ull << 63); +- break; +- case 1: +- /* sign bit is set to one (negative) */ +- gen_gvec_fn_2i(ori, ES_64, v1, v2, 1ull << 63); +- break; +- case 2: +- /* sign bit is set to zero (positive) */ +- gen_gvec_fn_2i(andi, ES_64, v1, v2, (1ull << 63) - 1); +- break; +- } +- return DISAS_NEXT; +- } +- break; +- case FPF_EXT: +- /* Only a single element. */ +- break; +- default: +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- /* With a single element, we are only interested in bit 0. */ +- tmp = tcg_temp_new_i64(); +- read_vec_element_i64(tmp, v2, 0, ES_64); +- switch (m5) { +- case 0: +- /* sign bit is inverted (complement) */ +- tcg_gen_xori_i64(tmp, tmp, 1ull << 63); +- break; +- case 1: +- /* sign bit is set to one (negative) */ +- tcg_gen_ori_i64(tmp, tmp, 1ull << 63); +- break; +- case 2: +- /* sign bit is set to zero (positive) */ +- tcg_gen_andi_i64(tmp, tmp, (1ull << 63) - 1); +- break; +- } +- write_vec_element_i64(tmp, v1, 0, ES_64); +- +- if (fpf == FPF_EXT) { +- read_vec_element_i64(tmp, v2, 1, ES_64); +- write_vec_element_i64(tmp, v1, 1, ES_64); +- } +- +- tcg_temp_free_i64(tmp); +- +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o) +-{ +- const uint8_t fpf = get_field(s, m3); +- const uint8_t m4 = get_field(s, m4); +- gen_helper_gvec_2_ptr *fn = NULL; +- +- switch (fpf) { +- case FPF_SHORT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfsq32; +- } +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_vfsq64; +- break; +- case FPF_EXT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vfsq128; +- } +- break; +- default: +- break; +- } +- +- if (!fn || extract32(m4, 0, 3)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn); +- return DISAS_NEXT; +-} +- +-static DisasJumpType op_vftci(DisasContext *s, DisasOps *o) +-{ +- const uint16_t i3 = get_field(s, i3); +- const uint8_t fpf = get_field(s, m4); +- const uint8_t m5 = get_field(s, m5); +- gen_helper_gvec_2_ptr *fn = NULL; +- +- switch (fpf) { +- case FPF_SHORT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vftci32; +- } +- break; +- case FPF_LONG: +- fn = gen_helper_gvec_vftci64; +- break; +- case FPF_EXT: +- if (s390_has_feat(S390_FEAT_VECTOR_ENH)) { +- fn = gen_helper_gvec_vftci128; +- } +- break; +- default: +- break; +- } +- +- if (!fn || extract32(m5, 0, 3)) { +- gen_program_exception(s, PGM_SPECIFICATION); +- return DISAS_NORETURN; +- } +- +- gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, +- deposit32(m5, 4, 12, i3), fn); +- set_cc_static(s); +- return DISAS_NEXT; +-} +diff --git a/target/s390x/vec.h b/target/s390x/vec.h +deleted file mode 100644 +index a6e361869b..0000000000 +--- a/target/s390x/vec.h ++++ /dev/null +@@ -1,141 +0,0 @@ +-/* +- * QEMU TCG support -- s390x vector utilitites +- * +- * Copyright (C) 2019 Red Hat Inc +- * +- * Authors: +- * David Hildenbrand +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or 
later.
+- * See the COPYING file in the top-level directory.
+- */
+-#ifndef S390X_VEC_H
+-#define S390X_VEC_H
+-
+-#include "tcg/tcg.h"
+-
+-typedef union S390Vector {
+-    uint64_t doubleword[2];
+-    uint32_t word[4];
+-    uint16_t halfword[8];
+-    uint8_t byte[16];
+-} S390Vector;
+-
+-/*
+- * Each vector is stored as two 64bit host values. So when talking about
+- * byte/halfword/word numbers, we have to take care of proper translation
+- * between element numbers.
+- *
+- * Big Endian (target/possible host)
+- * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
+- * HW: [ 0][ 1][ 2][ 3] - [ 4][ 5][ 6][ 7]
+- * W:  [ 0][ 1] - [ 2][ 3]
+- * DW: [ 0] - [ 1]
+- *
+- * Little Endian (possible host)
+- * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
+- * HW: [ 3][ 2][ 1][ 0] - [ 7][ 6][ 5][ 4]
+- * W:  [ 1][ 0] - [ 3][ 2]
+- * DW: [ 0] - [ 1]
+- */
+-#ifndef HOST_WORDS_BIGENDIAN
+-#define H1(x) ((x) ^ 7)
+-#define H2(x) ((x) ^ 3)
+-#define H4(x) ((x) ^ 1)
+-#else
+-#define H1(x) (x)
+-#define H2(x) (x)
+-#define H4(x) (x)
+-#endif
+-
+-static inline uint8_t s390_vec_read_element8(const S390Vector *v, uint8_t enr)
+-{
+-    g_assert(enr < 16);
+-    return v->byte[H1(enr)];
+-}
+-
+-static inline uint16_t s390_vec_read_element16(const S390Vector *v, uint8_t enr)
+-{
+-    g_assert(enr < 8);
+-    return v->halfword[H2(enr)];
+-}
+-
+-static inline uint32_t s390_vec_read_element32(const S390Vector *v, uint8_t enr)
+-{
+-    g_assert(enr < 4);
+-    return v->word[H4(enr)];
+-}
+-
+-static inline uint64_t s390_vec_read_element64(const S390Vector *v, uint8_t enr)
+-{
+-    g_assert(enr < 2);
+-    return v->doubleword[enr];
+-}
+-
+-static inline uint64_t s390_vec_read_element(const S390Vector *v, uint8_t enr,
+-                                             uint8_t es)
+-{
+-    switch (es) {
+-    case MO_8:
+-        return s390_vec_read_element8(v, enr);
+-    case MO_16:
+-        return s390_vec_read_element16(v, enr);
+-    case MO_32:
+-        return s390_vec_read_element32(v, enr);
+-    case MO_64:
+-        return s390_vec_read_element64(v, enr);
+-    default:
+-        g_assert_not_reached();
+-    }
+-}
+-
+-static inline void s390_vec_write_element8(S390Vector *v, uint8_t enr,
+-                                           uint8_t data)
+-{
+-    g_assert(enr < 16);
+-    v->byte[H1(enr)] = data;
+-}
+-
+-static inline void s390_vec_write_element16(S390Vector *v, uint8_t enr,
+-                                            uint16_t data)
+-{
+-    g_assert(enr < 8);
+-    v->halfword[H2(enr)] = data;
+-}
+-
+-static inline void s390_vec_write_element32(S390Vector *v, uint8_t enr,
+-                                            uint32_t data)
+-{
+-    g_assert(enr < 4);
+-    v->word[H4(enr)] = data;
+-}
+-
+-static inline void s390_vec_write_element64(S390Vector *v, uint8_t enr,
+-                                            uint64_t data)
+-{
+-    g_assert(enr < 2);
+-    v->doubleword[enr] = data;
+-}
+-
+-static inline void s390_vec_write_element(S390Vector *v, uint8_t enr,
+-                                          uint8_t es, uint64_t data)
+-{
+-    switch (es) {
+-    case MO_8:
+-        s390_vec_write_element8(v, enr, data);
+-        break;
+-    case MO_16:
+-        s390_vec_write_element16(v, enr, data);
+-        break;
+-    case MO_32:
+-        s390_vec_write_element32(v, enr, data);
+-        break;
+-    case MO_64:
+-        s390_vec_write_element64(v, enr, data);
+-        break;
+-    default:
+-        g_assert_not_reached();
+-    }
+-}
+-
+-#endif /* S390X_VEC_H */
+diff --git a/target/s390x/vec_fpu_helper.c b/target/s390x/vec_fpu_helper.c
+deleted file mode 100644
+index 8e2b274547..0000000000
+--- a/target/s390x/vec_fpu_helper.c
++++ /dev/null
+@@ -1,1072 +0,0 @@
+-/*
+- * QEMU TCG support -- s390x vector floating point instruction support
+- *
+- * Copyright (C) 2019 Red Hat Inc
+- *
+- * Authors:
+- *   David Hildenbrand
+- *
+- * This work is licensed
under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. +- */ +-#include "qemu/osdep.h" +-#include "qemu-common.h" +-#include "cpu.h" +-#include "internal.h" +-#include "vec.h" +-#include "tcg_s390x.h" +-#include "tcg/tcg-gvec-desc.h" +-#include "exec/exec-all.h" +-#include "exec/helper-proto.h" +-#include "fpu/softfloat.h" +- +-#define VIC_INVALID 0x1 +-#define VIC_DIVBYZERO 0x2 +-#define VIC_OVERFLOW 0x3 +-#define VIC_UNDERFLOW 0x4 +-#define VIC_INEXACT 0x5 +- +-/* returns the VEX. If the VEX is 0, there is no trap */ +-static uint8_t check_ieee_exc(CPUS390XState *env, uint8_t enr, bool XxC, +- uint8_t *vec_exc) +-{ +- uint8_t vece_exc = 0, trap_exc; +- unsigned qemu_exc; +- +- /* Retrieve and clear the softfloat exceptions */ +- qemu_exc = env->fpu_status.float_exception_flags; +- if (qemu_exc == 0) { +- return 0; +- } +- env->fpu_status.float_exception_flags = 0; +- +- vece_exc = s390_softfloat_exc_to_ieee(qemu_exc); +- +- /* Add them to the vector-wide s390x exception bits */ +- *vec_exc |= vece_exc; +- +- /* Check for traps and construct the VXC */ +- trap_exc = vece_exc & env->fpc >> 24; +- if (trap_exc) { +- if (trap_exc & S390_IEEE_MASK_INVALID) { +- return enr << 4 | VIC_INVALID; +- } else if (trap_exc & S390_IEEE_MASK_DIVBYZERO) { +- return enr << 4 | VIC_DIVBYZERO; +- } else if (trap_exc & S390_IEEE_MASK_OVERFLOW) { +- return enr << 4 | VIC_OVERFLOW; +- } else if (trap_exc & S390_IEEE_MASK_UNDERFLOW) { +- return enr << 4 | VIC_UNDERFLOW; +- } else if (!XxC) { +- g_assert(trap_exc & S390_IEEE_MASK_INEXACT); +- /* inexact has lowest priority on traps */ +- return enr << 4 | VIC_INEXACT; +- } +- } +- return 0; +-} +- +-static void handle_ieee_exc(CPUS390XState *env, uint8_t vxc, uint8_t vec_exc, +- uintptr_t retaddr) +-{ +- if (vxc) { +- /* on traps, the fpc flags are not updated, instruction is suppressed */ +- tcg_s390_vector_exception(env, vxc, retaddr); +- } +- if (vec_exc) { +- /* indicate exceptions for all elements combined */ +- env->fpc |= vec_exc << 16; +- } +-} +- +-static float32 s390_vec_read_float32(const S390Vector *v, uint8_t enr) +-{ +- return make_float32(s390_vec_read_element32(v, enr)); +-} +- +-static float64 s390_vec_read_float64(const S390Vector *v, uint8_t enr) +-{ +- return make_float64(s390_vec_read_element64(v, enr)); +-} +- +-static float128 s390_vec_read_float128(const S390Vector *v) +-{ +- return make_float128(s390_vec_read_element64(v, 0), +- s390_vec_read_element64(v, 1)); +-} +- +-static void s390_vec_write_float32(S390Vector *v, uint8_t enr, float32 data) +-{ +- return s390_vec_write_element32(v, enr, data); +-} +- +-static void s390_vec_write_float64(S390Vector *v, uint8_t enr, float64 data) +-{ +- return s390_vec_write_element64(v, enr, data); +-} +- +-static void s390_vec_write_float128(S390Vector *v, float128 data) +-{ +- s390_vec_write_element64(v, 0, data.high); +- s390_vec_write_element64(v, 1, data.low); +-} +- +-typedef float32 (*vop32_2_fn)(float32 a, float_status *s); +-static void vop32_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, +- bool s, bool XxC, uint8_t erm, vop32_2_fn fn, +- uintptr_t retaddr) +-{ +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int i, old_mode; +- +- old_mode = s390_swap_bfp_rounding_mode(env, erm); +- for (i = 0; i < 4; i++) { +- const float32 a = s390_vec_read_float32(v2, i); +- +- s390_vec_write_float32(&tmp, i, fn(a, &env->fpu_status)); +- vxc = check_ieee_exc(env, i, XxC, &vec_exc); +- if (s || vxc) { +- break; +- } +- } +- 
s390_restore_bfp_rounding_mode(env, old_mode); +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +-} +- +-typedef float64 (*vop64_2_fn)(float64 a, float_status *s); +-static void vop64_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, +- bool s, bool XxC, uint8_t erm, vop64_2_fn fn, +- uintptr_t retaddr) +-{ +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int i, old_mode; +- +- old_mode = s390_swap_bfp_rounding_mode(env, erm); +- for (i = 0; i < 2; i++) { +- const float64 a = s390_vec_read_float64(v2, i); +- +- s390_vec_write_float64(&tmp, i, fn(a, &env->fpu_status)); +- vxc = check_ieee_exc(env, i, XxC, &vec_exc); +- if (s || vxc) { +- break; +- } +- } +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +-} +- +-typedef float128 (*vop128_2_fn)(float128 a, float_status *s); +-static void vop128_2(S390Vector *v1, const S390Vector *v2, CPUS390XState *env, +- bool s, bool XxC, uint8_t erm, vop128_2_fn fn, +- uintptr_t retaddr) +-{ +- const float128 a = s390_vec_read_float128(v2); +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int old_mode; +- +- old_mode = s390_swap_bfp_rounding_mode(env, erm); +- s390_vec_write_float128(&tmp, fn(a, &env->fpu_status)); +- vxc = check_ieee_exc(env, 0, XxC, &vec_exc); +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +-} +- +-static float64 vcdg64(float64 a, float_status *s) +-{ +- return int64_to_float64(a, s); +-} +- +-static float64 vcdlg64(float64 a, float_status *s) +-{ +- return uint64_to_float64(a, s); +-} +- +-static float64 vcgd64(float64 a, float_status *s) +-{ +- const float64 tmp = float64_to_int64(a, s); +- +- return float64_is_any_nan(a) ? INT64_MIN : tmp; +-} +- +-static float64 vclgd64(float64 a, float_status *s) +-{ +- const float64 tmp = float64_to_uint64(a, s); +- +- return float64_is_any_nan(a) ? 
0 : tmp; +-} +- +-#define DEF_GVEC_VOP2_FN(NAME, FN, BITS) \ +-void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, CPUS390XState *env, \ +- uint32_t desc) \ +-{ \ +- const uint8_t erm = extract32(simd_data(desc), 4, 4); \ +- const bool se = extract32(simd_data(desc), 3, 1); \ +- const bool XxC = extract32(simd_data(desc), 2, 1); \ +- \ +- vop##BITS##_2(v1, v2, env, se, XxC, erm, FN, GETPC()); \ +-} +- +-#define DEF_GVEC_VOP2_64(NAME) \ +-DEF_GVEC_VOP2_FN(NAME, NAME##64, 64) +- +-#define DEF_GVEC_VOP2(NAME, OP) \ +-DEF_GVEC_VOP2_FN(NAME, float32_##OP, 32) \ +-DEF_GVEC_VOP2_FN(NAME, float64_##OP, 64) \ +-DEF_GVEC_VOP2_FN(NAME, float128_##OP, 128) +- +-DEF_GVEC_VOP2_64(vcdg) +-DEF_GVEC_VOP2_64(vcdlg) +-DEF_GVEC_VOP2_64(vcgd) +-DEF_GVEC_VOP2_64(vclgd) +-DEF_GVEC_VOP2(vfi, round_to_int) +-DEF_GVEC_VOP2(vfsq, sqrt) +- +-typedef float32 (*vop32_3_fn)(float32 a, float32 b, float_status *s); +-static void vop32_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, +- CPUS390XState *env, bool s, vop32_3_fn fn, +- uintptr_t retaddr) +-{ +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int i; +- +- for (i = 0; i < 4; i++) { +- const float32 a = s390_vec_read_float32(v2, i); +- const float32 b = s390_vec_read_float32(v3, i); +- +- s390_vec_write_float32(&tmp, i, fn(a, b, &env->fpu_status)); +- vxc = check_ieee_exc(env, i, false, &vec_exc); +- if (s || vxc) { +- break; +- } +- } +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +-} +- +-typedef float64 (*vop64_3_fn)(float64 a, float64 b, float_status *s); +-static void vop64_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, +- CPUS390XState *env, bool s, vop64_3_fn fn, +- uintptr_t retaddr) +-{ +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int i; +- +- for (i = 0; i < 2; i++) { +- const float64 a = s390_vec_read_float64(v2, i); +- const float64 b = s390_vec_read_float64(v3, i); +- +- s390_vec_write_float64(&tmp, i, fn(a, b, &env->fpu_status)); +- vxc = check_ieee_exc(env, i, false, &vec_exc); +- if (s || vxc) { +- break; +- } +- } +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +-} +- +-typedef float128 (*vop128_3_fn)(float128 a, float128 b, float_status *s); +-static void vop128_3(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, +- CPUS390XState *env, bool s, vop128_3_fn fn, +- uintptr_t retaddr) +-{ +- const float128 a = s390_vec_read_float128(v2); +- const float128 b = s390_vec_read_float128(v3); +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- +- s390_vec_write_float128(&tmp, fn(a, b, &env->fpu_status)); +- vxc = check_ieee_exc(env, 0, false, &vec_exc); +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +-} +- +-#define DEF_GVEC_VOP3_B(NAME, OP, BITS) \ +-void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ +- CPUS390XState *env, uint32_t desc) \ +-{ \ +- const bool se = extract32(simd_data(desc), 3, 1); \ +- \ +- vop##BITS##_3(v1, v2, v3, env, se, float##BITS##_##OP, GETPC()); \ +-} +- +-#define DEF_GVEC_VOP3(NAME, OP) \ +-DEF_GVEC_VOP3_B(NAME, OP, 32) \ +-DEF_GVEC_VOP3_B(NAME, OP, 64) \ +-DEF_GVEC_VOP3_B(NAME, OP, 128) +- +-DEF_GVEC_VOP3(vfa, add) +-DEF_GVEC_VOP3(vfs, sub) +-DEF_GVEC_VOP3(vfd, div) +-DEF_GVEC_VOP3(vfm, mul) +- +-static int wfc32(const S390Vector *v1, const S390Vector *v2, +- CPUS390XState *env, bool signal, uintptr_t retaddr) +-{ +- /* only the zero-indexed elements are compared */ +- const float32 a = s390_vec_read_float32(v1, 0); +- const float32 b = s390_vec_read_float32(v2, 0); +- uint8_t vxc, vec_exc = 0; +- 
int cmp; +- +- if (signal) { +- cmp = float32_compare(a, b, &env->fpu_status); +- } else { +- cmp = float32_compare_quiet(a, b, &env->fpu_status); +- } +- vxc = check_ieee_exc(env, 0, false, &vec_exc); +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- +- return float_comp_to_cc(env, cmp); +-} +- +-static int wfc64(const S390Vector *v1, const S390Vector *v2, +- CPUS390XState *env, bool signal, uintptr_t retaddr) +-{ +- /* only the zero-indexed elements are compared */ +- const float64 a = s390_vec_read_float64(v1, 0); +- const float64 b = s390_vec_read_float64(v2, 0); +- uint8_t vxc, vec_exc = 0; +- int cmp; +- +- if (signal) { +- cmp = float64_compare(a, b, &env->fpu_status); +- } else { +- cmp = float64_compare_quiet(a, b, &env->fpu_status); +- } +- vxc = check_ieee_exc(env, 0, false, &vec_exc); +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- +- return float_comp_to_cc(env, cmp); +-} +- +-static int wfc128(const S390Vector *v1, const S390Vector *v2, +- CPUS390XState *env, bool signal, uintptr_t retaddr) +-{ +- /* only the zero-indexed elements are compared */ +- const float128 a = s390_vec_read_float128(v1); +- const float128 b = s390_vec_read_float128(v2); +- uint8_t vxc, vec_exc = 0; +- int cmp; +- +- if (signal) { +- cmp = float128_compare(a, b, &env->fpu_status); +- } else { +- cmp = float128_compare_quiet(a, b, &env->fpu_status); +- } +- vxc = check_ieee_exc(env, 0, false, &vec_exc); +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- +- return float_comp_to_cc(env, cmp); +-} +- +-#define DEF_GVEC_WFC_B(NAME, SIGNAL, BITS) \ +-void HELPER(gvec_##NAME##BITS)(const void *v1, const void *v2, \ +- CPUS390XState *env, uint32_t desc) \ +-{ \ +- env->cc_op = wfc##BITS(v1, v2, env, SIGNAL, GETPC()); \ +-} +- +-#define DEF_GVEC_WFC(NAME, SIGNAL) \ +- DEF_GVEC_WFC_B(NAME, SIGNAL, 32) \ +- DEF_GVEC_WFC_B(NAME, SIGNAL, 64) \ +- DEF_GVEC_WFC_B(NAME, SIGNAL, 128) +- +-DEF_GVEC_WFC(wfc, false) +-DEF_GVEC_WFC(wfk, true) +- +-typedef bool (*vfc32_fn)(float32 a, float32 b, float_status *status); +-static int vfc32(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, +- CPUS390XState *env, bool s, vfc32_fn fn, uintptr_t retaddr) +-{ +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int match = 0; +- int i; +- +- for (i = 0; i < 4; i++) { +- const float32 a = s390_vec_read_float32(v2, i); +- const float32 b = s390_vec_read_float32(v3, i); +- +- /* swap the order of the parameters, so we can use existing functions */ +- if (fn(b, a, &env->fpu_status)) { +- match++; +- s390_vec_write_element32(&tmp, i, -1u); +- } +- vxc = check_ieee_exc(env, i, false, &vec_exc); +- if (s || vxc) { +- break; +- } +- } +- +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +- if (match) { +- return s || match == 4 ? 
0 : 1; +- } +- return 3; +-} +- +-typedef bool (*vfc64_fn)(float64 a, float64 b, float_status *status); +-static int vfc64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, +- CPUS390XState *env, bool s, vfc64_fn fn, uintptr_t retaddr) +-{ +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int match = 0; +- int i; +- +- for (i = 0; i < 2; i++) { +- const float64 a = s390_vec_read_float64(v2, i); +- const float64 b = s390_vec_read_float64(v3, i); +- +- /* swap the order of the parameters, so we can use existing functions */ +- if (fn(b, a, &env->fpu_status)) { +- match++; +- s390_vec_write_element64(&tmp, i, -1ull); +- } +- vxc = check_ieee_exc(env, i, false, &vec_exc); +- if (s || vxc) { +- break; +- } +- } +- +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +- if (match) { +- return s || match == 2 ? 0 : 1; +- } +- return 3; +-} +- +-typedef bool (*vfc128_fn)(float128 a, float128 b, float_status *status); +-static int vfc128(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, +- CPUS390XState *env, bool s, vfc128_fn fn, uintptr_t retaddr) +-{ +- const float128 a = s390_vec_read_float128(v2); +- const float128 b = s390_vec_read_float128(v3); +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- bool match = false; +- +- /* swap the order of the parameters, so we can use existing functions */ +- if (fn(b, a, &env->fpu_status)) { +- match = true; +- s390_vec_write_element64(&tmp, 0, -1ull); +- s390_vec_write_element64(&tmp, 1, -1ull); +- } +- vxc = check_ieee_exc(env, 0, false, &vec_exc); +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +- return match ? 0 : 3; +-} +- +-#define DEF_GVEC_VFC_B(NAME, OP, BITS) \ +-void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ +- CPUS390XState *env, uint32_t desc) \ +-{ \ +- const bool se = extract32(simd_data(desc), 3, 1); \ +- const bool sq = extract32(simd_data(desc), 2, 1); \ +- vfc##BITS##_fn fn = sq ? float##BITS##_##OP : float##BITS##_##OP##_quiet; \ +- \ +- vfc##BITS(v1, v2, v3, env, se, fn, GETPC()); \ +-} \ +- \ +-void HELPER(gvec_##NAME##BITS##_cc)(void *v1, const void *v2, const void *v3, \ +- CPUS390XState *env, uint32_t desc) \ +-{ \ +- const bool se = extract32(simd_data(desc), 3, 1); \ +- const bool sq = extract32(simd_data(desc), 2, 1); \ +- vfc##BITS##_fn fn = sq ? 
float##BITS##_##OP : float##BITS##_##OP##_quiet; \ +- \ +- env->cc_op = vfc##BITS(v1, v2, v3, env, se, fn, GETPC()); \ +-} +- +-#define DEF_GVEC_VFC(NAME, OP) \ +-DEF_GVEC_VFC_B(NAME, OP, 32) \ +-DEF_GVEC_VFC_B(NAME, OP, 64) \ +-DEF_GVEC_VFC_B(NAME, OP, 128) \ +- +-DEF_GVEC_VFC(vfce, eq) +-DEF_GVEC_VFC(vfch, lt) +-DEF_GVEC_VFC(vfche, le) +- +-void HELPER(gvec_vfll32)(void *v1, const void *v2, CPUS390XState *env, +- uint32_t desc) +-{ +- const bool s = extract32(simd_data(desc), 3, 1); +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int i; +- +- for (i = 0; i < 2; i++) { +- /* load from even element */ +- const float32 a = s390_vec_read_element32(v2, i * 2); +- const uint64_t ret = float32_to_float64(a, &env->fpu_status); +- +- s390_vec_write_element64(&tmp, i, ret); +- /* indicate the source element */ +- vxc = check_ieee_exc(env, i * 2, false, &vec_exc); +- if (s || vxc) { +- break; +- } +- } +- handle_ieee_exc(env, vxc, vec_exc, GETPC()); +- *(S390Vector *)v1 = tmp; +-} +- +-void HELPER(gvec_vfll64)(void *v1, const void *v2, CPUS390XState *env, +- uint32_t desc) +-{ +- /* load from even element */ +- const float128 ret = float64_to_float128(s390_vec_read_float64(v2, 0), +- &env->fpu_status); +- uint8_t vxc, vec_exc = 0; +- +- vxc = check_ieee_exc(env, 0, false, &vec_exc); +- handle_ieee_exc(env, vxc, vec_exc, GETPC()); +- s390_vec_write_float128(v1, ret); +-} +- +-void HELPER(gvec_vflr64)(void *v1, const void *v2, CPUS390XState *env, +- uint32_t desc) +-{ +- const uint8_t erm = extract32(simd_data(desc), 4, 4); +- const bool s = extract32(simd_data(desc), 3, 1); +- const bool XxC = extract32(simd_data(desc), 2, 1); +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int i, old_mode; +- +- old_mode = s390_swap_bfp_rounding_mode(env, erm); +- for (i = 0; i < 2; i++) { +- float64 a = s390_vec_read_element64(v2, i); +- uint32_t ret = float64_to_float32(a, &env->fpu_status); +- +- /* place at even element */ +- s390_vec_write_element32(&tmp, i * 2, ret); +- /* indicate the source element */ +- vxc = check_ieee_exc(env, i, XxC, &vec_exc); +- if (s || vxc) { +- break; +- } +- } +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_ieee_exc(env, vxc, vec_exc, GETPC()); +- *(S390Vector *)v1 = tmp; +-} +- +-void HELPER(gvec_vflr128)(void *v1, const void *v2, CPUS390XState *env, +- uint32_t desc) +-{ +- const uint8_t erm = extract32(simd_data(desc), 4, 4); +- const bool XxC = extract32(simd_data(desc), 2, 1); +- uint8_t vxc, vec_exc = 0; +- int old_mode; +- float64 ret; +- +- old_mode = s390_swap_bfp_rounding_mode(env, erm); +- ret = float128_to_float64(s390_vec_read_float128(v2), &env->fpu_status); +- vxc = check_ieee_exc(env, 0, XxC, &vec_exc); +- s390_restore_bfp_rounding_mode(env, old_mode); +- handle_ieee_exc(env, vxc, vec_exc, GETPC()); +- +- /* place at even element, odd element is unpredictable */ +- s390_vec_write_float64(v1, 0, ret); +-} +- +-static void vfma32(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, +- const S390Vector *v4, CPUS390XState *env, bool s, int flags, +- uintptr_t retaddr) +-{ +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int i; +- +- for (i = 0; i < 4; i++) { +- const float32 a = s390_vec_read_float32(v2, i); +- const float32 b = s390_vec_read_float32(v3, i); +- const float32 c = s390_vec_read_float32(v4, i); +- float32 ret = float32_muladd(a, b, c, flags, &env->fpu_status); +- +- s390_vec_write_float32(&tmp, i, ret); +- vxc = check_ieee_exc(env, i, false, &vec_exc); +- if (s || vxc) { +- break; +- } +- } +- handle_ieee_exc(env, 
vxc, vec_exc, retaddr); +- *v1 = tmp; +-} +- +-static void vfma64(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, +- const S390Vector *v4, CPUS390XState *env, bool s, int flags, +- uintptr_t retaddr) +-{ +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int i; +- +- for (i = 0; i < 2; i++) { +- const float64 a = s390_vec_read_float64(v2, i); +- const float64 b = s390_vec_read_float64(v3, i); +- const float64 c = s390_vec_read_float64(v4, i); +- const float64 ret = float64_muladd(a, b, c, flags, &env->fpu_status); +- +- s390_vec_write_float64(&tmp, i, ret); +- vxc = check_ieee_exc(env, i, false, &vec_exc); +- if (s || vxc) { +- break; +- } +- } +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +-} +- +-static void vfma128(S390Vector *v1, const S390Vector *v2, const S390Vector *v3, +- const S390Vector *v4, CPUS390XState *env, bool s, int flags, +- uintptr_t retaddr) +-{ +- const float128 a = s390_vec_read_float128(v2); +- const float128 b = s390_vec_read_float128(v3); +- const float128 c = s390_vec_read_float128(v4); +- uint8_t vxc, vec_exc = 0; +- float128 ret; +- +- ret = float128_muladd(a, b, c, flags, &env->fpu_status); +- vxc = check_ieee_exc(env, 0, false, &vec_exc); +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- s390_vec_write_float128(v1, ret); +-} +- +-#define DEF_GVEC_VFMA_B(NAME, FLAGS, BITS) \ +-void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, CPUS390XState *env, \ +- uint32_t desc) \ +-{ \ +- const bool se = extract32(simd_data(desc), 3, 1); \ +- \ +- vfma##BITS(v1, v2, v3, v4, env, se, FLAGS, GETPC()); \ +-} +- +-#define DEF_GVEC_VFMA(NAME, FLAGS) \ +- DEF_GVEC_VFMA_B(NAME, FLAGS, 32) \ +- DEF_GVEC_VFMA_B(NAME, FLAGS, 64) \ +- DEF_GVEC_VFMA_B(NAME, FLAGS, 128) +- +-DEF_GVEC_VFMA(vfma, 0) +-DEF_GVEC_VFMA(vfms, float_muladd_negate_c) +-DEF_GVEC_VFMA(vfnma, float_muladd_negate_result) +-DEF_GVEC_VFMA(vfnms, float_muladd_negate_c | float_muladd_negate_result) +- +-void HELPER(gvec_vftci32)(void *v1, const void *v2, CPUS390XState *env, +- uint32_t desc) +-{ +- uint16_t i3 = extract32(simd_data(desc), 4, 12); +- bool s = extract32(simd_data(desc), 3, 1); +- int i, match = 0; +- +- for (i = 0; i < 4; i++) { +- float32 a = s390_vec_read_float32(v2, i); +- +- if (float32_dcmask(env, a) & i3) { +- match++; +- s390_vec_write_element32(v1, i, -1u); +- } else { +- s390_vec_write_element32(v1, i, 0); +- } +- if (s) { +- break; +- } +- } +- +- if (match == 4 || (s && match)) { +- env->cc_op = 0; +- } else if (match) { +- env->cc_op = 1; +- } else { +- env->cc_op = 3; +- } +-} +- +-void HELPER(gvec_vftci64)(void *v1, const void *v2, CPUS390XState *env, +- uint32_t desc) +-{ +- const uint16_t i3 = extract32(simd_data(desc), 4, 12); +- const bool s = extract32(simd_data(desc), 3, 1); +- int i, match = 0; +- +- for (i = 0; i < 2; i++) { +- const float64 a = s390_vec_read_float64(v2, i); +- +- if (float64_dcmask(env, a) & i3) { +- match++; +- s390_vec_write_element64(v1, i, -1ull); +- } else { +- s390_vec_write_element64(v1, i, 0); +- } +- if (s) { +- break; +- } +- } +- +- if (match == 2 || (s && match)) { +- env->cc_op = 0; +- } else if (match) { +- env->cc_op = 1; +- } else { +- env->cc_op = 3; +- } +-} +- +-void HELPER(gvec_vftci128)(void *v1, const void *v2, CPUS390XState *env, +- uint32_t desc) +-{ +- const float128 a = s390_vec_read_float128(v2); +- uint16_t i3 = extract32(simd_data(desc), 4, 12); +- +- if (float128_dcmask(env, a) & i3) { +- env->cc_op = 0; +- s390_vec_write_element64(v1, 0, -1ull); +- 
s390_vec_write_element64(v1, 1, -1ull); +- } else { +- env->cc_op = 3; +- s390_vec_write_element64(v1, 0, 0); +- s390_vec_write_element64(v1, 1, 0); +- } +-} +- +-typedef enum S390MinMaxType { +- S390_MINMAX_TYPE_IEEE = 0, +- S390_MINMAX_TYPE_JAVA, +- S390_MINMAX_TYPE_C_MACRO, +- S390_MINMAX_TYPE_CPP, +- S390_MINMAX_TYPE_F, +-} S390MinMaxType; +- +-typedef enum S390MinMaxRes { +- S390_MINMAX_RES_MINMAX = 0, +- S390_MINMAX_RES_A, +- S390_MINMAX_RES_B, +- S390_MINMAX_RES_SILENCE_A, +- S390_MINMAX_RES_SILENCE_B, +-} S390MinMaxRes; +- +-static S390MinMaxRes vfmin_res(uint16_t dcmask_a, uint16_t dcmask_b, +- S390MinMaxType type, float_status *s) +-{ +- const bool neg_a = dcmask_a & DCMASK_NEGATIVE; +- const bool nan_a = dcmask_a & DCMASK_NAN; +- const bool nan_b = dcmask_b & DCMASK_NAN; +- +- g_assert(type > S390_MINMAX_TYPE_IEEE && type <= S390_MINMAX_TYPE_F); +- +- if (unlikely((dcmask_a | dcmask_b) & DCMASK_NAN)) { +- const bool sig_a = dcmask_a & DCMASK_SIGNALING_NAN; +- const bool sig_b = dcmask_b & DCMASK_SIGNALING_NAN; +- +- if ((dcmask_a | dcmask_b) & DCMASK_SIGNALING_NAN) { +- s->float_exception_flags |= float_flag_invalid; +- } +- switch (type) { +- case S390_MINMAX_TYPE_JAVA: +- if (sig_a) { +- return S390_MINMAX_RES_SILENCE_A; +- } else if (sig_b) { +- return S390_MINMAX_RES_SILENCE_B; +- } +- return nan_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; +- case S390_MINMAX_TYPE_F: +- return nan_b ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; +- case S390_MINMAX_TYPE_C_MACRO: +- s->float_exception_flags |= float_flag_invalid; +- return S390_MINMAX_RES_B; +- case S390_MINMAX_TYPE_CPP: +- s->float_exception_flags |= float_flag_invalid; +- return S390_MINMAX_RES_A; +- default: +- g_assert_not_reached(); +- } +- } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { +- switch (type) { +- case S390_MINMAX_TYPE_JAVA: +- return neg_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; +- case S390_MINMAX_TYPE_C_MACRO: +- return S390_MINMAX_RES_B; +- case S390_MINMAX_TYPE_F: +- return !neg_a ? S390_MINMAX_RES_B : S390_MINMAX_RES_A; +- case S390_MINMAX_TYPE_CPP: +- return S390_MINMAX_RES_A; +- default: +- g_assert_not_reached(); +- } +- } +- return S390_MINMAX_RES_MINMAX; +-} +- +-static S390MinMaxRes vfmax_res(uint16_t dcmask_a, uint16_t dcmask_b, +- S390MinMaxType type, float_status *s) +-{ +- g_assert(type > S390_MINMAX_TYPE_IEEE && type <= S390_MINMAX_TYPE_F); +- +- if (unlikely((dcmask_a | dcmask_b) & DCMASK_NAN)) { +- const bool sig_a = dcmask_a & DCMASK_SIGNALING_NAN; +- const bool sig_b = dcmask_b & DCMASK_SIGNALING_NAN; +- const bool nan_a = dcmask_a & DCMASK_NAN; +- const bool nan_b = dcmask_b & DCMASK_NAN; +- +- if ((dcmask_a | dcmask_b) & DCMASK_SIGNALING_NAN) { +- s->float_exception_flags |= float_flag_invalid; +- } +- switch (type) { +- case S390_MINMAX_TYPE_JAVA: +- if (sig_a) { +- return S390_MINMAX_RES_SILENCE_A; +- } else if (sig_b) { +- return S390_MINMAX_RES_SILENCE_B; +- } +- return nan_a ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; +- case S390_MINMAX_TYPE_F: +- return nan_b ? S390_MINMAX_RES_A : S390_MINMAX_RES_B; +- case S390_MINMAX_TYPE_C_MACRO: +- s->float_exception_flags |= float_flag_invalid; +- return S390_MINMAX_RES_B; +- case S390_MINMAX_TYPE_CPP: +- s->float_exception_flags |= float_flag_invalid; +- return S390_MINMAX_RES_A; +- default: +- g_assert_not_reached(); +- } +- } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { +- const bool neg_a = dcmask_a & DCMASK_NEGATIVE; +- +- switch (type) { +- case S390_MINMAX_TYPE_JAVA: +- case S390_MINMAX_TYPE_F: +- return neg_a ? 
S390_MINMAX_RES_B : S390_MINMAX_RES_A; +- case S390_MINMAX_TYPE_C_MACRO: +- return S390_MINMAX_RES_B; +- case S390_MINMAX_TYPE_CPP: +- return S390_MINMAX_RES_A; +- default: +- g_assert_not_reached(); +- } +- } +- return S390_MINMAX_RES_MINMAX; +-} +- +-static S390MinMaxRes vfminmax_res(uint16_t dcmask_a, uint16_t dcmask_b, +- S390MinMaxType type, bool is_min, +- float_status *s) +-{ +- return is_min ? vfmin_res(dcmask_a, dcmask_b, type, s) : +- vfmax_res(dcmask_a, dcmask_b, type, s); +-} +- +-static void vfminmax32(S390Vector *v1, const S390Vector *v2, +- const S390Vector *v3, CPUS390XState *env, +- S390MinMaxType type, bool is_min, bool is_abs, bool se, +- uintptr_t retaddr) +-{ +- float_status *s = &env->fpu_status; +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int i; +- +- for (i = 0; i < 4; i++) { +- float32 a = s390_vec_read_float32(v2, i); +- float32 b = s390_vec_read_float32(v3, i); +- float32 result; +- +- if (type != S390_MINMAX_TYPE_IEEE) { +- S390MinMaxRes res; +- +- if (is_abs) { +- a = float32_abs(a); +- b = float32_abs(b); +- } +- +- res = vfminmax_res(float32_dcmask(env, a), float32_dcmask(env, b), +- type, is_min, s); +- switch (res) { +- case S390_MINMAX_RES_MINMAX: +- result = is_min ? float32_min(a, b, s) : float32_max(a, b, s); +- break; +- case S390_MINMAX_RES_A: +- result = a; +- break; +- case S390_MINMAX_RES_B: +- result = b; +- break; +- case S390_MINMAX_RES_SILENCE_A: +- result = float32_silence_nan(a, s); +- break; +- case S390_MINMAX_RES_SILENCE_B: +- result = float32_silence_nan(b, s); +- break; +- default: +- g_assert_not_reached(); +- } +- } else if (!is_abs) { +- result = is_min ? float32_minnum(a, b, &env->fpu_status) : +- float32_maxnum(a, b, &env->fpu_status); +- } else { +- result = is_min ? float32_minnummag(a, b, &env->fpu_status) : +- float32_maxnummag(a, b, &env->fpu_status); +- } +- +- s390_vec_write_float32(&tmp, i, result); +- vxc = check_ieee_exc(env, i, false, &vec_exc); +- if (se || vxc) { +- break; +- } +- } +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +-} +- +-static void vfminmax64(S390Vector *v1, const S390Vector *v2, +- const S390Vector *v3, CPUS390XState *env, +- S390MinMaxType type, bool is_min, bool is_abs, bool se, +- uintptr_t retaddr) +-{ +- float_status *s = &env->fpu_status; +- uint8_t vxc, vec_exc = 0; +- S390Vector tmp = {}; +- int i; +- +- for (i = 0; i < 2; i++) { +- float64 a = s390_vec_read_float64(v2, i); +- float64 b = s390_vec_read_float64(v3, i); +- float64 result; +- +- if (type != S390_MINMAX_TYPE_IEEE) { +- S390MinMaxRes res; +- +- if (is_abs) { +- a = float64_abs(a); +- b = float64_abs(b); +- } +- +- res = vfminmax_res(float64_dcmask(env, a), float64_dcmask(env, b), +- type, is_min, s); +- switch (res) { +- case S390_MINMAX_RES_MINMAX: +- result = is_min ? float64_min(a, b, s) : float64_max(a, b, s); +- break; +- case S390_MINMAX_RES_A: +- result = a; +- break; +- case S390_MINMAX_RES_B: +- result = b; +- break; +- case S390_MINMAX_RES_SILENCE_A: +- result = float64_silence_nan(a, s); +- break; +- case S390_MINMAX_RES_SILENCE_B: +- result = float64_silence_nan(b, s); +- break; +- default: +- g_assert_not_reached(); +- } +- } else if (!is_abs) { +- result = is_min ? float64_minnum(a, b, &env->fpu_status) : +- float64_maxnum(a, b, &env->fpu_status); +- } else { +- result = is_min ? 
float64_minnummag(a, b, &env->fpu_status) : +- float64_maxnummag(a, b, &env->fpu_status); +- } +- +- s390_vec_write_float64(&tmp, i, result); +- vxc = check_ieee_exc(env, i, false, &vec_exc); +- if (se || vxc) { +- break; +- } +- } +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- *v1 = tmp; +-} +- +-static void vfminmax128(S390Vector *v1, const S390Vector *v2, +- const S390Vector *v3, CPUS390XState *env, +- S390MinMaxType type, bool is_min, bool is_abs, bool se, +- uintptr_t retaddr) +-{ +- float128 a = s390_vec_read_float128(v2); +- float128 b = s390_vec_read_float128(v3); +- float_status *s = &env->fpu_status; +- uint8_t vxc, vec_exc = 0; +- float128 result; +- +- if (type != S390_MINMAX_TYPE_IEEE) { +- S390MinMaxRes res; +- +- if (is_abs) { +- a = float128_abs(a); +- b = float128_abs(b); +- } +- +- res = vfminmax_res(float128_dcmask(env, a), float128_dcmask(env, b), +- type, is_min, s); +- switch (res) { +- case S390_MINMAX_RES_MINMAX: +- result = is_min ? float128_min(a, b, s) : float128_max(a, b, s); +- break; +- case S390_MINMAX_RES_A: +- result = a; +- break; +- case S390_MINMAX_RES_B: +- result = b; +- break; +- case S390_MINMAX_RES_SILENCE_A: +- result = float128_silence_nan(a, s); +- break; +- case S390_MINMAX_RES_SILENCE_B: +- result = float128_silence_nan(b, s); +- break; +- default: +- g_assert_not_reached(); +- } +- } else if (!is_abs) { +- result = is_min ? float128_minnum(a, b, &env->fpu_status) : +- float128_maxnum(a, b, &env->fpu_status); +- } else { +- result = is_min ? float128_minnummag(a, b, &env->fpu_status) : +- float128_maxnummag(a, b, &env->fpu_status); +- } +- +- vxc = check_ieee_exc(env, 0, false, &vec_exc); +- handle_ieee_exc(env, vxc, vec_exc, retaddr); +- s390_vec_write_float128(v1, result); +-} +- +-#define DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, BITS) \ +-void HELPER(gvec_##NAME##BITS)(void *v1, const void *v2, const void *v3, \ +- CPUS390XState *env, uint32_t desc) \ +-{ \ +- const bool se = extract32(simd_data(desc), 3, 1); \ +- uint8_t type = extract32(simd_data(desc), 4, 4); \ +- bool is_abs = false; \ +- \ +- if (type >= 8) { \ +- is_abs = true; \ +- type -= 8; \ +- } \ +- \ +- vfminmax##BITS(v1, v2, v3, env, type, IS_MIN, is_abs, se, GETPC()); \ +-} +- +-#define DEF_GVEC_VFMINMAX(NAME, IS_MIN) \ +- DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 32) \ +- DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 64) \ +- DEF_GVEC_VFMINMAX_B(NAME, IS_MIN, 128) +- +-DEF_GVEC_VFMINMAX(vfmax, false) +-DEF_GVEC_VFMINMAX(vfmin, true) +diff --git a/target/s390x/vec_helper.c b/target/s390x/vec_helper.c +deleted file mode 100644 +index 599bab06bd..0000000000 +--- a/target/s390x/vec_helper.c ++++ /dev/null +@@ -1,214 +0,0 @@ +-/* +- * QEMU TCG support -- s390x vector support instructions +- * +- * Copyright (C) 2019 Red Hat Inc +- * +- * Authors: +- * David Hildenbrand +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. 
+- */ +-#include "qemu/osdep.h" +-#include "cpu.h" +-#include "internal.h" +-#include "vec.h" +-#include "tcg/tcg.h" +-#include "tcg/tcg-gvec-desc.h" +-#include "exec/helper-proto.h" +-#include "exec/cpu_ldst.h" +-#include "exec/exec-all.h" +- +-void HELPER(gvec_vbperm)(void *v1, const void *v2, const void *v3, +- uint32_t desc) +-{ +- S390Vector tmp = {}; +- uint16_t result = 0; +- int i; +- +- for (i = 0; i < 16; i++) { +- const uint8_t bit_nr = s390_vec_read_element8(v3, i); +- uint16_t bit; +- +- if (bit_nr >= 128) { +- continue; +- } +- bit = (s390_vec_read_element8(v2, bit_nr / 8) +- >> (7 - (bit_nr % 8))) & 1; +- result |= (bit << (15 - i)); +- } +- s390_vec_write_element16(&tmp, 3, result); +- *(S390Vector *)v1 = tmp; +-} +- +-void HELPER(vll)(CPUS390XState *env, void *v1, uint64_t addr, uint64_t bytes) +-{ +- if (likely(bytes >= 16)) { +- uint64_t t0, t1; +- +- t0 = cpu_ldq_data_ra(env, addr, GETPC()); +- addr = wrap_address(env, addr + 8); +- t1 = cpu_ldq_data_ra(env, addr, GETPC()); +- s390_vec_write_element64(v1, 0, t0); +- s390_vec_write_element64(v1, 1, t1); +- } else { +- S390Vector tmp = {}; +- int i; +- +- for (i = 0; i < bytes; i++) { +- uint8_t byte = cpu_ldub_data_ra(env, addr, GETPC()); +- +- s390_vec_write_element8(&tmp, i, byte); +- addr = wrap_address(env, addr + 1); +- } +- *(S390Vector *)v1 = tmp; +- } +-} +- +-#define DEF_VPK_HFN(BITS, TBITS) \ +-typedef uint##TBITS##_t (*vpk##BITS##_fn)(uint##BITS##_t, int *); \ +-static int vpk##BITS##_hfn(S390Vector *v1, const S390Vector *v2, \ +- const S390Vector *v3, vpk##BITS##_fn fn) \ +-{ \ +- int i, saturated = 0; \ +- S390Vector tmp; \ +- \ +- for (i = 0; i < (128 / TBITS); i++) { \ +- uint##BITS##_t src; \ +- \ +- if (i < (128 / BITS)) { \ +- src = s390_vec_read_element##BITS(v2, i); \ +- } else { \ +- src = s390_vec_read_element##BITS(v3, i - (128 / BITS)); \ +- } \ +- s390_vec_write_element##TBITS(&tmp, i, fn(src, &saturated)); \ +- } \ +- *v1 = tmp; \ +- return saturated; \ +-} +-DEF_VPK_HFN(64, 32) +-DEF_VPK_HFN(32, 16) +-DEF_VPK_HFN(16, 8) +- +-#define DEF_VPK(BITS, TBITS) \ +-static uint##TBITS##_t vpk##BITS##e(uint##BITS##_t src, int *saturated) \ +-{ \ +- return src; \ +-} \ +-void HELPER(gvec_vpk##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- vpk##BITS##_hfn(v1, v2, v3, vpk##BITS##e); \ +-} +-DEF_VPK(64, 32) +-DEF_VPK(32, 16) +-DEF_VPK(16, 8) +- +-#define DEF_VPKS(BITS, TBITS) \ +-static uint##TBITS##_t vpks##BITS##e(uint##BITS##_t src, int *saturated) \ +-{ \ +- if ((int##BITS##_t)src > INT##TBITS##_MAX) { \ +- (*saturated)++; \ +- return INT##TBITS##_MAX; \ +- } else if ((int##BITS##_t)src < INT##TBITS##_MIN) { \ +- (*saturated)++; \ +- return INT##TBITS##_MIN; \ +- } \ +- return src; \ +-} \ +-void HELPER(gvec_vpks##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- vpk##BITS##_hfn(v1, v2, v3, vpks##BITS##e); \ +-} \ +-void HELPER(gvec_vpks_cc##BITS)(void *v1, const void *v2, const void *v3, \ +- CPUS390XState *env, uint32_t desc) \ +-{ \ +- int saturated = vpk##BITS##_hfn(v1, v2, v3, vpks##BITS##e); \ +- \ +- if (saturated == (128 / TBITS)) { \ +- env->cc_op = 3; \ +- } else if (saturated) { \ +- env->cc_op = 1; \ +- } else { \ +- env->cc_op = 0; \ +- } \ +-} +-DEF_VPKS(64, 32) +-DEF_VPKS(32, 16) +-DEF_VPKS(16, 8) +- +-#define DEF_VPKLS(BITS, TBITS) \ +-static uint##TBITS##_t vpkls##BITS##e(uint##BITS##_t src, int *saturated) \ +-{ \ +- if (src > UINT##TBITS##_MAX) { \ +- (*saturated)++; \ +- return UINT##TBITS##_MAX; \ +- } \ +- return src; \ 
+-} \ +-void HELPER(gvec_vpkls##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e); \ +-} \ +-void HELPER(gvec_vpkls_cc##BITS)(void *v1, const void *v2, const void *v3, \ +- CPUS390XState *env, uint32_t desc) \ +-{ \ +- int saturated = vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e); \ +- \ +- if (saturated == (128 / TBITS)) { \ +- env->cc_op = 3; \ +- } else if (saturated) { \ +- env->cc_op = 1; \ +- } else { \ +- env->cc_op = 0; \ +- } \ +-} +-DEF_VPKLS(64, 32) +-DEF_VPKLS(32, 16) +-DEF_VPKLS(16, 8) +- +-void HELPER(gvec_vperm)(void *v1, const void *v2, const void *v3, +- const void *v4, uint32_t desc) +-{ +- S390Vector tmp; +- int i; +- +- for (i = 0; i < 16; i++) { +- const uint8_t selector = s390_vec_read_element8(v4, i) & 0x1f; +- uint8_t byte; +- +- if (selector < 16) { +- byte = s390_vec_read_element8(v2, selector); +- } else { +- byte = s390_vec_read_element8(v3, selector - 16); +- } +- s390_vec_write_element8(&tmp, i, byte); +- } +- *(S390Vector *)v1 = tmp; +-} +- +-void HELPER(vstl)(CPUS390XState *env, const void *v1, uint64_t addr, +- uint64_t bytes) +-{ +- /* Probe write access before actually modifying memory */ +- probe_write_access(env, addr, bytes, GETPC()); +- +- if (likely(bytes >= 16)) { +- cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 0), GETPC()); +- addr = wrap_address(env, addr + 8); +- cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 1), GETPC()); +- } else { +- S390Vector tmp = {}; +- int i; +- +- for (i = 0; i < bytes; i++) { +- uint8_t byte = s390_vec_read_element8(v1, i); +- +- cpu_stb_data_ra(env, addr, byte, GETPC()); +- addr = wrap_address(env, addr + 1); +- } +- *(S390Vector *)v1 = tmp; +- } +-} +diff --git a/target/s390x/vec_int_helper.c b/target/s390x/vec_int_helper.c +deleted file mode 100644 +index 5561b3ed90..0000000000 +--- a/target/s390x/vec_int_helper.c ++++ /dev/null +@@ -1,587 +0,0 @@ +-/* +- * QEMU TCG support -- s390x vector integer instruction support +- * +- * Copyright (C) 2019 Red Hat Inc +- * +- * Authors: +- * David Hildenbrand +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. 
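DEF_VPKS above generates the signed saturating pack step for VPKS: each wide element is clamped into the narrower type, saturations are counted, and the _cc variants map that count onto cc 0 (none), cc 1 (some) or cc 3 (every lane saturated). A standalone sketch of the 32-to-16-bit element case, with ad-hoc names:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative only: one lane of the 32->16 saturating pack */
    static uint16_t vpks32e_sketch(uint32_t src, int *saturated)
    {
        if ((int32_t)src > INT16_MAX) {
            (*saturated)++;
            return INT16_MAX;
        } else if ((int32_t)src < INT16_MIN) {
            (*saturated)++;
            return (uint16_t)INT16_MIN;
        }
        return (uint16_t)src;
    }

    int main(void)
    {
        int saturated = 0;

        printf("%d\n", (int16_t)vpks32e_sketch(70000, &saturated));            /* 32767 */
        printf("%d\n", (int16_t)vpks32e_sketch((uint32_t)-70000, &saturated)); /* -32768 */
        printf("%d lanes saturated\n", saturated);                             /* 2 */
        return 0;
    }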
+- */ +-#include "qemu/osdep.h" +-#include "qemu-common.h" +-#include "cpu.h" +-#include "vec.h" +-#include "exec/helper-proto.h" +-#include "tcg/tcg-gvec-desc.h" +- +-static bool s390_vec_is_zero(const S390Vector *v) +-{ +- return !v->doubleword[0] && !v->doubleword[1]; +-} +- +-static void s390_vec_xor(S390Vector *res, const S390Vector *a, +- const S390Vector *b) +-{ +- res->doubleword[0] = a->doubleword[0] ^ b->doubleword[0]; +- res->doubleword[1] = a->doubleword[1] ^ b->doubleword[1]; +-} +- +-static void s390_vec_and(S390Vector *res, const S390Vector *a, +- const S390Vector *b) +-{ +- res->doubleword[0] = a->doubleword[0] & b->doubleword[0]; +- res->doubleword[1] = a->doubleword[1] & b->doubleword[1]; +-} +- +-static bool s390_vec_equal(const S390Vector *a, const S390Vector *b) +-{ +- return a->doubleword[0] == b->doubleword[0] && +- a->doubleword[1] == b->doubleword[1]; +-} +- +-static void s390_vec_shl(S390Vector *d, const S390Vector *a, uint64_t count) +-{ +- uint64_t tmp; +- +- g_assert(count < 128); +- if (count == 0) { +- d->doubleword[0] = a->doubleword[0]; +- d->doubleword[1] = a->doubleword[1]; +- } else if (count == 64) { +- d->doubleword[0] = a->doubleword[1]; +- d->doubleword[1] = 0; +- } else if (count < 64) { +- tmp = extract64(a->doubleword[1], 64 - count, count); +- d->doubleword[1] = a->doubleword[1] << count; +- d->doubleword[0] = (a->doubleword[0] << count) | tmp; +- } else { +- d->doubleword[0] = a->doubleword[1] << (count - 64); +- d->doubleword[1] = 0; +- } +-} +- +-static void s390_vec_sar(S390Vector *d, const S390Vector *a, uint64_t count) +-{ +- uint64_t tmp; +- +- if (count == 0) { +- d->doubleword[0] = a->doubleword[0]; +- d->doubleword[1] = a->doubleword[1]; +- } else if (count == 64) { +- tmp = (int64_t)a->doubleword[0] >> 63; +- d->doubleword[1] = a->doubleword[0]; +- d->doubleword[0] = tmp; +- } else if (count < 64) { +- tmp = a->doubleword[1] >> count; +- d->doubleword[1] = deposit64(tmp, 64 - count, count, a->doubleword[0]); +- d->doubleword[0] = (int64_t)a->doubleword[0] >> count; +- } else { +- tmp = (int64_t)a->doubleword[0] >> 63; +- d->doubleword[1] = (int64_t)a->doubleword[0] >> (count - 64); +- d->doubleword[0] = tmp; +- } +-} +- +-static void s390_vec_shr(S390Vector *d, const S390Vector *a, uint64_t count) +-{ +- uint64_t tmp; +- +- g_assert(count < 128); +- if (count == 0) { +- d->doubleword[0] = a->doubleword[0]; +- d->doubleword[1] = a->doubleword[1]; +- } else if (count == 64) { +- d->doubleword[1] = a->doubleword[0]; +- d->doubleword[0] = 0; +- } else if (count < 64) { +- tmp = a->doubleword[1] >> count; +- d->doubleword[1] = deposit64(tmp, 64 - count, count, a->doubleword[0]); +- d->doubleword[0] = a->doubleword[0] >> count; +- } else { +- d->doubleword[1] = a->doubleword[0] >> (count - 64); +- d->doubleword[0] = 0; +- } +-} +-#define DEF_VAVG(BITS) \ +-void HELPER(gvec_vavg##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ +- const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ +- \ +- s390_vec_write_element##BITS(v1, i, (a + b + 1) >> 1); \ +- } \ +-} +-DEF_VAVG(8) +-DEF_VAVG(16) +- +-#define DEF_VAVGL(BITS) \ +-void HELPER(gvec_vavgl##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ +- const uint##BITS##_t b = 
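s390_vec_shl(), s390_vec_sar() and s390_vec_shr() above compose a 128-bit shift from the vector's two 64-bit doublewords, splitting on whether the count is 0, below 64, exactly 64, or above. A standalone left-shift sketch with an ad-hoc two-word struct; the count == 64 special case folds into the last branch here because a.lo << 0 is just a.lo:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint64_t hi, lo; } u128_sketch; /* illustrative only */

    static u128_sketch shl128(u128_sketch a, unsigned count)
    {
        u128_sketch d;

        assert(count < 128);
        if (count == 0) {
            d = a;
        } else if (count < 64) {
            /* bits that cross from the low half into the high half */
            d.hi = (a.hi << count) | (a.lo >> (64 - count));
            d.lo = a.lo << count;
        } else {
            /* 64 <= count < 128: the low half crosses over entirely */
            d.hi = a.lo << (count - 64);
            d.lo = 0;
        }
        return d;
    }

    int main(void)
    {
        u128_sketch r = shl128((u128_sketch){ .lo = 1 }, 64);

        printf("%llx %llx\n", (unsigned long long)r.hi,
               (unsigned long long)r.lo);              /* 1 0 */
        return 0;
    }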
s390_vec_read_element##BITS(v3, i); \ +- \ +- s390_vec_write_element##BITS(v1, i, (a + b + 1) >> 1); \ +- } \ +-} +-DEF_VAVGL(8) +-DEF_VAVGL(16) +- +-#define DEF_VCLZ(BITS) \ +-void HELPER(gvec_vclz##BITS)(void *v1, const void *v2, uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ +- \ +- s390_vec_write_element##BITS(v1, i, clz32(a) - 32 + BITS); \ +- } \ +-} +-DEF_VCLZ(8) +-DEF_VCLZ(16) +- +-#define DEF_VCTZ(BITS) \ +-void HELPER(gvec_vctz##BITS)(void *v1, const void *v2, uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ +- \ +- s390_vec_write_element##BITS(v1, i, a ? ctz32(a) : BITS); \ +- } \ +-} +-DEF_VCTZ(8) +-DEF_VCTZ(16) +- +-/* like binary multiplication, but XOR instead of addition */ +-#define DEF_GALOIS_MULTIPLY(BITS, TBITS) \ +-static uint##TBITS##_t galois_multiply##BITS(uint##TBITS##_t a, \ +- uint##TBITS##_t b) \ +-{ \ +- uint##TBITS##_t res = 0; \ +- \ +- while (b) { \ +- if (b & 0x1) { \ +- res = res ^ a; \ +- } \ +- a = a << 1; \ +- b = b >> 1; \ +- } \ +- return res; \ +-} +-DEF_GALOIS_MULTIPLY(8, 16) +-DEF_GALOIS_MULTIPLY(16, 32) +-DEF_GALOIS_MULTIPLY(32, 64) +- +-static S390Vector galois_multiply64(uint64_t a, uint64_t b) +-{ +- S390Vector res = {}; +- S390Vector va = { +- .doubleword[1] = a, +- }; +- S390Vector vb = { +- .doubleword[1] = b, +- }; +- +- while (!s390_vec_is_zero(&vb)) { +- if (vb.doubleword[1] & 0x1) { +- s390_vec_xor(&res, &res, &va); +- } +- s390_vec_shl(&va, &va, 1); +- s390_vec_shr(&vb, &vb, 1); +- } +- return res; +-} +- +-#define DEF_VGFM(BITS, TBITS) \ +-void HELPER(gvec_vgfm##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / TBITS); i++) { \ +- uint##BITS##_t a = s390_vec_read_element##BITS(v2, i * 2); \ +- uint##BITS##_t b = s390_vec_read_element##BITS(v3, i * 2); \ +- uint##TBITS##_t d = galois_multiply##BITS(a, b); \ +- \ +- a = s390_vec_read_element##BITS(v2, i * 2 + 1); \ +- b = s390_vec_read_element##BITS(v3, i * 2 + 1); \ +- d = d ^ galois_multiply32(a, b); \ +- s390_vec_write_element##TBITS(v1, i, d); \ +- } \ +-} +-DEF_VGFM(8, 16) +-DEF_VGFM(16, 32) +-DEF_VGFM(32, 64) +- +-void HELPER(gvec_vgfm64)(void *v1, const void *v2, const void *v3, +- uint32_t desc) +-{ +- S390Vector tmp1, tmp2; +- uint64_t a, b; +- +- a = s390_vec_read_element64(v2, 0); +- b = s390_vec_read_element64(v3, 0); +- tmp1 = galois_multiply64(a, b); +- a = s390_vec_read_element64(v2, 1); +- b = s390_vec_read_element64(v3, 1); +- tmp2 = galois_multiply64(a, b); +- s390_vec_xor(v1, &tmp1, &tmp2); +-} +- +-#define DEF_VGFMA(BITS, TBITS) \ +-void HELPER(gvec_vgfma##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / TBITS); i++) { \ +- uint##BITS##_t a = s390_vec_read_element##BITS(v2, i * 2); \ +- uint##BITS##_t b = s390_vec_read_element##BITS(v3, i * 2); \ +- uint##TBITS##_t d = galois_multiply##BITS(a, b); \ +- \ +- a = s390_vec_read_element##BITS(v2, i * 2 + 1); \ +- b = s390_vec_read_element##BITS(v3, i * 2 + 1); \ +- d = d ^ galois_multiply32(a, b); \ +- d = d ^ s390_vec_read_element##TBITS(v4, i); \ +- s390_vec_write_element##TBITS(v1, i, d); \ +- } \ +-} +-DEF_VGFMA(8, 16) +-DEF_VGFMA(16, 32) +-DEF_VGFMA(32, 64) +- +-void HELPER(gvec_vgfma64)(void *v1, const void *v2, const void *v3, +- const void *v4, uint32_t desc) +-{ +- 
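As its comment says, DEF_GALOIS_MULTIPLY above is ordinary shift-and-add long multiplication with XOR standing in for addition, so the partial products never generate carries; this is multiplication of polynomials over GF(2). A standalone worked example with an ad-hoc name:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* illustrative only: carry-less multiply of two 8-bit values */
    static uint16_t clmul8_sketch(uint16_t a, uint16_t b)
    {
        uint16_t res = 0;

        while (b) {
            if (b & 1) {
                res ^= a;   /* XOR in the partial product */
            }
            a <<= 1;
            b >>= 1;
        }
        return res;
    }

    int main(void)
    {
        printf("0x%" PRIx16 "\n", clmul8_sketch(0x3, 0x3)); /* 0x5 */
        return 0;
    }

Here (x + 1) * (x + 1) = x^2 + 1: the result is 0x5 rather than the arithmetic 9, because the two middle terms cancel under XOR.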
S390Vector tmp1, tmp2; +- uint64_t a, b; +- +- a = s390_vec_read_element64(v2, 0); +- b = s390_vec_read_element64(v3, 0); +- tmp1 = galois_multiply64(a, b); +- a = s390_vec_read_element64(v2, 1); +- b = s390_vec_read_element64(v3, 1); +- tmp2 = galois_multiply64(a, b); +- s390_vec_xor(&tmp1, &tmp1, &tmp2); +- s390_vec_xor(v1, &tmp1, v4); +-} +- +-#define DEF_VMAL(BITS) \ +-void HELPER(gvec_vmal##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ +- const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ +- const uint##BITS##_t c = s390_vec_read_element##BITS(v4, i); \ +- \ +- s390_vec_write_element##BITS(v1, i, a * b + c); \ +- } \ +-} +-DEF_VMAL(8) +-DEF_VMAL(16) +- +-#define DEF_VMAH(BITS) \ +-void HELPER(gvec_vmah##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ +- const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ +- const int32_t c = (int##BITS##_t)s390_vec_read_element##BITS(v4, i); \ +- \ +- s390_vec_write_element##BITS(v1, i, (a * b + c) >> BITS); \ +- } \ +-} +-DEF_VMAH(8) +-DEF_VMAH(16) +- +-#define DEF_VMALH(BITS) \ +-void HELPER(gvec_vmalh##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ +- const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ +- const uint##BITS##_t c = s390_vec_read_element##BITS(v4, i); \ +- \ +- s390_vec_write_element##BITS(v1, i, (a * b + c) >> BITS); \ +- } \ +-} +-DEF_VMALH(8) +-DEF_VMALH(16) +- +-#define DEF_VMAE(BITS, TBITS) \ +-void HELPER(gvec_vmae##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, uint32_t desc) \ +-{ \ +- int i, j; \ +- \ +- for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ +- int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ +- int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ +- int##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ +- \ +- s390_vec_write_element##TBITS(v1, i, a * b + c); \ +- } \ +-} +-DEF_VMAE(8, 16) +-DEF_VMAE(16, 32) +-DEF_VMAE(32, 64) +- +-#define DEF_VMALE(BITS, TBITS) \ +-void HELPER(gvec_vmale##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, uint32_t desc) \ +-{ \ +- int i, j; \ +- \ +- for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ +- uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ +- uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ +- uint##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ +- \ +- s390_vec_write_element##TBITS(v1, i, a * b + c); \ +- } \ +-} +-DEF_VMALE(8, 16) +-DEF_VMALE(16, 32) +-DEF_VMALE(32, 64) +- +-#define DEF_VMAO(BITS, TBITS) \ +-void HELPER(gvec_vmao##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, uint32_t desc) \ +-{ \ +- int i, j; \ +- \ +- for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ +- int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ +- int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ +- int##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ +- \ +- s390_vec_write_element##TBITS(v1, i, a * b + c); \ +- } \ +-} +-DEF_VMAO(8, 16) 
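The DEF_VMAE/DEF_VMAO expansions above walk the even (j starts at 0) or odd (j starts at 1) narrow source lanes, widen each pair, and accumulate a * b + c into the corresponding wide destination lane. A standalone sketch of the odd 16-to-32-bit case, using flat C arrays in place of the S390Vector lane accessors (lane-numbering details are glossed over):

    #include <stdint.h>

    /* illustrative only: multiply-and-add odd, 16 -> 32 bit elements;
     * v2/v3 lanes 1, 3, 5, 7 feed destination lanes 0..3 */
    static void vmao16_sketch(int32_t d[4], const int16_t v2[8],
                              const int16_t v3[8], const int32_t v4[4])
    {
        for (int i = 0, j = 1; i < 4; i++, j += 2) {
            d[i] = (int32_t)v2[j] * v3[j] + v4[i];
        }
    }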
+-DEF_VMAO(16, 32) +-DEF_VMAO(32, 64) +- +-#define DEF_VMALO(BITS, TBITS) \ +-void HELPER(gvec_vmalo##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, uint32_t desc) \ +-{ \ +- int i, j; \ +- \ +- for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ +- uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ +- uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ +- uint##TBITS##_t c = s390_vec_read_element##TBITS(v4, i); \ +- \ +- s390_vec_write_element##TBITS(v1, i, a * b + c); \ +- } \ +-} +-DEF_VMALO(8, 16) +-DEF_VMALO(16, 32) +-DEF_VMALO(32, 64) +- +-#define DEF_VMH(BITS) \ +-void HELPER(gvec_vmh##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const int32_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, i); \ +- const int32_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, i); \ +- \ +- s390_vec_write_element##BITS(v1, i, (a * b) >> BITS); \ +- } \ +-} +-DEF_VMH(8) +-DEF_VMH(16) +- +-#define DEF_VMLH(BITS) \ +-void HELPER(gvec_vmlh##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ +- const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ +- \ +- s390_vec_write_element##BITS(v1, i, (a * b) >> BITS); \ +- } \ +-} +-DEF_VMLH(8) +-DEF_VMLH(16) +- +-#define DEF_VME(BITS, TBITS) \ +-void HELPER(gvec_vme##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- int i, j; \ +- \ +- for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ +- int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ +- int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ +- \ +- s390_vec_write_element##TBITS(v1, i, a * b); \ +- } \ +-} +-DEF_VME(8, 16) +-DEF_VME(16, 32) +-DEF_VME(32, 64) +- +-#define DEF_VMLE(BITS, TBITS) \ +-void HELPER(gvec_vmle##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- int i, j; \ +- \ +- for (i = 0, j = 0; i < (128 / TBITS); i++, j += 2) { \ +- const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ +- const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ +- \ +- s390_vec_write_element##TBITS(v1, i, a * b); \ +- } \ +-} +-DEF_VMLE(8, 16) +-DEF_VMLE(16, 32) +-DEF_VMLE(32, 64) +- +-#define DEF_VMO(BITS, TBITS) \ +-void HELPER(gvec_vmo##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- int i, j; \ +- \ +- for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ +- int##TBITS##_t a = (int##BITS##_t)s390_vec_read_element##BITS(v2, j); \ +- int##TBITS##_t b = (int##BITS##_t)s390_vec_read_element##BITS(v3, j); \ +- \ +- s390_vec_write_element##TBITS(v1, i, a * b); \ +- } \ +-} +-DEF_VMO(8, 16) +-DEF_VMO(16, 32) +-DEF_VMO(32, 64) +- +-#define DEF_VMLO(BITS, TBITS) \ +-void HELPER(gvec_vmlo##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- int i, j; \ +- \ +- for (i = 0, j = 1; i < (128 / TBITS); i++, j += 2) { \ +- const uint##TBITS##_t a = s390_vec_read_element##BITS(v2, j); \ +- const uint##TBITS##_t b = s390_vec_read_element##BITS(v3, j); \ +- \ +- s390_vec_write_element##TBITS(v1, i, a * b); \ +- } \ +-} +-DEF_VMLO(8, 16) +-DEF_VMLO(16, 32) +-DEF_VMLO(32, 64) +- +-#define DEF_VPOPCT(BITS) \ +-void HELPER(gvec_vpopct##BITS)(void *v1, const void *v2, uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const 
uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ +- \ +- s390_vec_write_element##BITS(v1, i, ctpop32(a)); \ +- } \ +-} +-DEF_VPOPCT(8) +-DEF_VPOPCT(16) +- +-#define DEF_VERIM(BITS) \ +-void HELPER(gvec_verim##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- const uint8_t count = simd_data(desc); \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const uint##BITS##_t a = s390_vec_read_element##BITS(v1, i); \ +- const uint##BITS##_t b = s390_vec_read_element##BITS(v2, i); \ +- const uint##BITS##_t mask = s390_vec_read_element##BITS(v3, i); \ +- const uint##BITS##_t d = (a & ~mask) | (rol##BITS(b, count) & mask); \ +- \ +- s390_vec_write_element##BITS(v1, i, d); \ +- } \ +-} +-DEF_VERIM(8) +-DEF_VERIM(16) +- +-void HELPER(gvec_vsl)(void *v1, const void *v2, uint64_t count, +- uint32_t desc) +-{ +- s390_vec_shl(v1, v2, count); +-} +- +-void HELPER(gvec_vsra)(void *v1, const void *v2, uint64_t count, +- uint32_t desc) +-{ +- s390_vec_sar(v1, v2, count); +-} +- +-void HELPER(gvec_vsrl)(void *v1, const void *v2, uint64_t count, +- uint32_t desc) +-{ +- s390_vec_shr(v1, v2, count); +-} +- +-#define DEF_VSCBI(BITS) \ +-void HELPER(gvec_vscbi##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- int i; \ +- \ +- for (i = 0; i < (128 / BITS); i++) { \ +- const uint##BITS##_t a = s390_vec_read_element##BITS(v2, i); \ +- const uint##BITS##_t b = s390_vec_read_element##BITS(v3, i); \ +- \ +- s390_vec_write_element##BITS(v1, i, a >= b); \ +- } \ +-} +-DEF_VSCBI(8) +-DEF_VSCBI(16) +- +-void HELPER(gvec_vtm)(void *v1, const void *v2, CPUS390XState *env, +- uint32_t desc) +-{ +- S390Vector tmp; +- +- s390_vec_and(&tmp, v1, v2); +- if (s390_vec_is_zero(&tmp)) { +- /* Selected bits all zeros; or all mask bits zero */ +- env->cc_op = 0; +- } else if (s390_vec_equal(&tmp, v2)) { +- /* Selected bits all ones */ +- env->cc_op = 3; +- } else { +- /* Selected bits a mix of zeros and ones */ +- env->cc_op = 1; +- } +-} +diff --git a/target/s390x/vec_string_helper.c b/target/s390x/vec_string_helper.c +deleted file mode 100644 +index c516c0ceeb..0000000000 +--- a/target/s390x/vec_string_helper.c ++++ /dev/null +@@ -1,473 +0,0 @@ +-/* +- * QEMU TCG support -- s390x vector string instruction support +- * +- * Copyright (C) 2019 Red Hat Inc +- * +- * Authors: +- * David Hildenbrand +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. +- */ +-#include "qemu/osdep.h" +-#include "qemu-common.h" +-#include "cpu.h" +-#include "internal.h" +-#include "vec.h" +-#include "tcg/tcg.h" +-#include "tcg/tcg-gvec-desc.h" +-#include "exec/helper-proto.h" +- +-/* +- * Returns a bit set in the MSB of each element that is zero, +- * as defined by the mask. +- */ +-static inline uint64_t zero_search(uint64_t a, uint64_t mask) +-{ +- return ~(((a & mask) + mask) | a | mask); +-} +- +-/* +- * Returns a bit set in the MSB of each element that is not zero, +- * as defined by the mask. +- */ +-static inline uint64_t nonzero_search(uint64_t a, uint64_t mask) +-{ +- return (((a & mask) + mask) | a) & ~mask; +-} +- +-/* +- * Returns the byte offset for the first match, or 16 for no match. +- */ +-static inline int match_index(uint64_t c0, uint64_t c1) +-{ +- return (c0 ? clz64(c0) : clz64(c1) + 64) >> 3; +-} +- +-/* +- * Returns the number of bits composing one element. 
+- */ +-static uint8_t get_element_bits(uint8_t es) +-{ +- return (1 << es) * BITS_PER_BYTE; +-} +- +-/* +- * Returns the bitmask for a single element. +- */ +-static uint64_t get_single_element_mask(uint8_t es) +-{ +- return -1ull >> (64 - get_element_bits(es)); +-} +- +-/* +- * Returns the bitmask for a single element (excluding the MSB). +- */ +-static uint64_t get_single_element_lsbs_mask(uint8_t es) +-{ +- return -1ull >> (65 - get_element_bits(es)); +-} +- +-/* +- * Returns the bitmasks for multiple elements (excluding the MSBs). +- */ +-static uint64_t get_element_lsbs_mask(uint8_t es) +-{ +- return dup_const(es, get_single_element_lsbs_mask(es)); +-} +- +-static int vfae(void *v1, const void *v2, const void *v3, bool in, +- bool rt, bool zs, uint8_t es) +-{ +- const uint64_t mask = get_element_lsbs_mask(es); +- const int bits = get_element_bits(es); +- uint64_t a0, a1, b0, b1, e0, e1, t0, t1, z0, z1; +- uint64_t first_zero = 16; +- uint64_t first_equal; +- int i; +- +- a0 = s390_vec_read_element64(v2, 0); +- a1 = s390_vec_read_element64(v2, 1); +- b0 = s390_vec_read_element64(v3, 0); +- b1 = s390_vec_read_element64(v3, 1); +- e0 = 0; +- e1 = 0; +- /* compare against equality with every other element */ +- for (i = 0; i < 64; i += bits) { +- t0 = rol64(b0, i); +- t1 = rol64(b1, i); +- e0 |= zero_search(a0 ^ t0, mask); +- e0 |= zero_search(a0 ^ t1, mask); +- e1 |= zero_search(a1 ^ t0, mask); +- e1 |= zero_search(a1 ^ t1, mask); +- } +- /* invert the result if requested - invert only the MSBs */ +- if (in) { +- e0 = ~e0 & ~mask; +- e1 = ~e1 & ~mask; +- } +- first_equal = match_index(e0, e1); +- +- if (zs) { +- z0 = zero_search(a0, mask); +- z1 = zero_search(a1, mask); +- first_zero = match_index(z0, z1); +- } +- +- if (rt) { +- e0 = (e0 >> (bits - 1)) * get_single_element_mask(es); +- e1 = (e1 >> (bits - 1)) * get_single_element_mask(es); +- s390_vec_write_element64(v1, 0, e0); +- s390_vec_write_element64(v1, 1, e1); +- } else { +- s390_vec_write_element64(v1, 0, MIN(first_equal, first_zero)); +- s390_vec_write_element64(v1, 1, 0); +- } +- +- if (first_zero == 16 && first_equal == 16) { +- return 3; /* no match */ +- } else if (first_zero == 16) { +- return 1; /* matching elements, no match for zero */ +- } else if (first_equal < first_zero) { +- return 2; /* matching elements before match for zero */ +- } +- return 0; /* match for zero */ +-} +- +-#define DEF_VFAE_HELPER(BITS) \ +-void HELPER(gvec_vfae##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- const bool in = extract32(simd_data(desc), 3, 1); \ +- const bool rt = extract32(simd_data(desc), 2, 1); \ +- const bool zs = extract32(simd_data(desc), 1, 1); \ +- \ +- vfae(v1, v2, v3, in, rt, zs, MO_##BITS); \ +-} +-DEF_VFAE_HELPER(8) +-DEF_VFAE_HELPER(16) +-DEF_VFAE_HELPER(32) +- +-#define DEF_VFAE_CC_HELPER(BITS) \ +-void HELPER(gvec_vfae_cc##BITS)(void *v1, const void *v2, const void *v3, \ +- CPUS390XState *env, uint32_t desc) \ +-{ \ +- const bool in = extract32(simd_data(desc), 3, 1); \ +- const bool rt = extract32(simd_data(desc), 2, 1); \ +- const bool zs = extract32(simd_data(desc), 1, 1); \ +- \ +- env->cc_op = vfae(v1, v2, v3, in, rt, zs, MO_##BITS); \ +-} +-DEF_VFAE_CC_HELPER(8) +-DEF_VFAE_CC_HELPER(16) +-DEF_VFAE_CC_HELPER(32) +- +-static int vfee(void *v1, const void *v2, const void *v3, bool zs, uint8_t es) +-{ +- const uint64_t mask = get_element_lsbs_mask(es); +- uint64_t a0, a1, b0, b1, e0, e1, z0, z1; +- uint64_t first_zero = 16; +- uint64_t first_equal; +- +- a0 = 
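zero_search() above is the classic SWAR zero-element test: with mask covering every element bit except the MSB, (a & mask) + mask carries into an element's MSB exactly when that element's low bits are nonzero; OR-ing in a contributes each element's own MSB, and the final complement leaves a set MSB only for all-zero elements. match_index() then converts the leading such bit into a byte offset with clz64. A standalone worked example for 8-bit elements, using the GCC/Clang builtin in place of clz64 (ad-hoc names):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* illustrative only: set the MSB of every all-zero byte */
    static uint64_t zero_search8(uint64_t a)
    {
        const uint64_t mask = 0x7f7f7f7f7f7f7f7fULL; /* all bits but each MSB */

        return ~(((a & mask) + mask) | a | mask);
    }

    int main(void)
    {
        /* counting from the most significant end, bytes 2 and 5 are zero */
        uint64_t a = 0x1122003344005566ULL;
        uint64_t z = zero_search8(a);

        printf("%016" PRIx64 "\n", z);                  /* 0000800000800000 */
        printf("%d\n", (int)(__builtin_clzll(z) >> 3)); /* 2: first zero byte */
        return 0;
    }

Because (a & mask) clears every byte's top bit first, the per-byte additions can reach at most 0x7f + 0x7f and never carry into the neighbouring element.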
s390_vec_read_element64(v2, 0); +- a1 = s390_vec_read_element64(v2, 1); +- b0 = s390_vec_read_element64(v3, 0); +- b1 = s390_vec_read_element64(v3, 1); +- e0 = zero_search(a0 ^ b0, mask); +- e1 = zero_search(a1 ^ b1, mask); +- first_equal = match_index(e0, e1); +- +- if (zs) { +- z0 = zero_search(a0, mask); +- z1 = zero_search(a1, mask); +- first_zero = match_index(z0, z1); +- } +- +- s390_vec_write_element64(v1, 0, MIN(first_equal, first_zero)); +- s390_vec_write_element64(v1, 1, 0); +- if (first_zero == 16 && first_equal == 16) { +- return 3; /* no match */ +- } else if (first_zero == 16) { +- return 1; /* matching elements, no match for zero */ +- } else if (first_equal < first_zero) { +- return 2; /* matching elements before match for zero */ +- } +- return 0; /* match for zero */ +-} +- +-#define DEF_VFEE_HELPER(BITS) \ +-void HELPER(gvec_vfee##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- const bool zs = extract32(simd_data(desc), 1, 1); \ +- \ +- vfee(v1, v2, v3, zs, MO_##BITS); \ +-} +-DEF_VFEE_HELPER(8) +-DEF_VFEE_HELPER(16) +-DEF_VFEE_HELPER(32) +- +-#define DEF_VFEE_CC_HELPER(BITS) \ +-void HELPER(gvec_vfee_cc##BITS)(void *v1, const void *v2, const void *v3, \ +- CPUS390XState *env, uint32_t desc) \ +-{ \ +- const bool zs = extract32(simd_data(desc), 1, 1); \ +- \ +- env->cc_op = vfee(v1, v2, v3, zs, MO_##BITS); \ +-} +-DEF_VFEE_CC_HELPER(8) +-DEF_VFEE_CC_HELPER(16) +-DEF_VFEE_CC_HELPER(32) +- +-static int vfene(void *v1, const void *v2, const void *v3, bool zs, uint8_t es) +-{ +- const uint64_t mask = get_element_lsbs_mask(es); +- uint64_t a0, a1, b0, b1, e0, e1, z0, z1; +- uint64_t first_zero = 16; +- uint64_t first_inequal; +- bool smaller = false; +- +- a0 = s390_vec_read_element64(v2, 0); +- a1 = s390_vec_read_element64(v2, 1); +- b0 = s390_vec_read_element64(v3, 0); +- b1 = s390_vec_read_element64(v3, 1); +- e0 = nonzero_search(a0 ^ b0, mask); +- e1 = nonzero_search(a1 ^ b1, mask); +- first_inequal = match_index(e0, e1); +- +- /* identify the smaller element */ +- if (first_inequal < 16) { +- uint8_t enr = first_inequal / (1 << es); +- uint32_t a = s390_vec_read_element(v2, enr, es); +- uint32_t b = s390_vec_read_element(v3, enr, es); +- +- smaller = a < b; +- } +- +- if (zs) { +- z0 = zero_search(a0, mask); +- z1 = zero_search(a1, mask); +- first_zero = match_index(z0, z1); +- } +- +- s390_vec_write_element64(v1, 0, MIN(first_inequal, first_zero)); +- s390_vec_write_element64(v1, 1, 0); +- if (first_zero == 16 && first_inequal == 16) { +- return 3; +- } else if (first_zero < first_inequal) { +- return 0; +- } +- return smaller ? 
1 : 2; +-} +- +-#define DEF_VFENE_HELPER(BITS) \ +-void HELPER(gvec_vfene##BITS)(void *v1, const void *v2, const void *v3, \ +- uint32_t desc) \ +-{ \ +- const bool zs = extract32(simd_data(desc), 1, 1); \ +- \ +- vfene(v1, v2, v3, zs, MO_##BITS); \ +-} +-DEF_VFENE_HELPER(8) +-DEF_VFENE_HELPER(16) +-DEF_VFENE_HELPER(32) +- +-#define DEF_VFENE_CC_HELPER(BITS) \ +-void HELPER(gvec_vfene_cc##BITS)(void *v1, const void *v2, const void *v3, \ +- CPUS390XState *env, uint32_t desc) \ +-{ \ +- const bool zs = extract32(simd_data(desc), 1, 1); \ +- \ +- env->cc_op = vfene(v1, v2, v3, zs, MO_##BITS); \ +-} +-DEF_VFENE_CC_HELPER(8) +-DEF_VFENE_CC_HELPER(16) +-DEF_VFENE_CC_HELPER(32) +- +-static int vistr(void *v1, const void *v2, uint8_t es) +-{ +- const uint64_t mask = get_element_lsbs_mask(es); +- uint64_t a0 = s390_vec_read_element64(v2, 0); +- uint64_t a1 = s390_vec_read_element64(v2, 1); +- uint64_t z; +- int cc = 3; +- +- z = zero_search(a0, mask); +- if (z) { +- a0 &= ~(-1ull >> clz64(z)); +- a1 = 0; +- cc = 0; +- } else { +- z = zero_search(a1, mask); +- if (z) { +- a1 &= ~(-1ull >> clz64(z)); +- cc = 0; +- } +- } +- +- s390_vec_write_element64(v1, 0, a0); +- s390_vec_write_element64(v1, 1, a1); +- return cc; +-} +- +-#define DEF_VISTR_HELPER(BITS) \ +-void HELPER(gvec_vistr##BITS)(void *v1, const void *v2, uint32_t desc) \ +-{ \ +- vistr(v1, v2, MO_##BITS); \ +-} +-DEF_VISTR_HELPER(8) +-DEF_VISTR_HELPER(16) +-DEF_VISTR_HELPER(32) +- +-#define DEF_VISTR_CC_HELPER(BITS) \ +-void HELPER(gvec_vistr_cc##BITS)(void *v1, const void *v2, CPUS390XState *env, \ +- uint32_t desc) \ +-{ \ +- env->cc_op = vistr(v1, v2, MO_##BITS); \ +-} +-DEF_VISTR_CC_HELPER(8) +-DEF_VISTR_CC_HELPER(16) +-DEF_VISTR_CC_HELPER(32) +- +-static bool element_compare(uint32_t data, uint32_t l, uint8_t c) +-{ +- const bool equal = extract32(c, 7, 1); +- const bool lower = extract32(c, 6, 1); +- const bool higher = extract32(c, 5, 1); +- +- if (data < l) { +- return lower; +- } else if (data > l) { +- return higher; +- } +- return equal; +-} +- +-static int vstrc(void *v1, const void *v2, const void *v3, const void *v4, +- bool in, bool rt, bool zs, uint8_t es) +-{ +- const uint64_t mask = get_element_lsbs_mask(es); +- uint64_t a0 = s390_vec_read_element64(v2, 0); +- uint64_t a1 = s390_vec_read_element64(v2, 1); +- int first_zero = 16, first_match = 16; +- S390Vector rt_result = {}; +- uint64_t z0, z1; +- int i, j; +- +- if (zs) { +- z0 = zero_search(a0, mask); +- z1 = zero_search(a1, mask); +- first_zero = match_index(z0, z1); +- } +- +- for (i = 0; i < 16 / (1 << es); i++) { +- const uint32_t data = s390_vec_read_element(v2, i, es); +- const int cur_byte = i * (1 << es); +- bool any_match = false; +- +- /* if we don't need a bit vector, we can stop early */ +- if (cur_byte == first_zero && !rt) { +- break; +- } +- +- for (j = 0; j < 16 / (1 << es); j += 2) { +- const uint32_t l1 = s390_vec_read_element(v3, j, es); +- const uint32_t l2 = s390_vec_read_element(v3, j + 1, es); +- /* we are only interested in the highest byte of each element */ +- const uint8_t c1 = s390_vec_read_element8(v4, j * (1 << es)); +- const uint8_t c2 = s390_vec_read_element8(v4, (j + 1) * (1 << es)); +- +- if (element_compare(data, l1, c1) && +- element_compare(data, l2, c2)) { +- any_match = true; +- break; +- } +- } +- /* invert the result if requested */ +- any_match = in ^ any_match; +- +- if (any_match) { +- /* indicate bit vector if requested */ +- if (rt) { +- const uint64_t val = -1ull; +- +- first_match = MIN(cur_byte, first_match); +- 
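element_compare() above decodes one VSTRC control byte: bit 7 requests a match on equality, bit 6 on "lower", bit 5 on "higher", and vstrc() requires both halves of a range pair to match. A standalone sketch testing for ASCII digits by pairing a >= '0' rule with a <= '9' rule (ad-hoc names):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* illustrative only: same control-bit layout as element_compare() */
    static bool element_compare_sketch(uint32_t data, uint32_t l, uint8_t c)
    {
        if (data < l) {
            return c & 0x40;        /* bit 6: match if lower */
        } else if (data > l) {
            return c & 0x20;        /* bit 5: match if higher */
        }
        return c & 0x80;            /* bit 7: match if equal */
    }

    int main(void)
    {
        for (uint32_t ch = '/'; ch <= ':'; ch++) {
            bool in_range =
                element_compare_sketch(ch, '0', 0x80 | 0x20) && /* >= '0' */
                element_compare_sketch(ch, '9', 0x80 | 0x40);   /* <= '9' */

            printf("%c %d\n", ch, in_range); /* only '0'..'9' print 1 */
        }
        return 0;
    }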
s390_vec_write_element(&rt_result, i, es, val); +- } else { +- /* stop on the first match */ +- first_match = cur_byte; +- break; +- } +- } +- } +- +- if (rt) { +- *(S390Vector *)v1 = rt_result; +- } else { +- s390_vec_write_element64(v1, 0, MIN(first_match, first_zero)); +- s390_vec_write_element64(v1, 1, 0); +- } +- +- if (first_zero == 16 && first_match == 16) { +- return 3; /* no match */ +- } else if (first_zero == 16) { +- return 1; /* matching elements, no match for zero */ +- } else if (first_match < first_zero) { +- return 2; /* matching elements before match for zero */ +- } +- return 0; /* match for zero */ +-} +- +-#define DEF_VSTRC_HELPER(BITS) \ +-void HELPER(gvec_vstrc##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, uint32_t desc) \ +-{ \ +- const bool in = extract32(simd_data(desc), 3, 1); \ +- const bool zs = extract32(simd_data(desc), 1, 1); \ +- \ +- vstrc(v1, v2, v3, v4, in, 0, zs, MO_##BITS); \ +-} +-DEF_VSTRC_HELPER(8) +-DEF_VSTRC_HELPER(16) +-DEF_VSTRC_HELPER(32) +- +-#define DEF_VSTRC_RT_HELPER(BITS) \ +-void HELPER(gvec_vstrc_rt##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, uint32_t desc) \ +-{ \ +- const bool in = extract32(simd_data(desc), 3, 1); \ +- const bool zs = extract32(simd_data(desc), 1, 1); \ +- \ +- vstrc(v1, v2, v3, v4, in, 1, zs, MO_##BITS); \ +-} +-DEF_VSTRC_RT_HELPER(8) +-DEF_VSTRC_RT_HELPER(16) +-DEF_VSTRC_RT_HELPER(32) +- +-#define DEF_VSTRC_CC_HELPER(BITS) \ +-void HELPER(gvec_vstrc_cc##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, CPUS390XState *env, \ +- uint32_t desc) \ +-{ \ +- const bool in = extract32(simd_data(desc), 3, 1); \ +- const bool zs = extract32(simd_data(desc), 1, 1); \ +- \ +- env->cc_op = vstrc(v1, v2, v3, v4, in, 0, zs, MO_##BITS); \ +-} +-DEF_VSTRC_CC_HELPER(8) +-DEF_VSTRC_CC_HELPER(16) +-DEF_VSTRC_CC_HELPER(32) +- +-#define DEF_VSTRC_CC_RT_HELPER(BITS) \ +-void HELPER(gvec_vstrc_cc_rt##BITS)(void *v1, const void *v2, const void *v3, \ +- const void *v4, CPUS390XState *env, \ +- uint32_t desc) \ +-{ \ +- const bool in = extract32(simd_data(desc), 3, 1); \ +- const bool zs = extract32(simd_data(desc), 1, 1); \ +- \ +- env->cc_op = vstrc(v1, v2, v3, v4, in, 1, zs, MO_##BITS); \ +-} +-DEF_VSTRC_CC_RT_HELPER(8) +-DEF_VSTRC_CC_RT_HELPER(16) +-DEF_VSTRC_CC_RT_HELPER(32) +diff --git a/tests/qemu-iotests/122.out b/tests/qemu-iotests/122.out +index 3a3e121d57..8fbdac2b39 100644 +--- a/tests/qemu-iotests/122.out ++++ b/tests/qemu-iotests/122.out +@@ -67,12 +67,12 @@ read 65536/65536 bytes at offset 4194304 + 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 65536/65536 bytes at offset 8388608 + 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 65536, "depth": 0, "zero": false, "data": true}, +-{ "start": 65536, "length": 4128768, "depth": 0, "zero": true, "data": false}, +-{ "start": 4194304, "length": 65536, "depth": 0, "zero": false, "data": true}, +-{ "start": 4259840, "length": 4128768, "depth": 0, "zero": true, "data": false}, +-{ "start": 8388608, "length": 65536, "depth": 0, "zero": false, "data": true}, +-{ "start": 8454144, "length": 4128768, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 65536, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 65536, "length": 4128768, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 4194304, "length": 65536, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 4259840, "length": 4128768, 
"depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 8388608, "length": 65536, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 8454144, "length": 4128768, "depth": 0, "present": false, "zero": true, "data": false}] + read 65536/65536 bytes at offset 0 + 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 65536/65536 bytes at offset 4194304 +@@ -94,12 +94,12 @@ wrote 1024/1024 bytes at offset 1046528 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + wrote 1024/1024 bytes at offset 0 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 65536, "depth": 0, "zero": false, "data": true}, +-{ "start": 65536, "length": 65536, "depth": 0, "zero": true, "data": false}, +-{ "start": 131072, "length": 196608, "depth": 0, "zero": false, "data": true}, +-{ "start": 327680, "length": 655360, "depth": 0, "zero": true, "data": false}, +-{ "start": 983040, "length": 65536, "depth": 0, "zero": false, "data": true}, +-{ "start": 1048576, "length": 1046528, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 65536, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 65536, "length": 65536, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 131072, "length": 196608, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 327680, "length": 655360, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 983040, "length": 65536, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 1048576, "length": 1046528, "depth": 0, "present": false, "zero": true, "data": false}] + read 16384/16384 bytes at offset 0 + 16 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 16384/16384 bytes at offset 16384 +@@ -130,14 +130,14 @@ read 3145728/3145728 bytes at offset 0 + 3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 63963136/63963136 bytes at offset 3145728 + 61 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 67108864, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + + convert -c -S 0: + read 3145728/3145728 bytes at offset 0 + 3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 63963136/63963136 bytes at offset 3145728 + 61 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true}] ++[{ "start": 0, "length": 67108864, "depth": 0, "present": true, "zero": false, "data": true}] + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=67108864 + wrote 33554432/33554432 bytes at offset 0 + 32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +@@ -152,7 +152,7 @@ read 30408704/30408704 bytes at offset 3145728 + 29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 33554432/33554432 bytes at offset 33554432 + 32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 67108864, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + + convert -c -S 0 with source backing file: + read 3145728/3145728 bytes at offset 0 +@@ -161,7 +161,7 @@ read 30408704/30408704 bytes at offset 3145728 + 29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 33554432/33554432 bytes at offset 33554432 + 32 MiB, X ops; XX:XX:XX.X (XXX 
YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true}] ++[{ "start": 0, "length": 67108864, "depth": 0, "present": true, "zero": false, "data": true}] + + convert -S 0 -B ... + read 3145728/3145728 bytes at offset 0 +@@ -170,7 +170,7 @@ read 30408704/30408704 bytes at offset 3145728 + 29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 33554432/33554432 bytes at offset 33554432 + 32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 67108864, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + + convert -c -S 0 -B ... + read 3145728/3145728 bytes at offset 0 +@@ -179,7 +179,7 @@ read 30408704/30408704 bytes at offset 3145728 + 29 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 33554432/33554432 bytes at offset 33554432 + 32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true}] ++[{ "start": 0, "length": 67108864, "depth": 0, "present": true, "zero": false, "data": true}] + + === Non-zero -S === + +@@ -194,32 +194,32 @@ wrote 1024/1024 bytes at offset 17408 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + + convert -S 4k +-[{ "start": 0, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 4096, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 8192, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 12288, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 16384, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 20480, "length": 67088384, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 4096, "length": 4096, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 8192, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 12288, "length": 4096, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 16384, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 20480, "length": 67088384, "depth": 0, "present": false, "zero": true, "data": false}] + + convert -c -S 4k +-[{ "start": 0, "length": 1024, "depth": 0, "zero": false, "data": true}, +-{ "start": 1024, "length": 7168, "depth": 0, "zero": true, "data": false}, +-{ "start": 8192, "length": 1024, "depth": 0, "zero": false, "data": true}, +-{ "start": 9216, "length": 8192, "depth": 0, "zero": true, "data": false}, +-{ "start": 17408, "length": 1024, "depth": 0, "zero": false, "data": true}, +-{ "start": 18432, "length": 67090432, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 1024, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 1024, "length": 7168, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 8192, "length": 1024, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 9216, "length": 8192, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 17408, "length": 1024, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 18432, "length": 67090432, "depth": 0, "present": false, "zero": true, "data": false}] 
+ + convert -S 8k +-[{ "start": 0, "length": 24576, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 24576, "length": 67084288, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 24576, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 24576, "length": 67084288, "depth": 0, "present": false, "zero": true, "data": false}] + + convert -c -S 8k +-[{ "start": 0, "length": 1024, "depth": 0, "zero": false, "data": true}, +-{ "start": 1024, "length": 7168, "depth": 0, "zero": true, "data": false}, +-{ "start": 8192, "length": 1024, "depth": 0, "zero": false, "data": true}, +-{ "start": 9216, "length": 8192, "depth": 0, "zero": true, "data": false}, +-{ "start": 17408, "length": 1024, "depth": 0, "zero": false, "data": true}, +-{ "start": 18432, "length": 67090432, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 1024, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 1024, "length": 7168, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 8192, "length": 1024, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 9216, "length": 8192, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 17408, "length": 1024, "depth": 0, "present": true, "zero": false, "data": true}, ++{ "start": 18432, "length": 67090432, "depth": 0, "present": false, "zero": true, "data": false}] + + === -n to a non-zero image === + +@@ -233,18 +233,18 @@ Images are identical. + + Formatting 'TEST_DIR/t.IMGFMT.orig', fmt=IMGFMT size=67108864 + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 67108864, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 67108864, "depth": 0, "present": false, "zero": true, "data": false}] + + === -n to an empty image with a backing file === + + Formatting 'TEST_DIR/t.IMGFMT.orig', fmt=IMGFMT size=67108864 + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=67108864 + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 67108864, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": false, "data": true, "offset": 327680}] ++[{ "start": 0, "length": 67108864, "depth": 0, "present": true, "zero": false, "data": true, "offset": 327680}] + + === -n -B to an image without a backing file === + +diff --git a/tests/qemu-iotests/146.out b/tests/qemu-iotests/146.out +index c67ba4ba7c..dfd6c77140 100644 +--- a/tests/qemu-iotests/146.out ++++ b/tests/qemu-iotests/146.out +@@ -2,414 +2,414 @@ QA output created by 146 + + === Testing VPC Autodetect === + +-[{ "start": 0, "length": 136363130880, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 136363130880, "depth": 0, "present": true, "zero": true, "data": false}] + + === Testing VPC with current_size force === + +-[{ "start": 0, "length": 136365211648, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 136365211648, 
"depth": 0, "present": true, "zero": true, "data": false}] + + === Testing VPC with chs force === + +-[{ "start": 0, "length": 136363130880, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 136363130880, "depth": 0, "present": true, "zero": true, "data": false}] + + === Testing Hyper-V Autodetect === + +-[{ "start": 0, "length": 136365211648, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 136365211648, "depth": 0, "present": true, "zero": true, "data": false}] + + === Testing Hyper-V with current_size force === + +-[{ "start": 0, "length": 136365211648, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 136365211648, "depth": 0, "present": true, "zero": true, "data": false}] + + === Testing Hyper-V with chs force === + +-[{ "start": 0, "length": 136363130880, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 136363130880, "depth": 0, "present": true, "zero": true, "data": false}] + + === Testing d2v Autodetect === + +-[{ "start": 0, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 2097152, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 4194304, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 6291456, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 8388608, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 10485760, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 12582912, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 14680064, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 16777216, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 18874368, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 20971520, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 23068672, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 25165824, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 27262976, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 29360128, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 31457280, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 33554432, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 35651584, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 37748736, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 39845888, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 41943040, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 44040192, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 46137344, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 48234496, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 50331648, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ 
"start": 52428800, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 54525952, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 56623104, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 58720256, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 60817408, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 62914560, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 65011712, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 67108864, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 69206016, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 71303168, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 73400320, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 75497472, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 77594624, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 79691776, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 81788928, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 83886080, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 85983232, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 88080384, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 90177536, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 92274688, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 94371840, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 96468992, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 98566144, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 100663296, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 102760448, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 104857600, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 106954752, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 109051904, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 111149056, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 113246208, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 115343360, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 117440512, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 119537664, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 121634816, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 123731968, "length": 2097152, "depth": 0, "zero": false, "data": 
true, "offset": OFFSET}, +-{ "start": 125829120, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 127926272, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 130023424, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 132120576, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 134217728, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 136314880, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 138412032, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 140509184, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 142606336, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 144703488, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 146800640, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 148897792, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 150994944, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 153092096, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 155189248, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 157286400, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 159383552, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 161480704, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 163577856, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 165675008, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 167772160, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 169869312, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 171966464, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 174063616, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 176160768, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 178257920, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 180355072, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 182452224, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 184549376, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 186646528, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 188743680, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 190840832, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 192937984, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 195035136, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 197132288, 
"length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 199229440, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 201326592, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 203423744, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 205520896, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 207618048, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 209715200, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 211812352, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 213909504, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 216006656, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 218103808, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 220200960, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 222298112, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 224395264, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 226492416, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 228589568, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 230686720, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 232783872, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 234881024, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 236978176, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 239075328, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 241172480, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 243269632, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 245366784, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 247463936, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 249561088, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 251658240, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 253755392, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 255852544, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 257949696, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 260046848, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 262144000, "length": 1310720, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 4194304, "length": 2097152, "depth": 
0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 6291456, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 8388608, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 10485760, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 12582912, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 14680064, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 16777216, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 18874368, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 20971520, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 23068672, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 25165824, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 27262976, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 29360128, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 31457280, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 33554432, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 35651584, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 37748736, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 39845888, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 41943040, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 44040192, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 46137344, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 48234496, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 50331648, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 52428800, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 54525952, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 56623104, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 58720256, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 60817408, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 62914560, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 65011712, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 67108864, "length": 
2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 69206016, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 71303168, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 73400320, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 75497472, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 77594624, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 79691776, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 81788928, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 83886080, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 85983232, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 88080384, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 90177536, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 92274688, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 94371840, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 96468992, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 98566144, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 100663296, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 102760448, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 104857600, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 106954752, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 109051904, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 111149056, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 113246208, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 115343360, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 117440512, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 119537664, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 121634816, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 123731968, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 125829120, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 127926272, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ 
"start": 130023424, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 132120576, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 134217728, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 136314880, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 138412032, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 140509184, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 142606336, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 144703488, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 146800640, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 148897792, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 150994944, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 153092096, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 155189248, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 157286400, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 159383552, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 161480704, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 163577856, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 165675008, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 167772160, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 169869312, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 171966464, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 174063616, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 176160768, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 178257920, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 180355072, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 182452224, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 184549376, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 186646528, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 188743680, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 190840832, "length": 2097152, "depth": 0, "present": true, 
"zero": false, "data": true, "offset": OFFSET}, ++{ "start": 192937984, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 195035136, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 197132288, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 199229440, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 201326592, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 203423744, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 205520896, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 207618048, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 209715200, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 211812352, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 213909504, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 216006656, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 218103808, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 220200960, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 222298112, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 224395264, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 226492416, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 228589568, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 230686720, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 232783872, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 234881024, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 236978176, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 239075328, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 241172480, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 243269632, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 245366784, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 247463936, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 249561088, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 251658240, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 253755392, 
"length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 255852544, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 257949696, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 260046848, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 262144000, "length": 1310720, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + + === Testing d2v with current_size force === + +-[{ "start": 0, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 2097152, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 4194304, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 6291456, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 8388608, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 10485760, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 12582912, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 14680064, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 16777216, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 18874368, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 20971520, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 23068672, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 25165824, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 27262976, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 29360128, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 31457280, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 33554432, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 35651584, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 37748736, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 39845888, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 41943040, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 44040192, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 46137344, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 48234496, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 50331648, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 52428800, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 54525952, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 56623104, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 58720256, "length": 2097152, "depth": 0, "zero": false, "data": true, 
"offset": OFFSET}, +-{ "start": 60817408, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 62914560, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 65011712, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 67108864, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 69206016, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 71303168, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 73400320, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 75497472, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 77594624, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 79691776, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 81788928, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 83886080, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 85983232, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 88080384, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 90177536, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 92274688, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 94371840, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 96468992, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 98566144, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 100663296, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 102760448, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 104857600, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 106954752, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 109051904, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 111149056, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 113246208, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 115343360, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 117440512, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 119537664, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 121634816, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 123731968, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 125829120, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 127926272, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 130023424, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 132120576, "length": 2097152, "depth": 
0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 134217728, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 136314880, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 138412032, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 140509184, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 142606336, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 144703488, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 146800640, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 148897792, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 150994944, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 153092096, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 155189248, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 157286400, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 159383552, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 161480704, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 163577856, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 165675008, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 167772160, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 169869312, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 171966464, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 174063616, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 176160768, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 178257920, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 180355072, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 182452224, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 184549376, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 186646528, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 188743680, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 190840832, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 192937984, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 195035136, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 197132288, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 199229440, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 201326592, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 203423744, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, 
+-{ "start": 205520896, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 207618048, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 209715200, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 211812352, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 213909504, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 216006656, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 218103808, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 220200960, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 222298112, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 224395264, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 226492416, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 228589568, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 230686720, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 232783872, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 234881024, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 236978176, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 239075328, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 241172480, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 243269632, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 245366784, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 247463936, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 249561088, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 251658240, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 253755392, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 255852544, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 257949696, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 260046848, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 262144000, "length": 1310720, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 4194304, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 6291456, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 8388608, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 10485760, "length": 2097152, "depth": 0, "present": true, "zero": false, 
"data": true, "offset": OFFSET}, ++{ "start": 12582912, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 14680064, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 16777216, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 18874368, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 20971520, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 23068672, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 25165824, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 27262976, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 29360128, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 31457280, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 33554432, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 35651584, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 37748736, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 39845888, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 41943040, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 44040192, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 46137344, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 48234496, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 50331648, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 52428800, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 54525952, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 56623104, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 58720256, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 60817408, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 62914560, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 65011712, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 67108864, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 69206016, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 71303168, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 73400320, "length": 2097152, "depth": 0, "present": true, 
"zero": false, "data": true, "offset": OFFSET}, ++{ "start": 75497472, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 77594624, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 79691776, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 81788928, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 83886080, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 85983232, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 88080384, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 90177536, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 92274688, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 94371840, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 96468992, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 98566144, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 100663296, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 102760448, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 104857600, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 106954752, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 109051904, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 111149056, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 113246208, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 115343360, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 117440512, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 119537664, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 121634816, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 123731968, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 125829120, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 127926272, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 130023424, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 132120576, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 134217728, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 136314880, "length": 
2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 138412032, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 140509184, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 142606336, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 144703488, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 146800640, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 148897792, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 150994944, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 153092096, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 155189248, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 157286400, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 159383552, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 161480704, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 163577856, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 165675008, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 167772160, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 169869312, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 171966464, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 174063616, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 176160768, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 178257920, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 180355072, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 182452224, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 184549376, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 186646528, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 188743680, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 190840832, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 192937984, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 195035136, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 197132288, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, 
"offset": OFFSET}, ++{ "start": 199229440, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 201326592, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 203423744, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 205520896, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 207618048, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 209715200, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 211812352, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 213909504, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 216006656, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 218103808, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 220200960, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 222298112, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 224395264, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 226492416, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 228589568, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 230686720, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 232783872, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 234881024, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 236978176, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 239075328, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 241172480, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 243269632, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 245366784, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 247463936, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 249561088, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 251658240, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 253755392, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 255852544, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 257949696, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 260046848, "length": 2097152, "depth": 0, 
"present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 262144000, "length": 1310720, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + + === Testing d2v with chs force === + +-[{ "start": 0, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 2097152, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 4194304, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 6291456, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 8388608, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 10485760, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 12582912, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 14680064, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 16777216, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 18874368, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 20971520, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 23068672, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 25165824, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 27262976, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 29360128, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 31457280, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 33554432, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 35651584, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 37748736, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 39845888, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 41943040, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 44040192, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 46137344, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 48234496, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 50331648, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 52428800, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 54525952, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 56623104, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 58720256, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 60817408, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 62914560, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 65011712, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 67108864, "length": 2097152, "depth": 0, "zero": false, 
"data": true, "offset": OFFSET}, +-{ "start": 69206016, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 71303168, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 73400320, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 75497472, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 77594624, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 79691776, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 81788928, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 83886080, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 85983232, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 88080384, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 90177536, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 92274688, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 94371840, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 96468992, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 98566144, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 100663296, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 102760448, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 104857600, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 106954752, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 109051904, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 111149056, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 113246208, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 115343360, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 117440512, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 119537664, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 121634816, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 123731968, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 125829120, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 127926272, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 130023424, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 132120576, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 134217728, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 136314880, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 138412032, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 140509184, "length": 
2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 142606336, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 144703488, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 146800640, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 148897792, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 150994944, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 153092096, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 155189248, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 157286400, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 159383552, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 161480704, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 163577856, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 165675008, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 167772160, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 169869312, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 171966464, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 174063616, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 176160768, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 178257920, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 180355072, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 182452224, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 184549376, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 186646528, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 188743680, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 190840832, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 192937984, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 195035136, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 197132288, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 199229440, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 201326592, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 203423744, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 205520896, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 207618048, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 209715200, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 211812352, "length": 2097152, "depth": 0, "zero": false, "data": true, 
"offset": OFFSET}, +-{ "start": 213909504, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 216006656, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 218103808, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 220200960, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 222298112, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 224395264, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 226492416, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 228589568, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 230686720, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 232783872, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 234881024, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 236978176, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 239075328, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 241172480, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 243269632, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 245366784, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 247463936, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 249561088, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 251658240, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 253755392, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 255852544, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 257949696, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 260046848, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 262144000, "length": 1310720, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 4194304, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 6291456, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 8388608, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 10485760, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 12582912, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 14680064, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 16777216, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, 
++{ "start": 18874368, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 20971520, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 23068672, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 25165824, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 27262976, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 29360128, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 31457280, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 33554432, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 35651584, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 37748736, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 39845888, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 41943040, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 44040192, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 46137344, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 48234496, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 50331648, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 52428800, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 54525952, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 56623104, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 58720256, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 60817408, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 62914560, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 65011712, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 67108864, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 69206016, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 71303168, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 73400320, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 75497472, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 77594624, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 79691776, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, 
"offset": OFFSET}, ++{ "start": 81788928, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 83886080, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 85983232, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 88080384, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 90177536, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 92274688, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 94371840, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 96468992, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 98566144, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 100663296, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 102760448, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 104857600, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 106954752, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 109051904, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 111149056, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 113246208, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 115343360, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 117440512, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 119537664, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 121634816, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 123731968, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 125829120, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 127926272, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 130023424, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 132120576, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 134217728, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 136314880, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 138412032, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 140509184, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 142606336, "length": 2097152, "depth": 0, 
"present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 144703488, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 146800640, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 148897792, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 150994944, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 153092096, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 155189248, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 157286400, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 159383552, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 161480704, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 163577856, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 165675008, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 167772160, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 169869312, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 171966464, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 174063616, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 176160768, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 178257920, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 180355072, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 182452224, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 184549376, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 186646528, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 188743680, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 190840832, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 192937984, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 195035136, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 197132288, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 199229440, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 201326592, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 203423744, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ 
"start": 205520896, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 207618048, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 209715200, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 211812352, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 213909504, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 216006656, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 218103808, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 220200960, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 222298112, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 224395264, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 226492416, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 228589568, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 230686720, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 232783872, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 234881024, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 236978176, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 239075328, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 241172480, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 243269632, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 245366784, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 247463936, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 249561088, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 251658240, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 253755392, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 255852544, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 257949696, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 260046848, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 262144000, "length": 1310720, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + + === Testing Image create, default === + +@@ -417,15 +417,15 @@ Formatting 'TEST_DIR/IMGFMT-create-test.IMGFMT', fmt=IMGFMT size=4294967296 + + === Read created image, default opts ==== + +-[{ 
"start": 0, "length": 4295467008, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 4295467008, "depth": 0, "present": true, "zero": true, "data": false}] + + === Read created image, force_size_calc=chs ==== + +-[{ "start": 0, "length": 4295467008, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 4295467008, "depth": 0, "present": true, "zero": true, "data": false}] + + === Read created image, force_size_calc=current_size ==== + +-[{ "start": 0, "length": 4295467008, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 4295467008, "depth": 0, "present": true, "zero": true, "data": false}] + + === Testing Image create, force_size === + +@@ -433,13 +433,13 @@ Formatting 'TEST_DIR/IMGFMT-create-test.IMGFMT', fmt=IMGFMT size=4294967296 + + === Read created image, default opts ==== + +-[{ "start": 0, "length": 4294967296, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 4294967296, "depth": 0, "present": true, "zero": true, "data": false}] + + === Read created image, force_size_calc=chs ==== + +-[{ "start": 0, "length": 4294967296, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 4294967296, "depth": 0, "present": true, "zero": true, "data": false}] + + === Read created image, force_size_calc=current_size ==== + +-[{ "start": 0, "length": 4294967296, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 4294967296, "depth": 0, "present": true, "zero": true, "data": false}] + *** done +diff --git a/tests/qemu-iotests/154.out b/tests/qemu-iotests/154.out +index 4863e24838..1fa7ffc475 100644 +--- a/tests/qemu-iotests/154.out ++++ b/tests/qemu-iotests/154.out +@@ -11,14 +11,14 @@ wrote 2048/2048 bytes at offset 17408 + 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + wrote 2048/2048 bytes at offset 27648 + 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 4096, "length": 4096, "depth": 1, "zero": true, "data": false}, +-{ "start": 8192, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 12288, "length": 4096, "depth": 1, "zero": true, "data": false}, +-{ "start": 16384, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 20480, "length": 4096, "depth": 1, "zero": true, "data": false}, +-{ "start": 24576, "length": 8192, "depth": 0, "zero": true, "data": false}, +-{ "start": 32768, "length": 134184960, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 4096, "length": 4096, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 8192, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 12288, "length": 4096, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 16384, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 20480, "length": 4096, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 24576, "length": 8192, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 32768, "length": 134184960, "depth": 1, "present": false, "zero": true, "data": false}] + + == backing file contains non-zero data before write_zeroes == + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728 +@@ -41,11 +41,11 @@ read 1024/1024 bytes at offset 65536 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 2048/2048 
bytes at offset 67584 + 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 32768, "depth": 1, "zero": true, "data": false}, +-{ "start": 32768, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 36864, "length": 28672, "depth": 1, "zero": true, "data": false}, +-{ "start": 65536, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 69632, "length": 134148096, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 32768, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 32768, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 36864, "length": 28672, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 65536, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 69632, "length": 134148096, "depth": 1, "present": false, "zero": true, "data": false}] + + == backing file contains non-zero data after write_zeroes == + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728 +@@ -68,11 +68,11 @@ read 1024/1024 bytes at offset 44032 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 3072/3072 bytes at offset 40960 + 3 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 32768, "depth": 1, "zero": true, "data": false}, +-{ "start": 32768, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 36864, "length": 4096, "depth": 1, "zero": true, "data": false}, +-{ "start": 40960, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 45056, "length": 134172672, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 32768, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 32768, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 36864, "length": 4096, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 40960, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 45056, "length": 134172672, "depth": 1, "present": false, "zero": true, "data": false}] + + == write_zeroes covers non-zero data == + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728 +@@ -101,15 +101,15 @@ wrote 2048/2048 bytes at offset 29696 + 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 4096/4096 bytes at offset 28672 + 4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 4096, "depth": 1, "zero": true, "data": false}, +-{ "start": 4096, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 8192, "length": 4096, "depth": 1, "zero": true, "data": false}, +-{ "start": 12288, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 16384, "length": 4096, "depth": 1, "zero": true, "data": false}, +-{ "start": 20480, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 24576, "length": 4096, "depth": 1, "zero": true, "data": false}, +-{ "start": 28672, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 32768, "length": 134184960, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 4096, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 4096, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 8192, "length": 
4096, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 12288, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 16384, "length": 4096, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 20480, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 24576, "length": 4096, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 28672, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 32768, "length": 134184960, "depth": 1, "present": false, "zero": true, "data": false}] + + == spanning two clusters, non-zero before request == + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728 +@@ -142,16 +142,16 @@ read 1024/1024 bytes at offset 67584 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 5120/5120 bytes at offset 68608 + 5 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 32768, "depth": 1, "zero": true, "data": false}, +-{ "start": 32768, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 36864, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 40960, "length": 8192, "depth": 1, "zero": true, "data": false}, +-{ "start": 49152, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 53248, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 57344, "length": 8192, "depth": 1, "zero": true, "data": false}, +-{ "start": 65536, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 69632, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 73728, "length": 134144000, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 32768, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 32768, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 36864, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 40960, "length": 8192, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 49152, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 53248, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 57344, "length": 8192, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 65536, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 69632, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 73728, "length": 134144000, "depth": 1, "present": false, "zero": true, "data": false}] + + == spanning two clusters, non-zero after request == + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728 +@@ -184,16 +184,16 @@ read 7168/7168 bytes at offset 65536 + 7 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 1024/1024 bytes at offset 72704 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 32768, "depth": 1, "zero": true, "data": false}, +-{ "start": 32768, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 36864, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 40960, "length": 8192, "depth": 1, "zero": true, "data": false}, +-{ "start": 49152, "length": 4096, "depth": 0, "zero": true, 
"data": false}, +-{ "start": 53248, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 57344, "length": 8192, "depth": 1, "zero": true, "data": false}, +-{ "start": 65536, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 69632, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 73728, "length": 134144000, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 32768, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 32768, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 36864, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 40960, "length": 8192, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 49152, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 53248, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 57344, "length": 8192, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 65536, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 69632, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 73728, "length": 134144000, "depth": 1, "present": false, "zero": true, "data": false}] + + == spanning two clusters, partially overwriting backing file == + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728 +@@ -212,8 +212,8 @@ read 1024/1024 bytes at offset 5120 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 2048/2048 bytes at offset 6144 + 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 8192, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 8192, "length": 134209536, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 8192, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 8192, "length": 134209536, "depth": 1, "present": false, "zero": true, "data": false}] + + == spanning multiple clusters, non-zero in first cluster == + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728 +@@ -226,10 +226,10 @@ read 2048/2048 bytes at offset 65536 + 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 10240/10240 bytes at offset 67584 + 10 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 65536, "depth": 1, "zero": true, "data": false}, +-{ "start": 65536, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 69632, "length": 8192, "depth": 0, "zero": true, "data": false}, +-{ "start": 77824, "length": 134139904, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 65536, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 65536, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 69632, "length": 8192, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 77824, "length": 134139904, "depth": 1, "present": false, "zero": true, "data": false}] + + == spanning multiple clusters, non-zero in intermediate cluster == + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728 +@@ -240,9 +240,9 @@ wrote 7168/7168 bytes at offset 67584 + 7 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 12288/12288 bytes at offset 
65536 + 12 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 65536, "depth": 1, "zero": true, "data": false}, +-{ "start": 65536, "length": 12288, "depth": 0, "zero": true, "data": false}, +-{ "start": 77824, "length": 134139904, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 65536, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 65536, "length": 12288, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 77824, "length": 134139904, "depth": 1, "present": false, "zero": true, "data": false}] + + == spanning multiple clusters, non-zero in final cluster == + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728 +@@ -255,10 +255,10 @@ read 10240/10240 bytes at offset 65536 + 10 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 2048/2048 bytes at offset 75776 + 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 65536, "depth": 1, "zero": true, "data": false}, +-{ "start": 65536, "length": 8192, "depth": 0, "zero": true, "data": false}, +-{ "start": 73728, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 77824, "length": 134139904, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 65536, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 65536, "length": 8192, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 73728, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 77824, "length": 134139904, "depth": 1, "present": false, "zero": true, "data": false}] + + == spanning multiple clusters, partially overwriting backing file == + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728 +@@ -277,84 +277,88 @@ read 2048/2048 bytes at offset 74752 + 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 1024/1024 bytes at offset 76800 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 65536, "depth": 1, "zero": true, "data": false}, +-{ "start": 65536, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 69632, "length": 4096, "depth": 0, "zero": true, "data": false}, +-{ "start": 73728, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 77824, "length": 134139904, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 65536, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 65536, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 69632, "length": 4096, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 73728, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 77824, "length": 134139904, "depth": 1, "present": false, "zero": true, "data": false}] + + == unaligned image tail cluster, no allocation needed == + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 + wrote 512/512 bytes at offset 134217728 + 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134219776, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', 
fmt=IMGFMT size=134219776 + wrote 512/512 bytes at offset 134219264 + 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134219776, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 + wrote 1024/1024 bytes at offset 134218240 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134219776, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 + wrote 2048/2048 bytes at offset 134217728 + 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134219776, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134218752 + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT + wrote 512/512 bytes at offset 134217728 + 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134217728, "depth": 1, "zero": true, "data": false}, +-{ "start": 134217728, "length": 2048, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT + wrote 512/512 bytes at offset 134219264 + 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134217728, "depth": 1, "zero": true, "data": false}, +-{ "start": 134217728, "length": 2048, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT + wrote 1024/1024 bytes at offset 134218240 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134217728, "depth": 1, "zero": true, "data": false}, +-{ "start": 134217728, "length": 2048, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT + wrote 2048/2048 bytes at offset 134217728 + 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and 
XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134217728, "depth": 1, "zero": true, "data": false}, +-{ "start": 134217728, "length": 2048, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + wrote 512/512 bytes at offset 134217728 + 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT + wrote 512/512 bytes at offset 134217728 + 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134217728, "depth": 1, "zero": true, "data": false}, +-{ "start": 134217728, "length": 2048, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT + wrote 512/512 bytes at offset 134219264 + 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134217728, "depth": 1, "zero": true, "data": false}, +-{ "start": 134217728, "length": 2048, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT + wrote 1024/1024 bytes at offset 134218240 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134217728, "depth": 1, "zero": true, "data": false}, +-{ "start": 134217728, "length": 2048, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT + wrote 2048/2048 bytes at offset 134217728 + 2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 2048/2048 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134217728, "depth": 1, "zero": true, "data": false}, +-{ "start": 134217728, "length": 2048, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": true, "data": false}] + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134218752 + wrote 1024/1024 bytes at offset 134217728 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +@@ -365,15 +369,15 @@ read 512/512 bytes at offset 134217728 + read 512/512 bytes at offset 134218240 + 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 1024/1024 bytes allocated at offset 128 MiB +-[{ "start": 0, "length": 134217728, "depth": 0, "zero": true, "data": false}, +-{ "start": 134217728, "length": 1024, "depth": 0, "zero": false, "data": true, "offset": 
OFFSET}] ++[{ "start": 0, "length": 134217728, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 1024, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + wrote 1024/1024 bytes at offset 134217728 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + 1024/1024 bytes allocated at offset 128 MiB + read 1024/1024 bytes at offset 134217728 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 134217728, "depth": 0, "zero": true, "data": false}, +-{ "start": 134217728, "length": 1024, "depth": 0, "zero": true, "data": false, "offset": OFFSET}] ++[{ "start": 0, "length": 134217728, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 1024, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}] + + == unaligned image tail cluster, allocation required == + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134218752 +@@ -386,8 +390,8 @@ read 512/512 bytes at offset 134217728 + 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 1536/1536 bytes at offset 134218240 + 1.500 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 134217728, "depth": 1, "zero": true, "data": false}, +-{ "start": 134217728, "length": 2048, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 134217728, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134218752 + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134219776 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT + wrote 512/512 bytes at offset 134218240 +@@ -408,6 +412,6 @@ read 512/512 bytes at offset 134218240 + 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 1024/1024 bytes at offset 134218752 + 1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 134217728, "depth": 1, "zero": true, "data": false}, +-{ "start": 134217728, "length": 2048, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 134217728, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 134217728, "length": 2048, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + *** done +diff --git a/tests/qemu-iotests/179.out b/tests/qemu-iotests/179.out +index 1f7680002c..7cf22cd75f 100644 +--- a/tests/qemu-iotests/179.out ++++ b/tests/qemu-iotests/179.out +@@ -13,7 +13,11 @@ wrote 2097152/2097152 bytes at offset 6291456 + 2 MiB (0x200000) bytes not allocated at offset 4 MiB (0x400000) + 2 MiB (0x200000) bytes allocated at offset 6 MiB (0x600000) + 56 MiB (0x3800000) bytes not allocated at offset 8 MiB (0x800000) +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 4194304, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 6291456, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 8388608, "length": 58720256, "depth": 0, "present": false, "zero": true, "data": false}] + wrote 2097150/2097150 bytes at offset 10485761 + 2 MiB, X ops; XX:XX:XX.X 
(XXX YYY/sec and XXX ops/sec) + wrote 2097150/2097150 bytes at offset 14680065 +@@ -27,7 +31,15 @@ wrote 2097150/2097150 bytes at offset 14680065 + 2 MiB (0x200000) bytes not allocated at offset 12 MiB (0xc00000) + 2 MiB (0x200000) bytes allocated at offset 14 MiB (0xe00000) + 48 MiB (0x3000000) bytes not allocated at offset 16 MiB (0x1000000) +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 4194304, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 6291456, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 8388608, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 10485760, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 12582912, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 14680064, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 16777216, "length": 50331648, "depth": 0, "present": false, "zero": true, "data": false}] + wrote 14680064/14680064 bytes at offset 18874368 + 14 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + wrote 2097152/2097152 bytes at offset 20971520 +@@ -45,13 +57,21 @@ wrote 6291456/6291456 bytes at offset 25165824 + 2 MiB (0x200000) bytes not allocated at offset 16 MiB (0x1000000) + 14 MiB (0xe00000) bytes allocated at offset 18 MiB (0x1200000) + 32 MiB (0x2000000) bytes not allocated at offset 32 MiB (0x2000000) +-[{ "start": 0, "length": 18874368, "depth": 0, "zero": true, "data": false}, +-{ "start": 18874368, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 20971520, "length": 2097152, "depth": 0, "zero": true, "data": false}, +-{ "start": 23068672, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 25165824, "length": 6291456, "depth": 0, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 31457280, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 33554432, "length": 33554432, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 4194304, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 6291456, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 8388608, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 10485760, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 12582912, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 14680064, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 16777216, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 18874368, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 20971520, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 
23068672, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 25165824, "length": 6291456, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 31457280, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 33554432, "length": 33554432, "depth": 0, "present": false, "zero": true, "data": false}] + wrote 2097152/2097152 bytes at offset 27262976 + 2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + wrote 2097152/2097152 bytes at offset 29360128 +@@ -67,15 +87,23 @@ wrote 2097152/2097152 bytes at offset 29360128 + 2 MiB (0x200000) bytes not allocated at offset 16 MiB (0x1000000) + 14 MiB (0xe00000) bytes allocated at offset 18 MiB (0x1200000) + 32 MiB (0x2000000) bytes not allocated at offset 32 MiB (0x2000000) +-[{ "start": 0, "length": 18874368, "depth": 0, "zero": true, "data": false}, +-{ "start": 18874368, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 20971520, "length": 2097152, "depth": 0, "zero": true, "data": false}, +-{ "start": 23068672, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 25165824, "length": 2097152, "depth": 0, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 27262976, "length": 2097152, "depth": 0, "zero": true, "data": false}, +-{ "start": 29360128, "length": 2097152, "depth": 0, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 31457280, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 33554432, "length": 33554432, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 4194304, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 6291456, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 8388608, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 10485760, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 12582912, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 14680064, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 16777216, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 18874368, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 20971520, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 23068672, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 25165824, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 27262976, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 29360128, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 31457280, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 33554432, "length": 33554432, "depth": 0, "present": false, "zero": true, "data": false}] + wrote 8388608/8388608 bytes at 
offset 33554432 + 8 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + wrote 2097152/2097152 bytes at offset 35651584 +@@ -93,15 +121,24 @@ wrote 2097152/2097152 bytes at offset 37748736 + 2 MiB (0x200000) bytes not allocated at offset 16 MiB (0x1000000) + 22 MiB (0x1600000) bytes allocated at offset 18 MiB (0x1200000) + 24 MiB (0x1800000) bytes not allocated at offset 40 MiB (0x2800000) +-[{ "start": 0, "length": 18874368, "depth": 0, "zero": true, "data": false}, +-{ "start": 18874368, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 20971520, "length": 2097152, "depth": 0, "zero": true, "data": false}, +-{ "start": 23068672, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 25165824, "length": 2097152, "depth": 0, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 27262976, "length": 2097152, "depth": 0, "zero": true, "data": false}, +-{ "start": 29360128, "length": 2097152, "depth": 0, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 31457280, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 33554432, "length": 33554432, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 4194304, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 6291456, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 8388608, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 10485760, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 12582912, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 14680064, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 16777216, "length": 2097152, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 18874368, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 20971520, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 23068672, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 25165824, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 27262976, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 29360128, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 31457280, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 33554432, "length": 8388608, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 41943040, "length": 25165824, "depth": 0, "present": false, "zero": true, "data": false}] + wrote 8388608/8388608 bytes at offset 41943040 + 8 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + wrote 8388608/8388608 bytes at offset 50331648 +@@ -125,23 +162,31 @@ wrote 2097152/2097152 bytes at offset 62914560 + 4 MiB (0x400000) bytes not allocated at offset 54 MiB (0x3600000) + 4 MiB (0x400000) bytes allocated at offset 58 MiB (0x3a00000) + 2 MiB (0x200000) bytes not allocated at offset 62 MiB 
(0x3e00000) +-[{ "start": 0, "length": 18874368, "depth": 1, "zero": true, "data": false}, +-{ "start": 18874368, "length": 2097152, "depth": 1, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 20971520, "length": 2097152, "depth": 1, "zero": true, "data": false}, +-{ "start": 23068672, "length": 2097152, "depth": 1, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 25165824, "length": 2097152, "depth": 1, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 27262976, "length": 2097152, "depth": 1, "zero": true, "data": false}, +-{ "start": 29360128, "length": 2097152, "depth": 1, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 31457280, "length": 2097152, "depth": 1, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 33554432, "length": 10485760, "depth": 1, "zero": true, "data": false}, +-{ "start": 44040192, "length": 4194304, "depth": 0, "zero": true, "data": false}, +-{ "start": 48234496, "length": 2097152, "depth": 1, "zero": true, "data": false}, +-{ "start": 50331648, "length": 2097152, "depth": 1, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 52428800, "length": 4194304, "depth": 0, "zero": true, "data": false}, +-{ "start": 56623104, "length": 2097152, "depth": 1, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 58720256, "length": 2097152, "depth": 1, "zero": true, "data": false}, +-{ "start": 60817408, "length": 4194304, "depth": 0, "zero": true, "data": false}, +-{ "start": 65011712, "length": 2097152, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 2097152, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 2097152, "length": 2097152, "depth": 1, "present": true, "zero": true, "data": false}, ++{ "start": 4194304, "length": 2097152, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 6291456, "length": 2097152, "depth": 1, "present": true, "zero": true, "data": false}, ++{ "start": 8388608, "length": 2097152, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 10485760, "length": 2097152, "depth": 1, "present": true, "zero": true, "data": false}, ++{ "start": 12582912, "length": 2097152, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 14680064, "length": 2097152, "depth": 1, "present": true, "zero": true, "data": false}, ++{ "start": 16777216, "length": 2097152, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 18874368, "length": 2097152, "depth": 1, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 20971520, "length": 2097152, "depth": 1, "present": true, "zero": true, "data": false}, ++{ "start": 23068672, "length": 2097152, "depth": 1, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 25165824, "length": 2097152, "depth": 1, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 27262976, "length": 2097152, "depth": 1, "present": true, "zero": true, "data": false}, ++{ "start": 29360128, "length": 2097152, "depth": 1, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 31457280, "length": 2097152, "depth": 1, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 33554432, "length": 10485760, "depth": 1, "present": true, "zero": true, "data": false}, ++{ "start": 44040192, "length": 4194304, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 48234496, "length": 2097152, "depth": 1, "present": true, "zero": true, 
"data": false}, ++{ "start": 50331648, "length": 2097152, "depth": 1, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 52428800, "length": 4194304, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 56623104, "length": 2097152, "depth": 1, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 58720256, "length": 2097152, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 60817408, "length": 4194304, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 65011712, "length": 2097152, "depth": 1, "present": false, "zero": true, "data": false}] + No errors were found on the image. + No errors were found on the image. + +diff --git a/tests/qemu-iotests/209.out b/tests/qemu-iotests/209.out +index 214e27bfce..f27be3fa7b 100644 +--- a/tests/qemu-iotests/209.out ++++ b/tests/qemu-iotests/209.out +@@ -1,2 +1,2 @@ +-[{ "start": 0, "length": 524288, "depth": 0, "zero": false, "data": true, "offset": 0}, +-{ "start": 524288, "length": 524288, "depth": 0, "zero": true, "data": false, "offset": 524288}] ++[{ "start": 0, "length": 524288, "depth": 0, "present": true, "zero": false, "data": true, "offset": 0}, ++{ "start": 524288, "length": 524288, "depth": 0, "present": true, "zero": true, "data": false, "offset": 524288}] +diff --git a/tests/qemu-iotests/211.out b/tests/qemu-iotests/211.out +index 3bc092a8a8..c4425b5982 100644 +--- a/tests/qemu-iotests/211.out ++++ b/tests/qemu-iotests/211.out +@@ -17,7 +17,7 @@ file format: IMGFMT + virtual size: 128 MiB (134217728 bytes) + cluster_size: 1048576 + +-[{ "start": 0, "length": 134217728, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 134217728, "depth": 0, "present": true, "zero": true, "data": false}] + + === Successful image creation (explicit defaults) === + +@@ -36,7 +36,7 @@ file format: IMGFMT + virtual size: 64 MiB (67108864 bytes) + cluster_size: 1048576 + +-[{ "start": 0, "length": 67108864, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 67108864, "depth": 0, "present": true, "zero": true, "data": false}] + + === Successful image creation (with non-default options) === + +@@ -55,8 +55,8 @@ file format: IMGFMT + virtual size: 32 MiB (33554432 bytes) + cluster_size: 1048576 + +-[{ "start": 0, "length": 3072, "depth": 0, "zero": false, "data": true, "offset": 1024}, +-{ "start": 3072, "length": 33551360, "depth": 0, "zero": true, "data": true, "offset": 4096}] ++[{ "start": 0, "length": 3072, "depth": 0, "present": true, "zero": false, "data": true, "offset": 1024}, ++{ "start": 3072, "length": 33551360, "depth": 0, "present": true, "zero": true, "data": true, "offset": 4096}] + + === Invalid BlockdevRef === + +diff --git a/tests/qemu-iotests/221.out b/tests/qemu-iotests/221.out +index 93846c7dab..9cdd171a2d 100644 +--- a/tests/qemu-iotests/221.out ++++ b/tests/qemu-iotests/221.out +@@ -5,14 +5,14 @@ QA output created by 221 + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=65537 + discard 65537/65537 bytes at offset 0 + 64.001 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 66048, "depth": 0, "zero": true, "data": false, "offset": OFFSET}] +-[{ "start": 0, "length": 66048, "depth": 0, "zero": true, "data": false, "offset": OFFSET}] ++[{ "start": 0, "length": 66048, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}] ++[{ "start": 0, "length": 66048, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}] + wrote 1/1 
bytes at offset 65536 + 1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 65536, "depth": 0, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 65536, "length": 1, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 65537, "length": 511, "depth": 0, "zero": true, "data": false, "offset": OFFSET}] +-[{ "start": 0, "length": 65536, "depth": 0, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 65536, "length": 1, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 65537, "length": 511, "depth": 0, "zero": true, "data": false, "offset": OFFSET}] ++[{ "start": 0, "length": 65536, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 65536, "length": 1, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 65537, "length": 511, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}] ++[{ "start": 0, "length": 65536, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 65536, "length": 1, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 65537, "length": 511, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}] + *** done +diff --git a/tests/qemu-iotests/223.out b/tests/qemu-iotests/223.out +index 083b62d053..e58ea5abbd 100644 +--- a/tests/qemu-iotests/223.out ++++ b/tests/qemu-iotests/223.out +@@ -100,19 +100,19 @@ read 1048576/1048576 bytes at offset 1048576 + 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 2097152/2097152 bytes at offset 2097152 + 2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 4096, "length": 1044480, "depth": 0, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 1048576, "length": 3145728, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] +-[{ "start": 0, "length": 65536, "depth": 0, "zero": false, "data": false}, +-{ "start": 65536, "length": 2031616, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 2097152, "length": 2097152, "depth": 0, "zero": false, "data": false}] ++[{ "start": 0, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 4096, "length": 1044480, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 1048576, "length": 3145728, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 65536, "depth": 0, "present": false, "zero": false, "data": false}, ++{ "start": 65536, "length": 2031616, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": false, "zero": false, "data": false}] + + === Contrast to small granularity dirty-bitmap === + +-[{ "start": 0, "length": 512, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 512, "length": 512, "depth": 0, "zero": false, "data": false}, +-{ "start": 1024, "length": 2096128, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 2097152, "length": 2097152, "depth": 0, "zero": false, "data": false}] ++[{ "start": 0, "length": 512, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 512, "length": 512, "depth": 0, "present": false, "zero": false, "data": false}, ++{ 
"start": 1024, "length": 2096128, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": false, "zero": false, "data": false}] + + === End qemu NBD server === + +@@ -201,19 +201,19 @@ read 1048576/1048576 bytes at offset 1048576 + 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + read 2097152/2097152 bytes at offset 2097152 + 2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 4096, "length": 1044480, "depth": 0, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 1048576, "length": 3145728, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] +-[{ "start": 0, "length": 65536, "depth": 0, "zero": false, "data": false}, +-{ "start": 65536, "length": 2031616, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 2097152, "length": 2097152, "depth": 0, "zero": false, "data": false}] ++[{ "start": 0, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 4096, "length": 1044480, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 1048576, "length": 3145728, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 65536, "depth": 0, "present": false, "zero": false, "data": false}, ++{ "start": 65536, "length": 2031616, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": false, "zero": false, "data": false}] + + === Contrast to small granularity dirty-bitmap === + +-[{ "start": 0, "length": 512, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 512, "length": 512, "depth": 0, "zero": false, "data": false}, +-{ "start": 1024, "length": 2096128, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 2097152, "length": 2097152, "depth": 0, "zero": false, "data": false}] ++[{ "start": 0, "length": 512, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 512, "length": 512, "depth": 0, "present": false, "zero": false, "data": false}, ++{ "start": 1024, "length": 2096128, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": false, "zero": false, "data": false}] + + === End qemu NBD server === + +@@ -238,12 +238,12 @@ read 2097152/2097152 bytes at offset 2097152 + + === Use qemu-nbd as server === + +-[{ "start": 0, "length": 65536, "depth": 0, "zero": false, "data": false}, +-{ "start": 65536, "length": 2031616, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 2097152, "length": 2097152, "depth": 0, "zero": false, "data": false}] +-[{ "start": 0, "length": 512, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 512, "length": 512, "depth": 0, "zero": false, "data": false}, +-{ "start": 1024, "length": 11321, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] +-[{ "start": 12345, "length": 2084807, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 2097152, "length": 2097152, "depth": 0, "zero": false, "data": false}] ++[{ "start": 0, "length": 65536, "depth": 0, "present": false, "zero": false, "data": false}, ++{ "start": 65536, "length": 2031616, "depth": 0, "present": true, "zero": false, "data": true, 
"offset": OFFSET}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": false, "zero": false, "data": false}] ++[{ "start": 0, "length": 512, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 512, "length": 512, "depth": 0, "present": false, "zero": false, "data": false}, ++{ "start": 1024, "length": 11321, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 12345, "length": 2084807, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": false, "zero": false, "data": false}] + *** done +diff --git a/tests/qemu-iotests/241.out b/tests/qemu-iotests/241.out +index 3f8c173cc8..56e95b599a 100644 +--- a/tests/qemu-iotests/241.out ++++ b/tests/qemu-iotests/241.out +@@ -4,15 +4,15 @@ QA output created by 241 + + size: 1024 + min block: 1 +-[{ "start": 0, "length": 1000, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 1000, "length": 24, "depth": 0, "zero": true, "data": false, "offset": OFFSET}] ++[{ "start": 0, "length": 1000, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 1000, "length": 24, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}] + 1 KiB (0x400) bytes allocated at offset 0 bytes (0x0) + + === Exporting unaligned raw image, forced server sector alignment === + + size: 1024 + min block: 512 +-[{ "start": 0, "length": 1024, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 1024, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + 1 KiB (0x400) bytes allocated at offset 0 bytes (0x0) + WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed raw. + Automatically detecting the format is dangerous for raw images, write operations on block 0 will be restricted. +@@ -22,7 +22,7 @@ WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed + + size: 1024 + min block: 1 +-[{ "start": 0, "length": 1000, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 1000, "length": 24, "depth": 0, "zero": true, "data": false, "offset": OFFSET}] ++[{ "start": 0, "length": 1000, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 1000, "length": 24, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}] + 1 KiB (0x400) bytes allocated at offset 0 bytes (0x0) + *** done +diff --git a/tests/qemu-iotests/244.out b/tests/qemu-iotests/244.out +index 99f56ac18c..5e03add054 100644 +--- a/tests/qemu-iotests/244.out ++++ b/tests/qemu-iotests/244.out +@@ -57,11 +57,12 @@ wrote 3145728/3145728 bytes at offset 3145728 + 3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + No errors were found on the image. 
+ +-[{ "start": 0, "length": 1048576, "depth": 0, "zero": true, "data": false}, +-{ "start": 1048576, "length": 1048576, "depth": 0, "zero": false, "data": true, "offset": 1048576}, +-{ "start": 2097152, "length": 2097152, "depth": 0, "zero": true, "data": false}, +-{ "start": 4194304, "length": 1048576, "depth": 0, "zero": true, "data": false, "offset": 4194304}, +-{ "start": 5242880, "length": 61865984, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 1048576, "depth": 0, "present": false, "zero": true, "data": false}, ++{ "start": 1048576, "length": 1048576, "depth": 0, "present": true, "zero": false, "data": true, "offset": 1048576}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 4194304, "length": 1048576, "depth": 0, "present": true, "zero": true, "data": false, "offset": 4194304}, ++{ "start": 5242880, "length": 1048576, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 6291456, "length": 60817408, "depth": 0, "present": false, "zero": true, "data": false}] + + read 1048576/1048576 bytes at offset 0 + 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +@@ -93,10 +94,10 @@ wrote 3145728/3145728 bytes at offset 3145728 + 3 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + No errors were found on the image. + +-[{ "start": 0, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": 0}, +-{ "start": 2097152, "length": 2097152, "depth": 0, "zero": true, "data": false}, +-{ "start": 4194304, "length": 2097152, "depth": 0, "zero": true, "data": false, "offset": 4194304}, +-{ "start": 6291456, "length": 60817408, "depth": 0, "zero": false, "data": true, "offset": 6291456}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": 0}, ++{ "start": 2097152, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 4194304, "length": 2097152, "depth": 0, "present": true, "zero": true, "data": false, "offset": 4194304}, ++{ "start": 6291456, "length": 60817408, "depth": 0, "present": true, "zero": false, "data": true, "offset": 6291456}] + + read 1048576/1048576 bytes at offset 0 + 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +@@ -122,8 +123,8 @@ read 1048576/1048576 bytes at offset 0 + 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + Offset Length Mapped to File + 0 0x100000 0 TEST_DIR/t.qcow2.data +-[{ "start": 0, "length": 1048576, "depth": 0, "zero": false, "data": true, "offset": 0}, +-{ "start": 1048576, "length": 66060288, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 1048576, "depth": 0, "present": true, "zero": false, "data": true, "offset": 0}, ++{ "start": 1048576, "length": 66060288, "depth": 0, "present": false, "zero": true, "data": false}] + + === Copy offloading === + +diff --git a/tests/qemu-iotests/252.out b/tests/qemu-iotests/252.out +index 12dce889f8..c578129c25 100644 +--- a/tests/qemu-iotests/252.out ++++ b/tests/qemu-iotests/252.out +@@ -23,8 +23,8 @@ read 131072/131072 bytes at offset 131072 + read 131072/131072 bytes at offset 262144 + 128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +-[{ "start": 0, "length": 262144, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 262144, "length": 131072, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 262144, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 262144, "length": 131072, "depth": 
0, "present": false, "zero": true, "data": false}] + + read 131072/131072 bytes at offset 0 + 128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +@@ -33,7 +33,7 @@ read 131072/131072 bytes at offset 131072 + read 131072/131072 bytes at offset 262144 + 128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +-[{ "start": 0, "length": 262144, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 262144, "length": 65536, "depth": 0, "zero": true, "data": false}, +-{ "start": 327680, "length": 65536, "depth": 1, "zero": true, "data": false}] ++[{ "start": 0, "length": 262144, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 262144, "length": 65536, "depth": 0, "present": true, "zero": true, "data": false}, ++{ "start": 327680, "length": 65536, "depth": 1, "present": false, "zero": true, "data": false}] + *** done +diff --git a/tests/qemu-iotests/253.out b/tests/qemu-iotests/253.out +index 3d08b305d7..b3dca75a89 100644 +--- a/tests/qemu-iotests/253.out ++++ b/tests/qemu-iotests/253.out +@@ -3,16 +3,16 @@ QA output created by 253 + === Check mapping of unaligned raw image === + + Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048575 +-[{ "start": 0, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 4096, "length": 1044480, "depth": 0, "zero": true, "data": false, "offset": OFFSET}] +-[{ "start": 0, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 4096, "length": 1044480, "depth": 0, "zero": true, "data": false, "offset": OFFSET}] ++[{ "start": 0, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 4096, "length": 1044480, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}] ++[{ "start": 0, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 4096, "length": 1044480, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}] + wrote 65535/65535 bytes at offset 983040 + 63.999 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-[{ "start": 0, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 4096, "length": 978944, "depth": 0, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 983040, "length": 65536, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] +-[{ "start": 0, "length": 4096, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 4096, "length": 978944, "depth": 0, "zero": true, "data": false, "offset": OFFSET}, +-{ "start": 983040, "length": 65536, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 4096, "length": 978944, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 983040, "length": 65536, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 4096, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 4096, "length": 978944, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}, ++{ "start": 983040, "length": 65536, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + *** done +diff --git a/tests/qemu-iotests/274.out b/tests/qemu-iotests/274.out +index cfe17a8659..16a95a4850 100644 +--- 
a/tests/qemu-iotests/274.out ++++ b/tests/qemu-iotests/274.out +@@ -26,18 +26,18 @@ read 1048576/1048576 bytes at offset 1048576 + 0/1048576 bytes allocated at offset 1 MiB + + === Checking map === +-[{ "start": 0, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": 327680}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": 327680}] + + Offset Length Mapped to File + 0 0x200000 0x50000 TEST_DIR/PID-base + +-[{ "start": 0, "length": 1048576, "depth": 1, "zero": false, "data": true, "offset": 327680}] ++[{ "start": 0, "length": 1048576, "depth": 1, "present": true, "zero": false, "data": true, "offset": 327680}] + + Offset Length Mapped to File + 0 0x100000 0x50000 TEST_DIR/PID-base + +-[{ "start": 0, "length": 1048576, "depth": 2, "zero": false, "data": true, "offset": 327680}, +-{ "start": 1048576, "length": 1048576, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 1048576, "depth": 2, "present": true, "zero": false, "data": true, "offset": 327680}, ++{ "start": 1048576, "length": 1048576, "depth": 0, "present": false, "zero": true, "data": false}] + + Offset Length Mapped to File + 0 0x100000 0x50000 TEST_DIR/PID-base +@@ -220,8 +220,8 @@ read 65536/65536 bytes at offset 5368709120 + 1 GiB (0x40000000) bytes not allocated at offset 0 bytes (0x0) + 7 GiB (0x1c0000000) bytes allocated at offset 1 GiB (0x40000000) + +-[{ "start": 0, "length": 1073741824, "depth": 1, "zero": true, "data": false}, +-{ "start": 1073741824, "length": 7516192768, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 1073741824, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 1073741824, "length": 7516192768, "depth": 0, "present": true, "zero": true, "data": false}] + + === preallocation=metadata === + Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=34359738368 lazy_refcounts=off refcount_bits=16 +@@ -239,13 +239,13 @@ read 65536/65536 bytes at offset 33285996544 + 30 GiB (0x780000000) bytes not allocated at offset 0 bytes (0x0) + 3 GiB (0xc0000000) bytes allocated at offset 30 GiB (0x780000000) + +-[{ "start": 0, "length": 32212254720, "depth": 1, "zero": true, "data": false}, +-{ "start": 32212254720, "length": 536870912, "depth": 0, "zero": true, "data": false, "offset": 327680}, +-{ "start": 32749125632, "length": 536870912, "depth": 0, "zero": true, "data": false, "offset": 537264128}, +-{ "start": 33285996544, "length": 536870912, "depth": 0, "zero": true, "data": false, "offset": 1074200576}, +-{ "start": 33822867456, "length": 536870912, "depth": 0, "zero": true, "data": false, "offset": 1611137024}, +-{ "start": 34359738368, "length": 536870912, "depth": 0, "zero": true, "data": false, "offset": 2148139008}, +-{ "start": 34896609280, "length": 536870912, "depth": 0, "zero": true, "data": false, "offset": 2685075456}] ++[{ "start": 0, "length": 32212254720, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 32212254720, "length": 536870912, "depth": 0, "present": true, "zero": true, "data": false, "offset": 327680}, ++{ "start": 32749125632, "length": 536870912, "depth": 0, "present": true, "zero": true, "data": false, "offset": 537264128}, ++{ "start": 33285996544, "length": 536870912, "depth": 0, "present": true, "zero": true, "data": false, "offset": 1074200576}, ++{ "start": 33822867456, "length": 536870912, "depth": 0, "present": true, "zero": true, "data": false, "offset": 1611137024}, ++{ 
"start": 34359738368, "length": 536870912, "depth": 0, "present": true, "zero": true, "data": false, "offset": 2148139008}, ++{ "start": 34896609280, "length": 536870912, "depth": 0, "present": true, "zero": true, "data": false, "offset": 2685075456}] + + === preallocation=falloc === + Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=10485760 lazy_refcounts=off refcount_bits=16 +@@ -263,8 +263,8 @@ read 65536/65536 bytes at offset 9437184 + 5 MiB (0x500000) bytes not allocated at offset 0 bytes (0x0) + 10 MiB (0xa00000) bytes allocated at offset 5 MiB (0x500000) + +-[{ "start": 0, "length": 5242880, "depth": 1, "zero": true, "data": false}, +-{ "start": 5242880, "length": 10485760, "depth": 0, "zero": false, "data": true, "offset": 327680}] ++[{ "start": 0, "length": 5242880, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 5242880, "length": 10485760, "depth": 0, "present": true, "zero": false, "data": true, "offset": 327680}] + + === preallocation=full === + Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=16777216 lazy_refcounts=off refcount_bits=16 +@@ -282,8 +282,8 @@ read 65536/65536 bytes at offset 11534336 + 8 MiB (0x800000) bytes not allocated at offset 0 bytes (0x0) + 4 MiB (0x400000) bytes allocated at offset 8 MiB (0x800000) + +-[{ "start": 0, "length": 8388608, "depth": 1, "zero": true, "data": false}, +-{ "start": 8388608, "length": 4194304, "depth": 0, "zero": false, "data": true, "offset": 327680}] ++[{ "start": 0, "length": 8388608, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 8388608, "length": 4194304, "depth": 0, "present": true, "zero": false, "data": true, "offset": 327680}] + + === preallocation=off === + Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=393216 lazy_refcounts=off refcount_bits=16 +@@ -301,9 +301,9 @@ read 65536/65536 bytes at offset 259072 + 192 KiB (0x30000) bytes not allocated at offset 0 bytes (0x0) + 320 KiB (0x50000) bytes allocated at offset 192 KiB (0x30000) + +-[{ "start": 0, "length": 196608, "depth": 1, "zero": true, "data": false}, +-{ "start": 196608, "length": 65536, "depth": 0, "zero": false, "data": true, "offset": 327680}, +-{ "start": 262144, "length": 262144, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 196608, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 196608, "length": 65536, "depth": 0, "present": true, "zero": false, "data": true, "offset": 327680}, ++{ "start": 262144, "length": 262144, "depth": 0, "present": true, "zero": true, "data": false}] + + === preallocation=off === + Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=409600 lazy_refcounts=off refcount_bits=16 +@@ -321,8 +321,8 @@ read 65536/65536 bytes at offset 344064 + 256 KiB (0x40000) bytes not allocated at offset 0 bytes (0x0) + 256 KiB (0x40000) bytes allocated at offset 256 KiB (0x40000) + +-[{ "start": 0, "length": 262144, "depth": 1, "zero": true, "data": false}, +-{ "start": 262144, "length": 262144, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 262144, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 262144, "length": 262144, "depth": 0, "present": true, "zero": true, "data": false}] + + === preallocation=off === + Formatting 'TEST_DIR/PID-base', fmt=qcow2 cluster_size=65536 extended_l2=off 
compression_type=zlib size=524288 lazy_refcounts=off refcount_bits=16 +@@ -340,6 +340,6 @@ read 65536/65536 bytes at offset 446464 + 256 KiB (0x40000) bytes not allocated at offset 0 bytes (0x0) + 244 KiB (0x3d000) bytes allocated at offset 256 KiB (0x40000) + +-[{ "start": 0, "length": 262144, "depth": 1, "zero": true, "data": false}, +-{ "start": 262144, "length": 249856, "depth": 0, "zero": true, "data": false}] ++[{ "start": 0, "length": 262144, "depth": 1, "present": false, "zero": true, "data": false}, ++{ "start": 262144, "length": 249856, "depth": 0, "present": true, "zero": true, "data": false}] + +diff --git a/tests/qemu-iotests/291.out b/tests/qemu-iotests/291.out +index 23411c0ff4..018d6b103f 100644 +--- a/tests/qemu-iotests/291.out ++++ b/tests/qemu-iotests/291.out +@@ -103,16 +103,16 @@ Format specific information: + + === Check bitmap contents === + +-[{ "start": 0, "length": 3145728, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 3145728, "length": 1048576, "depth": 0, "zero": false, "data": false}, +-{ "start": 4194304, "length": 6291456, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] +-[{ "start": 0, "length": 1048576, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 1048576, "length": 1048576, "depth": 0, "zero": false, "data": false}, +-{ "start": 2097152, "length": 8388608, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] +-[{ "start": 0, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 2097152, "length": 1048576, "depth": 0, "zero": false, "data": false}, +-{ "start": 3145728, "length": 7340032, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] +-[{ "start": 0, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 2097152, "length": 1048576, "depth": 0, "zero": false, "data": false}, +-{ "start": 3145728, "length": 7340032, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 3145728, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 3145728, "length": 1048576, "depth": 0, "present": false, "zero": false, "data": false}, ++{ "start": 4194304, "length": 6291456, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 1048576, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 1048576, "length": 1048576, "depth": 0, "present": false, "zero": false, "data": false}, ++{ "start": 2097152, "length": 8388608, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 2097152, "length": 1048576, "depth": 0, "present": false, "zero": false, "data": false}, ++{ "start": 3145728, "length": 7340032, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] ++[{ "start": 0, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 2097152, "length": 1048576, "depth": 0, "present": false, "zero": false, "data": false}, ++{ "start": 3145728, "length": 7340032, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] + *** done +diff --git a/tests/qemu-iotests/309 b/tests/qemu-iotests/309 +deleted file mode 100755 +index b90b279994..0000000000 +--- a/tests/qemu-iotests/309 ++++ /dev/null +@@ -1,78 +0,0 @@ +-#!/usr/bin/env bash +-# 
group: rw auto quick +-# +-# Test qemu-nbd -A +-# +-# Copyright (C) 2018-2020 Red Hat, Inc. +-# +-# This program is free software; you can redistribute it and/or modify +-# it under the terms of the GNU General Public License as published by +-# the Free Software Foundation; either version 2 of the License, or +-# (at your option) any later version. +-# +-# This program is distributed in the hope that it will be useful, +-# but WITHOUT ANY WARRANTY; without even the implied warranty of +-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +-# GNU General Public License for more details. +-# +-# You should have received a copy of the GNU General Public License +-# along with this program. If not, see . +-# +- +-seq="$(basename $0)" +-echo "QA output created by $seq" +- +-status=1 # failure is the default! +- +-_cleanup() +-{ +- _cleanup_test_img +- nbd_server_stop +-} +-trap "_cleanup; exit \$status" 0 1 2 3 15 +- +-# get standard environment, filters and checks +-. ./common.rc +-. ./common.filter +-. ./common.nbd +- +-_supported_fmt qcow2 +-_supported_proto file +-_supported_os Linux +-_require_command QEMU_NBD +- +-echo +-echo "=== Initial image setup ===" +-echo +- +-TEST_IMG="$TEST_IMG.base" _make_test_img 4M +-$QEMU_IO -c 'w 0 2M' -f $IMGFMT "$TEST_IMG.base" | _filter_qemu_io +-_make_test_img -b "$TEST_IMG.base" -F $IMGFMT 4M +-$QEMU_IO -c 'w 1M 2M' -f $IMGFMT "$TEST_IMG" | _filter_qemu_io +- +-echo +-echo "=== Check allocation over NBD ===" +-echo +- +-$QEMU_IMG map --output=json -f qcow2 "$TEST_IMG" +-IMG="driver=nbd,server.type=unix,server.path=$nbd_unix_socket" +-nbd_server_start_unix_socket -r -f qcow2 -A "$TEST_IMG" +-# Normal -f raw NBD block status loses access to allocation information +-$QEMU_IMG map --output=json --image-opts \ +- "$IMG" | _filter_qemu_img_map +-# But when we use -A, coupled with x-dirty-bitmap in the client for feeding +-# 2-bit block status from an alternative NBD metadata context (note that +-# the client code for x-dirty-bitmap intentionally collapses all depths +-# beyond 2 into a single value), we can determine: +-# unallocated (depth 0) => "zero":false, "data":true +-# local (depth 1) => "zero":false, "data":false +-# backing (depth 2+) => "zero":true, "data":true +-$QEMU_IMG map --output=json --image-opts \ +- "$IMG,x-dirty-bitmap=qemu:allocation-depth" | _filter_qemu_img_map +-# More accurate results can be obtained by other NBD clients such as +-# libnbd, but this test works without such external dependencies. 
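(Editorial aside: the depth-to-status mapping documented in the comment above, and kept verbatim when this test moves to tests/nbd-qemu-allocation later in this patch, is worth spelling out. When qemu:allocation-depth is fed through x-dirty-bitmap, the client repurposes the two base:allocation status bits to carry depth rather than real allocation, so the "zero"/"data" pairs in the map output must be re-interpreted. A hedged sketch of the decode, inverting that mapping; the enum and helper are illustrative, not QEMU code, and only the three documented pairs should ever occur:

    #include <stdbool.h>

    enum alloc_source {
        SRC_UNALLOCATED,  /* depth 0: nothing anywhere in the chain */
        SRC_LOCAL,        /* depth 1: the top image itself */
        SRC_BACKING,      /* depth 2+: some backing layer */
    };

    static enum alloc_source classify(bool zero, bool data)
    {
        if (!zero && data) {
            return SRC_UNALLOCATED;   /* "zero":false, "data":true  */
        }
        if (!zero && !data) {
            return SRC_LOCAL;         /* "zero":false, "data":false */
        }
        return SRC_BACKING;           /* "zero":true,  "data":true  */
    }

End of aside; the diff resumes below.)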
+- +-# success, all done +-echo '*** done' +-rm -f $seq.full +-status=0 +diff --git a/tests/qemu-iotests/309.out b/tests/qemu-iotests/309.out +deleted file mode 100644 +index db75bb6b0d..0000000000 +--- a/tests/qemu-iotests/309.out ++++ /dev/null +@@ -1,22 +0,0 @@ +-QA output created by 309 +- +-=== Initial image setup === +- +-Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=4194304 +-wrote 2097152/2097152 bytes at offset 0 +-2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=4194304 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT +-wrote 2097152/2097152 bytes at offset 1048576 +-2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +- +-=== Check allocation over NBD === +- +-[{ "start": 0, "length": 1048576, "depth": 1, "zero": false, "data": true, "offset": 327680}, +-{ "start": 1048576, "length": 2097152, "depth": 0, "zero": false, "data": true, "offset": 327680}, +-{ "start": 3145728, "length": 1048576, "depth": 1, "zero": true, "data": false}] +-[{ "start": 0, "length": 3145728, "depth": 0, "zero": false, "data": true, "offset": OFFSET}, +-{ "start": 3145728, "length": 1048576, "depth": 0, "zero": true, "data": false, "offset": OFFSET}] +-[{ "start": 0, "length": 1048576, "depth": 0, "zero": true, "data": true, "offset": OFFSET}, +-{ "start": 1048576, "length": 2097152, "depth": 0, "zero": false, "data": false}, +-{ "start": 3145728, "length": 1048576, "depth": 0, "zero": false, "data": true, "offset": OFFSET}] +-*** done +diff --git a/tests/qemu-iotests/tests/nbd-qemu-allocation b/tests/qemu-iotests/tests/nbd-qemu-allocation +new file mode 100755 +index 0000000000..4ee73db803 +--- /dev/null ++++ b/tests/qemu-iotests/tests/nbd-qemu-allocation +@@ -0,0 +1,81 @@ ++#!/usr/bin/env bash ++# group: rw auto quick ++# ++# Test qemu-nbd -A ++# ++# Copyright (C) 2018-2021 Red Hat, Inc. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 2 of the License, or ++# (at your option) any later version. ++# ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program. If not, see . ++# ++ ++seq="$(basename $0)" ++echo "QA output created by $seq" ++ ++status=1 # failure is the default! ++ ++_cleanup() ++{ ++ _cleanup_test_img ++ nbd_server_stop ++} ++trap "_cleanup; exit \$status" 0 1 2 3 15 ++ ++# get standard environment, filters and checks ++cd .. ++. ./common.rc ++. ./common.filter ++. 
./common.nbd ++ ++_supported_fmt qcow2 ++_supported_proto file ++_supported_os Linux ++_require_command QEMU_NBD ++ ++echo ++echo "=== Initial image setup ===" ++echo ++ ++TEST_IMG="$TEST_IMG.base" _make_test_img 4M ++$QEMU_IO -c 'w 0 2M' -f $IMGFMT "$TEST_IMG.base" | _filter_qemu_io ++_make_test_img -b "$TEST_IMG.base" -F $IMGFMT 4M ++$QEMU_IO -c 'w 1M 2M' -f $IMGFMT "$TEST_IMG" | _filter_qemu_io ++ ++echo ++echo "=== Check allocation over NBD ===" ++echo ++ ++$QEMU_IMG map --output=json -f qcow2 "$TEST_IMG" ++IMG="driver=nbd,server.type=unix,server.path=$nbd_unix_socket" ++nbd_server_start_unix_socket -r -f qcow2 -A "$TEST_IMG" ++# Inspect what the server is exposing ++$QEMU_NBD --list -k $nbd_unix_socket ++# Normal -f raw NBD block status loses access to allocation information ++$QEMU_IMG map --output=json --image-opts \ ++ "$IMG" | _filter_qemu_img_map ++# But when we use -A, coupled with x-dirty-bitmap in the client for feeding ++# 2-bit block status from an alternative NBD metadata context (note that ++# the client code for x-dirty-bitmap intentionally collapses all depths ++# beyond 2 into a single value), we can determine: ++# unallocated (depth 0) => "zero":false, "data":true ++# local (depth 1) => "zero":false, "data":false ++# backing (depth 2+) => "zero":true, "data":true ++$QEMU_IMG map --output=json --image-opts \ ++ "$IMG,x-dirty-bitmap=qemu:allocation-depth" | _filter_qemu_img_map ++# More accurate results can be obtained by other NBD clients such as ++# libnbd, but this test works without such external dependencies. ++ ++# success, all done ++echo '*** done' ++rm -f $seq.full ++status=0 +diff --git a/tests/qemu-iotests/tests/nbd-qemu-allocation.out b/tests/qemu-iotests/tests/nbd-qemu-allocation.out +new file mode 100644 +index 0000000000..0bf1abb063 +--- /dev/null ++++ b/tests/qemu-iotests/tests/nbd-qemu-allocation.out +@@ -0,0 +1,32 @@ ++QA output created by nbd-qemu-allocation ++ ++=== Initial image setup === ++ ++Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=4194304 ++wrote 2097152/2097152 bytes at offset 0 ++2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) ++Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=4194304 backing_file=TEST_DIR/t.IMGFMT.base backing_fmt=IMGFMT ++wrote 2097152/2097152 bytes at offset 1048576 ++2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) ++ ++=== Check allocation over NBD === ++ ++[{ "start": 0, "length": 1048576, "depth": 1, "present": true, "zero": false, "data": true, "offset": 327680}, ++{ "start": 1048576, "length": 2097152, "depth": 0, "present": true, "zero": false, "data": true, "offset": 327680}, ++{ "start": 3145728, "length": 1048576, "depth": 1, "present": false, "zero": true, "data": false}] ++exports available: 1 ++ export: '' ++ size: 4194304 ++ flags: 0x58f ( readonly flush fua df multi cache ) ++ min block: 1 ++ opt block: 4096 ++ max block: 33554432 ++ available meta contexts: 2 ++ base:allocation ++ qemu:allocation-depth ++[{ "start": 0, "length": 3145728, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}, ++{ "start": 3145728, "length": 1048576, "depth": 0, "present": true, "zero": true, "data": false, "offset": OFFSET}] ++[{ "start": 0, "length": 1048576, "depth": 0, "present": true, "zero": true, "data": true, "offset": OFFSET}, ++{ "start": 1048576, "length": 2097152, "depth": 0, "present": false, "zero": false, "data": false}, ++{ "start": 3145728, "length": 1048576, "depth": 0, "present": true, "zero": false, "data": true, "offset": OFFSET}] ++*** done +diff --git 
a/tests/qtest/fuzz-sdcard-test.c b/tests/qtest/fuzz-sdcard-test.c +new file mode 100644 +index 0000000000..96602eac7e +--- /dev/null ++++ b/tests/qtest/fuzz-sdcard-test.c +@@ -0,0 +1,66 @@ ++/* ++ * QTest fuzzer-generated testcase for sdcard device ++ * ++ * Copyright (c) 2021 Philippe Mathieu-Daudé ++ * ++ * SPDX-License-Identifier: GPL-2.0-or-later ++ */ ++ ++#include "qemu/osdep.h" ++#include "libqos/libqtest.h" ++ ++/* ++ * https://gitlab.com/qemu-project/qemu/-/issues/450 ++ * Used to trigger: ++ * Assertion `wpnum < sd->wpgrps_size' failed. ++ */ ++static void oss_fuzz_29225(void) ++{ ++ QTestState *s; ++ ++ s = qtest_init(" -display none -m 512m -nodefaults -nographic" ++ " -device sdhci-pci,sd-spec-version=3" ++ " -device sd-card,drive=d0" ++ " -drive if=none,index=0,file=null-co://,format=raw,id=d0"); ++ ++ qtest_outl(s, 0xcf8, 0x80001010); ++ qtest_outl(s, 0xcfc, 0xd0690); ++ qtest_outl(s, 0xcf8, 0x80001003); ++ qtest_outl(s, 0xcf8, 0x80001013); ++ qtest_outl(s, 0xcfc, 0xffffffff); ++ qtest_outl(s, 0xcf8, 0x80001003); ++ qtest_outl(s, 0xcfc, 0x3effe00); ++ ++ qtest_bufwrite(s, 0xff0d062c, "\xff", 0x1); ++ qtest_bufwrite(s, 0xff0d060f, "\xb7", 0x1); ++ qtest_bufwrite(s, 0xff0d060a, "\xc9", 0x1); ++ qtest_bufwrite(s, 0xff0d060f, "\x29", 0x1); ++ qtest_bufwrite(s, 0xff0d060f, "\xc2", 0x1); ++ qtest_bufwrite(s, 0xff0d0628, "\xf7", 0x1); ++ qtest_bufwrite(s, 0x0, "\xe3", 0x1); ++ qtest_bufwrite(s, 0x7, "\x13", 0x1); ++ qtest_bufwrite(s, 0x8, "\xe3", 0x1); ++ qtest_bufwrite(s, 0xf, "\xe3", 0x1); ++ qtest_bufwrite(s, 0xff0d060f, "\x03", 0x1); ++ qtest_bufwrite(s, 0xff0d0605, "\x01", 0x1); ++ qtest_bufwrite(s, 0xff0d060b, "\xff", 0x1); ++ qtest_bufwrite(s, 0xff0d060c, "\xff", 0x1); ++ qtest_bufwrite(s, 0xff0d060e, "\xff", 0x1); ++ qtest_bufwrite(s, 0xff0d060f, "\x06", 0x1); ++ qtest_bufwrite(s, 0xff0d060f, "\x9e", 0x1); ++ ++ qtest_quit(s); ++} ++ ++int main(int argc, char **argv) ++{ ++ const char *arch = qtest_get_arch(); ++ ++ g_test_init(&argc, &argv, NULL); ++ ++ if (strcmp(arch, "i386") == 0) { ++ qtest_add_func("fuzz/sdcard/oss_fuzz_29225", oss_fuzz_29225); ++ } ++ ++ return g_test_run(); ++} +diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build +index ee7347b727..e22a0792c5 100644 +--- a/tests/qtest/meson.build ++++ b/tests/qtest/meson.build +@@ -21,6 +21,7 @@ qtests_generic = \ + (config_all_devices.has_key('CONFIG_MEGASAS_SCSI_PCI') ? ['fuzz-megasas-test'] : []) + \ + (config_all_devices.has_key('CONFIG_VIRTIO_SCSI') ? ['fuzz-virtio-scsi-test'] : []) + \ + (config_all_devices.has_key('CONFIG_SB16') ? ['fuzz-sb16-test'] : []) + \ ++ (config_all_devices.has_key('CONFIG_SDHCI_PCI') ? 
['fuzz-sdcard-test'] : []) + \ + [ + 'cdrom-test', + 'device-introspect-test', +diff --git a/trace/control-target.c b/trace/control-target.c +index e293eeed7c..8418673c18 100644 +--- a/trace/control-target.c ++++ b/trace/control-target.c +@@ -127,7 +127,7 @@ void trace_init_vcpu(CPUState *vcpu) + { + TraceEventIter iter; + TraceEvent *ev; +- trace_event_iter_init(&iter, NULL); ++ trace_event_iter_init_all(&iter); + while ((ev = trace_event_iter_next(&iter)) != NULL) { + if (trace_event_is_vcpu(ev) && + trace_event_get_state_static(ev) && +diff --git a/trace/control.c b/trace/control.c +index 4be38e1af2..d5b68e846e 100644 +--- a/trace/control.c ++++ b/trace/control.c +@@ -82,6 +82,10 @@ void trace_event_register_group(TraceEvent **events) + event_groups = g_renew(TraceEventGroup, event_groups, nevent_groups + 1); + event_groups[nevent_groups].events = events; + nevent_groups++; ++ ++#ifdef CONFIG_TRACE_SIMPLE ++ st_init_group(nevent_groups - 1); ++#endif + } + + +@@ -91,7 +95,7 @@ TraceEvent *trace_event_name(const char *name) + + TraceEventIter iter; + TraceEvent *ev; +- trace_event_iter_init(&iter, NULL); ++ trace_event_iter_init_all(&iter); + while ((ev = trace_event_iter_next(&iter)) != NULL) { + if (strcmp(trace_event_get_name(ev), name) == 0) { + return ev; +@@ -100,27 +104,46 @@ TraceEvent *trace_event_name(const char *name) + return NULL; + } + +-void trace_event_iter_init(TraceEventIter *iter, const char *pattern) ++void trace_event_iter_init_all(TraceEventIter *iter) + { + iter->event = 0; + iter->group = 0; ++ iter->group_id = -1; ++ iter->pattern = NULL; ++} ++ ++void trace_event_iter_init_pattern(TraceEventIter *iter, const char *pattern) ++{ ++ trace_event_iter_init_all(iter); + iter->pattern = pattern; + } + ++void trace_event_iter_init_group(TraceEventIter *iter, size_t group_id) ++{ ++ trace_event_iter_init_all(iter); ++ iter->group_id = group_id; ++} ++ + TraceEvent *trace_event_iter_next(TraceEventIter *iter) + { + while (iter->group < nevent_groups && + event_groups[iter->group].events[iter->event] != NULL) { + TraceEvent *ev = event_groups[iter->group].events[iter->event]; ++ size_t group = iter->group; + iter->event++; + if (event_groups[iter->group].events[iter->event] == NULL) { + iter->event = 0; + iter->group++; + } +- if (!iter->pattern || +- g_pattern_match_simple(iter->pattern, trace_event_get_name(ev))) { +- return ev; ++ if (iter->pattern && ++ !g_pattern_match_simple(iter->pattern, trace_event_get_name(ev))) { ++ continue; ++ } ++ if (iter->group_id != -1 && ++ iter->group_id != group) { ++ continue; + } ++ return ev; + } + + return NULL; +@@ -130,7 +153,7 @@ void trace_list_events(FILE *f) + { + TraceEventIter iter; + TraceEvent *ev; +- trace_event_iter_init(&iter, NULL); ++ trace_event_iter_init_all(&iter); + while ((ev = trace_event_iter_next(&iter)) != NULL) { + fprintf(f, "%s\n", trace_event_get_name(ev)); + } +@@ -150,7 +173,7 @@ static void do_trace_enable_events(const char *line_buf) + TraceEvent *ev; + bool is_pattern = trace_event_is_pattern(line_ptr); + +- trace_event_iter_init(&iter, line_ptr); ++ trace_event_iter_init_pattern(&iter, line_ptr); + while ((ev = trace_event_iter_next(&iter)) != NULL) { + if (!trace_event_get_state_static(ev)) { + if (!is_pattern) { +@@ -256,7 +279,7 @@ void trace_fini_vcpu(CPUState *vcpu) + + trace_guest_cpu_exit(vcpu); + +- trace_event_iter_init(&iter, NULL); ++ trace_event_iter_init_all(&iter); + while ((ev = trace_event_iter_next(&iter)) != NULL) { + if (trace_event_is_vcpu(ev) && + 
trace_event_get_state_static(ev) && +diff --git a/trace/control.h b/trace/control.h +index 9522a7b318..23b8393b29 100644 +--- a/trace/control.h ++++ b/trace/control.h +@@ -13,22 +13,44 @@ + #include "event-internal.h" + + typedef struct TraceEventIter { ++ /* iter state */ + size_t event; + size_t group; ++ /* filter conditions */ ++ size_t group_id; + const char *pattern; + } TraceEventIter; + + + /** +- * trace_event_iter_init: ++ * trace_event_iter_init_all: + * @iter: the event iterator struct +- * @pattern: optional pattern to filter events on name + * + * Initialize the event iterator struct @iter, +- * optionally using @pattern to filter out events ++ * for all events. ++ */ ++void trace_event_iter_init_all(TraceEventIter *iter); ++ ++/** ++ * trace_event_iter_init_pattern: ++ * @iter: the event iterator struct ++ * @pattern: pattern to filter events on name ++ * ++ * Initialize the event iterator struct @iter, ++ * using @pattern to filter out events + * with non-matching names. + */ +-void trace_event_iter_init(TraceEventIter *iter, const char *pattern); ++void trace_event_iter_init_pattern(TraceEventIter *iter, const char *pattern); ++ ++/** ++ * trace_event_iter_init_group: ++ * @iter: the event iterator struct ++ * @group_id: group_id to filter events by group. ++ * ++ * Initialize the event iterator struct @iter, ++ * using @group_id to filter for events in the group. ++ */ ++void trace_event_iter_init_group(TraceEventIter *iter, size_t group_id); + + /** + * trace_event_iter_next: +diff --git a/trace/meson.build b/trace/meson.build +index 08f83a15c3..ef18f11d64 100644 +--- a/trace/meson.build ++++ b/trace/meson.build +@@ -26,7 +26,7 @@ foreach dir : [ '.' ] + trace_events_subdirs + input: trace_events_file, + command: [ tracetool, group, '--format=ust-events-h', '@INPUT@', '@OUTPUT@' ], + depend_files: tracetool_depends) +- trace_ss.add(trace_ust_h, lttng, urcubp) ++ trace_ss.add(trace_ust_h, lttng) + genh += trace_ust_h + endif + trace_ss.add(trace_h, trace_c) +diff --git a/trace/qmp.c b/trace/qmp.c +index 85f81e47cc..3b4f4702b4 100644 +--- a/trace/qmp.c ++++ b/trace/qmp.c +@@ -55,7 +55,7 @@ static bool check_events(bool has_vcpu, bool ignore_unavailable, bool is_pattern + /* error for unavailable events */ + TraceEventIter iter; + TraceEvent *ev; +- trace_event_iter_init(&iter, name); ++ trace_event_iter_init_pattern(&iter, name); + while ((ev = trace_event_iter_next(&iter)) != NULL) { + if (!ignore_unavailable && !trace_event_get_state_static(ev)) { + error_setg(errp, "event \"%s\" is disabled", trace_event_get_name(ev)); +@@ -90,7 +90,7 @@ TraceEventInfoList *qmp_trace_event_get_state(const char *name, + } + + /* Get states (all errors checked above) */ +- trace_event_iter_init(&iter, name); ++ trace_event_iter_init_pattern(&iter, name); + while ((ev = trace_event_iter_next(&iter)) != NULL) { + TraceEventInfo *value; + bool is_vcpu = trace_event_is_vcpu(ev); +@@ -153,7 +153,7 @@ void qmp_trace_event_set_state(const char *name, bool enable, + } + + /* Apply changes (all errors checked above) */ +- trace_event_iter_init(&iter, name); ++ trace_event_iter_init_pattern(&iter, name); + while ((ev = trace_event_iter_next(&iter)) != NULL) { + if (!trace_event_get_state_static(ev) || + (has_vcpu && !trace_event_is_vcpu(ev))) { +diff --git a/trace/simple.c b/trace/simple.c +index 9cd2ed1fb3..ac499edee0 100644 +--- a/trace/simple.c ++++ b/trace/simple.c +@@ -280,14 +280,12 @@ void trace_record_finish(TraceBufferRecord *rec) + } + } + +-static int st_write_event_mapping(void) 
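(Editorial aside on the trace/simple.c hunk at this point: st_write_event_mapping() is changed to take a caller-initialized iterator, which is what lets the new st_init_group() emit mapping records for just one freshly registered event group while st_set_trace_file_enabled() keeps walking all events. A sketch of the resulting call pattern, using the iterator API this series adds in trace/control.h; the wrapper function is illustrative and assumes trace/control.h is included:

    static void write_mappings_for_group(size_t group_id)
    {
        TraceEventIter iter;
        TraceEvent *ev;

        /* Visit only events belonging to one group... */
        trace_event_iter_init_group(&iter, group_id);
        while ((ev = trace_event_iter_next(&iter)) != NULL) {
            /* ...e.g. emit a TRACE_RECORD_TYPE_MAPPING record for ev */
        }
    }

End of aside; the diff resumes below.)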
++static int st_write_event_mapping(TraceEventIter *iter) + { + uint64_t type = TRACE_RECORD_TYPE_MAPPING; +- TraceEventIter iter; + TraceEvent *ev; + +- trace_event_iter_init(&iter, NULL); +- while ((ev = trace_event_iter_next(&iter)) != NULL) { ++ while ((ev = trace_event_iter_next(iter)) != NULL) { + uint64_t id = trace_event_get_id(ev); + const char *name = trace_event_get_name(ev); + uint32_t len = strlen(name); +@@ -309,6 +307,7 @@ static int st_write_event_mapping(void) + */ + bool st_set_trace_file_enabled(bool enable) + { ++ TraceEventIter iter; + bool was_enabled = trace_fp; + + if (enable == !!trace_fp) { +@@ -333,8 +332,9 @@ bool st_set_trace_file_enabled(bool enable) + return was_enabled; + } + ++ trace_event_iter_init_all(&iter); + if (fwrite(&header, sizeof header, 1, trace_fp) != 1 || +- st_write_event_mapping() < 0) { ++ st_write_event_mapping(&iter) < 0) { + fclose(trace_fp); + trace_fp = NULL; + return was_enabled; +@@ -422,3 +422,15 @@ bool st_init(void) + atexit(st_flush_trace_buffer); + return true; + } ++ ++void st_init_group(size_t group) ++{ ++ TraceEventIter iter; ++ ++ if (!trace_writeout_enabled) { ++ return; ++ } ++ ++ trace_event_iter_init_group(&iter, group); ++ st_write_event_mapping(&iter); ++} +diff --git a/trace/simple.h b/trace/simple.h +index 26ccbc8b8a..ee1983ce56 100644 +--- a/trace/simple.h ++++ b/trace/simple.h +@@ -15,6 +15,7 @@ void st_print_trace_file_status(void); + bool st_set_trace_file_enabled(bool enable); + void st_set_trace_file(const char *file); + bool st_init(void); ++void st_init_group(size_t group); + void st_flush_trace_buffer(void); + + typedef struct { +diff --git a/ui/cocoa.m b/ui/cocoa.m +deleted file mode 100644 +index 9f72844b07..0000000000 +--- a/ui/cocoa.m ++++ /dev/null +@@ -1,2063 +0,0 @@ +-/* +- * QEMU Cocoa CG display driver +- * +- * Copyright (c) 2008 Mike Kronenberg +- * +- * Permission is hereby granted, free of charge, to any person obtaining a copy +- * of this software and associated documentation files (the "Software"), to deal +- * in the Software without restriction, including without limitation the rights +- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +- * copies of the Software, and to permit persons to whom the Software is +- * furnished to do so, subject to the following conditions: +- * +- * The above copyright notice and this permission notice shall be included in +- * all copies or substantial portions of the Software. +- * +- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +- * THE SOFTWARE. 
+- */ +- +-#include "qemu/osdep.h" +- +-#import +-#include +- +-#include "qemu-common.h" +-#include "ui/clipboard.h" +-#include "ui/console.h" +-#include "ui/input.h" +-#include "ui/kbd-state.h" +-#include "sysemu/sysemu.h" +-#include "sysemu/runstate.h" +-#include "sysemu/cpu-throttle.h" +-#include "qapi/error.h" +-#include "qapi/qapi-commands-block.h" +-#include "qapi/qapi-commands-machine.h" +-#include "qapi/qapi-commands-misc.h" +-#include "sysemu/blockdev.h" +-#include "qemu-version.h" +-#include "qemu/cutils.h" +-#include "qemu/main-loop.h" +-#include "qemu/module.h" +-#include +-#include "hw/core/cpu.h" +- +-#ifndef MAC_OS_X_VERSION_10_13 +-#define MAC_OS_X_VERSION_10_13 101300 +-#endif +- +-/* 10.14 deprecates NSOnState and NSOffState in favor of +- * NSControlStateValueOn/Off, which were introduced in 10.13. +- * Define for older versions +- */ +-#if MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_13 +-#define NSControlStateValueOn NSOnState +-#define NSControlStateValueOff NSOffState +-#endif +- +-//#define DEBUG +- +-#ifdef DEBUG +-#define COCOA_DEBUG(...) { (void) fprintf (stdout, __VA_ARGS__); } +-#else +-#define COCOA_DEBUG(...) ((void) 0) +-#endif +- +-#define cgrect(nsrect) (*(CGRect *)&(nsrect)) +- +-typedef struct { +- int width; +- int height; +-} QEMUScreen; +- +-static void cocoa_update(DisplayChangeListener *dcl, +- int x, int y, int w, int h); +- +-static void cocoa_switch(DisplayChangeListener *dcl, +- DisplaySurface *surface); +- +-static void cocoa_refresh(DisplayChangeListener *dcl); +- +-static NSWindow *normalWindow, *about_window; +-static const DisplayChangeListenerOps dcl_ops = { +- .dpy_name = "cocoa", +- .dpy_gfx_update = cocoa_update, +- .dpy_gfx_switch = cocoa_switch, +- .dpy_refresh = cocoa_refresh, +-}; +-static DisplayChangeListener dcl = { +- .ops = &dcl_ops, +-}; +-static int last_buttons; +-static int cursor_hide = 1; +- +-static int gArgc; +-static char **gArgv; +-static bool stretch_video; +-static NSTextField *pauseLabel; +-static NSArray * supportedImageFileTypes; +- +-static QemuSemaphore display_init_sem; +-static QemuSemaphore app_started_sem; +-static bool allow_events; +- +-static NSInteger cbchangecount = -1; +-static QemuClipboardInfo *cbinfo; +-static QemuEvent cbevent; +- +-// Utility functions to run specified code block with iothread lock held +-typedef void (^CodeBlock)(void); +-typedef bool (^BoolCodeBlock)(void); +- +-static void with_iothread_lock(CodeBlock block) +-{ +- bool locked = qemu_mutex_iothread_locked(); +- if (!locked) { +- qemu_mutex_lock_iothread(); +- } +- block(); +- if (!locked) { +- qemu_mutex_unlock_iothread(); +- } +-} +- +-static bool bool_with_iothread_lock(BoolCodeBlock block) +-{ +- bool locked = qemu_mutex_iothread_locked(); +- bool val; +- +- if (!locked) { +- qemu_mutex_lock_iothread(); +- } +- val = block(); +- if (!locked) { +- qemu_mutex_unlock_iothread(); +- } +- return val; +-} +- +-// Mac to QKeyCode conversion +-static const int mac_to_qkeycode_map[] = { +- [kVK_ANSI_A] = Q_KEY_CODE_A, +- [kVK_ANSI_B] = Q_KEY_CODE_B, +- [kVK_ANSI_C] = Q_KEY_CODE_C, +- [kVK_ANSI_D] = Q_KEY_CODE_D, +- [kVK_ANSI_E] = Q_KEY_CODE_E, +- [kVK_ANSI_F] = Q_KEY_CODE_F, +- [kVK_ANSI_G] = Q_KEY_CODE_G, +- [kVK_ANSI_H] = Q_KEY_CODE_H, +- [kVK_ANSI_I] = Q_KEY_CODE_I, +- [kVK_ANSI_J] = Q_KEY_CODE_J, +- [kVK_ANSI_K] = Q_KEY_CODE_K, +- [kVK_ANSI_L] = Q_KEY_CODE_L, +- [kVK_ANSI_M] = Q_KEY_CODE_M, +- [kVK_ANSI_N] = Q_KEY_CODE_N, +- [kVK_ANSI_O] = Q_KEY_CODE_O, +- [kVK_ANSI_P] = Q_KEY_CODE_P, +- [kVK_ANSI_Q] = Q_KEY_CODE_Q, +- 
[kVK_ANSI_R] = Q_KEY_CODE_R, +- [kVK_ANSI_S] = Q_KEY_CODE_S, +- [kVK_ANSI_T] = Q_KEY_CODE_T, +- [kVK_ANSI_U] = Q_KEY_CODE_U, +- [kVK_ANSI_V] = Q_KEY_CODE_V, +- [kVK_ANSI_W] = Q_KEY_CODE_W, +- [kVK_ANSI_X] = Q_KEY_CODE_X, +- [kVK_ANSI_Y] = Q_KEY_CODE_Y, +- [kVK_ANSI_Z] = Q_KEY_CODE_Z, +- +- [kVK_ANSI_0] = Q_KEY_CODE_0, +- [kVK_ANSI_1] = Q_KEY_CODE_1, +- [kVK_ANSI_2] = Q_KEY_CODE_2, +- [kVK_ANSI_3] = Q_KEY_CODE_3, +- [kVK_ANSI_4] = Q_KEY_CODE_4, +- [kVK_ANSI_5] = Q_KEY_CODE_5, +- [kVK_ANSI_6] = Q_KEY_CODE_6, +- [kVK_ANSI_7] = Q_KEY_CODE_7, +- [kVK_ANSI_8] = Q_KEY_CODE_8, +- [kVK_ANSI_9] = Q_KEY_CODE_9, +- +- [kVK_ANSI_Grave] = Q_KEY_CODE_GRAVE_ACCENT, +- [kVK_ANSI_Minus] = Q_KEY_CODE_MINUS, +- [kVK_ANSI_Equal] = Q_KEY_CODE_EQUAL, +- [kVK_Delete] = Q_KEY_CODE_BACKSPACE, +- [kVK_CapsLock] = Q_KEY_CODE_CAPS_LOCK, +- [kVK_Tab] = Q_KEY_CODE_TAB, +- [kVK_Return] = Q_KEY_CODE_RET, +- [kVK_ANSI_LeftBracket] = Q_KEY_CODE_BRACKET_LEFT, +- [kVK_ANSI_RightBracket] = Q_KEY_CODE_BRACKET_RIGHT, +- [kVK_ANSI_Backslash] = Q_KEY_CODE_BACKSLASH, +- [kVK_ANSI_Semicolon] = Q_KEY_CODE_SEMICOLON, +- [kVK_ANSI_Quote] = Q_KEY_CODE_APOSTROPHE, +- [kVK_ANSI_Comma] = Q_KEY_CODE_COMMA, +- [kVK_ANSI_Period] = Q_KEY_CODE_DOT, +- [kVK_ANSI_Slash] = Q_KEY_CODE_SLASH, +- [kVK_Space] = Q_KEY_CODE_SPC, +- +- [kVK_ANSI_Keypad0] = Q_KEY_CODE_KP_0, +- [kVK_ANSI_Keypad1] = Q_KEY_CODE_KP_1, +- [kVK_ANSI_Keypad2] = Q_KEY_CODE_KP_2, +- [kVK_ANSI_Keypad3] = Q_KEY_CODE_KP_3, +- [kVK_ANSI_Keypad4] = Q_KEY_CODE_KP_4, +- [kVK_ANSI_Keypad5] = Q_KEY_CODE_KP_5, +- [kVK_ANSI_Keypad6] = Q_KEY_CODE_KP_6, +- [kVK_ANSI_Keypad7] = Q_KEY_CODE_KP_7, +- [kVK_ANSI_Keypad8] = Q_KEY_CODE_KP_8, +- [kVK_ANSI_Keypad9] = Q_KEY_CODE_KP_9, +- [kVK_ANSI_KeypadDecimal] = Q_KEY_CODE_KP_DECIMAL, +- [kVK_ANSI_KeypadEnter] = Q_KEY_CODE_KP_ENTER, +- [kVK_ANSI_KeypadPlus] = Q_KEY_CODE_KP_ADD, +- [kVK_ANSI_KeypadMinus] = Q_KEY_CODE_KP_SUBTRACT, +- [kVK_ANSI_KeypadMultiply] = Q_KEY_CODE_KP_MULTIPLY, +- [kVK_ANSI_KeypadDivide] = Q_KEY_CODE_KP_DIVIDE, +- [kVK_ANSI_KeypadEquals] = Q_KEY_CODE_KP_EQUALS, +- [kVK_ANSI_KeypadClear] = Q_KEY_CODE_NUM_LOCK, +- +- [kVK_UpArrow] = Q_KEY_CODE_UP, +- [kVK_DownArrow] = Q_KEY_CODE_DOWN, +- [kVK_LeftArrow] = Q_KEY_CODE_LEFT, +- [kVK_RightArrow] = Q_KEY_CODE_RIGHT, +- +- [kVK_Help] = Q_KEY_CODE_INSERT, +- [kVK_Home] = Q_KEY_CODE_HOME, +- [kVK_PageUp] = Q_KEY_CODE_PGUP, +- [kVK_PageDown] = Q_KEY_CODE_PGDN, +- [kVK_End] = Q_KEY_CODE_END, +- [kVK_ForwardDelete] = Q_KEY_CODE_DELETE, +- +- [kVK_Escape] = Q_KEY_CODE_ESC, +- +- /* The Power key can't be used directly because the operating system uses +- * it. This key can be emulated by using it in place of another key such as +- * F1. Don't forget to disable the real key binding. 
+- */ +- /* [kVK_F1] = Q_KEY_CODE_POWER, */ +- +- [kVK_F1] = Q_KEY_CODE_F1, +- [kVK_F2] = Q_KEY_CODE_F2, +- [kVK_F3] = Q_KEY_CODE_F3, +- [kVK_F4] = Q_KEY_CODE_F4, +- [kVK_F5] = Q_KEY_CODE_F5, +- [kVK_F6] = Q_KEY_CODE_F6, +- [kVK_F7] = Q_KEY_CODE_F7, +- [kVK_F8] = Q_KEY_CODE_F8, +- [kVK_F9] = Q_KEY_CODE_F9, +- [kVK_F10] = Q_KEY_CODE_F10, +- [kVK_F11] = Q_KEY_CODE_F11, +- [kVK_F12] = Q_KEY_CODE_F12, +- [kVK_F13] = Q_KEY_CODE_PRINT, +- [kVK_F14] = Q_KEY_CODE_SCROLL_LOCK, +- [kVK_F15] = Q_KEY_CODE_PAUSE, +- +- // JIS keyboards only +- [kVK_JIS_Yen] = Q_KEY_CODE_YEN, +- [kVK_JIS_Underscore] = Q_KEY_CODE_RO, +- [kVK_JIS_KeypadComma] = Q_KEY_CODE_KP_COMMA, +- [kVK_JIS_Eisu] = Q_KEY_CODE_MUHENKAN, +- [kVK_JIS_Kana] = Q_KEY_CODE_HENKAN, +- +- /* +- * The eject and volume keys can't be used here because they are handled at +- * a lower level than what an Application can see. +- */ +-}; +- +-static int cocoa_keycode_to_qemu(int keycode) +-{ +- if (ARRAY_SIZE(mac_to_qkeycode_map) <= keycode) { +- error_report("(cocoa) warning unknown keycode 0x%x", keycode); +- return 0; +- } +- return mac_to_qkeycode_map[keycode]; +-} +- +-/* Displays an alert dialog box with the specified message */ +-static void QEMU_Alert(NSString *message) +-{ +- NSAlert *alert; +- alert = [NSAlert new]; +- [alert setMessageText: message]; +- [alert runModal]; +-} +- +-/* Handles any errors that happen with a device transaction */ +-static void handleAnyDeviceErrors(Error * err) +-{ +- if (err) { +- QEMU_Alert([NSString stringWithCString: error_get_pretty(err) +- encoding: NSASCIIStringEncoding]); +- error_free(err); +- } +-} +- +-/* +- ------------------------------------------------------ +- QemuCocoaView +- ------------------------------------------------------ +-*/ +-@interface QemuCocoaView : NSView +-{ +- QEMUScreen screen; +- NSWindow *fullScreenWindow; +- float cx,cy,cw,ch,cdx,cdy; +- pixman_image_t *pixman_image; +- QKbdState *kbd; +- BOOL isMouseGrabbed; +- BOOL isFullscreen; +- BOOL isAbsoluteEnabled; +-} +-- (void) switchSurface:(pixman_image_t *)image; +-- (void) grabMouse; +-- (void) ungrabMouse; +-- (void) toggleFullScreen:(id)sender; +-- (void) handleMonitorInput:(NSEvent *)event; +-- (bool) handleEvent:(NSEvent *)event; +-- (bool) handleEventLocked:(NSEvent *)event; +-- (void) setAbsoluteEnabled:(BOOL)tIsAbsoluteEnabled; +-/* The state surrounding mouse grabbing is potentially confusing. +- * isAbsoluteEnabled tracks qemu_input_is_absolute() [ie "is the emulated +- * pointing device an absolute-position one?"], but is only updated on +- * next refresh. +- * isMouseGrabbed tracks whether GUI events are directed to the guest; +- * it controls whether special keys like Cmd get sent to the guest, +- * and whether we capture the mouse when in non-absolute mode. 
+- */ +-- (BOOL) isMouseGrabbed; +-- (BOOL) isAbsoluteEnabled; +-- (float) cdx; +-- (float) cdy; +-- (QEMUScreen) gscreen; +-- (void) raiseAllKeys; +-@end +- +-QemuCocoaView *cocoaView; +- +-@implementation QemuCocoaView +-- (id)initWithFrame:(NSRect)frameRect +-{ +- COCOA_DEBUG("QemuCocoaView: initWithFrame\n"); +- +- self = [super initWithFrame:frameRect]; +- if (self) { +- +- screen.width = frameRect.size.width; +- screen.height = frameRect.size.height; +- kbd = qkbd_state_init(dcl.con); +- +- } +- return self; +-} +- +-- (void) dealloc +-{ +- COCOA_DEBUG("QemuCocoaView: dealloc\n"); +- +- if (pixman_image) { +- pixman_image_unref(pixman_image); +- } +- +- qkbd_state_free(kbd); +- [super dealloc]; +-} +- +-- (BOOL) isOpaque +-{ +- return YES; +-} +- +-- (BOOL) screenContainsPoint:(NSPoint) p +-{ +- return (p.x > -1 && p.x < screen.width && p.y > -1 && p.y < screen.height); +-} +- +-/* Get location of event and convert to virtual screen coordinate */ +-- (CGPoint) screenLocationOfEvent:(NSEvent *)ev +-{ +- NSWindow *eventWindow = [ev window]; +- // XXX: Use CGRect and -convertRectFromScreen: to support macOS 10.10 +- CGRect r = CGRectZero; +- r.origin = [ev locationInWindow]; +- if (!eventWindow) { +- if (!isFullscreen) { +- return [[self window] convertRectFromScreen:r].origin; +- } else { +- CGPoint locationInSelfWindow = [[self window] convertRectFromScreen:r].origin; +- CGPoint loc = [self convertPoint:locationInSelfWindow fromView:nil]; +- if (stretch_video) { +- loc.x /= cdx; +- loc.y /= cdy; +- } +- return loc; +- } +- } else if ([[self window] isEqual:eventWindow]) { +- if (!isFullscreen) { +- return r.origin; +- } else { +- CGPoint loc = [self convertPoint:r.origin fromView:nil]; +- if (stretch_video) { +- loc.x /= cdx; +- loc.y /= cdy; +- } +- return loc; +- } +- } else { +- return [[self window] convertRectFromScreen:[eventWindow convertRectToScreen:r]].origin; +- } +-} +- +-- (void) hideCursor +-{ +- if (!cursor_hide) { +- return; +- } +- [NSCursor hide]; +-} +- +-- (void) unhideCursor +-{ +- if (!cursor_hide) { +- return; +- } +- [NSCursor unhide]; +-} +- +-- (void) drawRect:(NSRect) rect +-{ +- COCOA_DEBUG("QemuCocoaView: drawRect\n"); +- +- // get CoreGraphic context +- CGContextRef viewContextRef = [[NSGraphicsContext currentContext] CGContext]; +- +- CGContextSetInterpolationQuality (viewContextRef, kCGInterpolationNone); +- CGContextSetShouldAntialias (viewContextRef, NO); +- +- // draw screen bitmap directly to Core Graphics context +- if (!pixman_image) { +- // Draw request before any guest device has set up a framebuffer: +- // just draw an opaque black rectangle +- CGContextSetRGBFillColor(viewContextRef, 0, 0, 0, 1.0); +- CGContextFillRect(viewContextRef, NSRectToCGRect(rect)); +- } else { +- int w = pixman_image_get_width(pixman_image); +- int h = pixman_image_get_height(pixman_image); +- int bitsPerPixel = PIXMAN_FORMAT_BPP(pixman_image_get_format(pixman_image)); +- int stride = pixman_image_get_stride(pixman_image); +- CGDataProviderRef dataProviderRef = CGDataProviderCreateWithData( +- NULL, +- pixman_image_get_data(pixman_image), +- stride * h, +- NULL +- ); +- CGImageRef imageRef = CGImageCreate( +- w, //width +- h, //height +- DIV_ROUND_UP(bitsPerPixel, 8) * 2, //bitsPerComponent +- bitsPerPixel, //bitsPerPixel +- stride, //bytesPerRow +- CGColorSpaceCreateWithName(kCGColorSpaceSRGB), //colorspace +- kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst, //bitmapInfo +- dataProviderRef, //provider +- NULL, //decode +- 0, //interpolate +- 
kCGRenderingIntentDefault //intent +- ); +- // selective drawing code (draws only dirty rectangles) (OS X >= 10.4) +- const NSRect *rectList; +- NSInteger rectCount; +- int i; +- CGImageRef clipImageRef; +- CGRect clipRect; +- +- [self getRectsBeingDrawn:&rectList count:&rectCount]; +- for (i = 0; i < rectCount; i++) { +- clipRect.origin.x = rectList[i].origin.x / cdx; +- clipRect.origin.y = (float)h - (rectList[i].origin.y + rectList[i].size.height) / cdy; +- clipRect.size.width = rectList[i].size.width / cdx; +- clipRect.size.height = rectList[i].size.height / cdy; +- clipImageRef = CGImageCreateWithImageInRect( +- imageRef, +- clipRect +- ); +- CGContextDrawImage (viewContextRef, cgrect(rectList[i]), clipImageRef); +- CGImageRelease (clipImageRef); +- } +- CGImageRelease (imageRef); +- CGDataProviderRelease(dataProviderRef); +- } +-} +- +-- (void) setContentDimensions +-{ +- COCOA_DEBUG("QemuCocoaView: setContentDimensions\n"); +- +- if (isFullscreen) { +- cdx = [[NSScreen mainScreen] frame].size.width / (float)screen.width; +- cdy = [[NSScreen mainScreen] frame].size.height / (float)screen.height; +- +- /* stretches video, but keeps same aspect ratio */ +- if (stretch_video == true) { +- /* use smallest stretch value - prevents clipping on sides */ +- if (MIN(cdx, cdy) == cdx) { +- cdy = cdx; +- } else { +- cdx = cdy; +- } +- } else { /* No stretching */ +- cdx = cdy = 1; +- } +- cw = screen.width * cdx; +- ch = screen.height * cdy; +- cx = ([[NSScreen mainScreen] frame].size.width - cw) / 2.0; +- cy = ([[NSScreen mainScreen] frame].size.height - ch) / 2.0; +- } else { +- cx = 0; +- cy = 0; +- cw = screen.width; +- ch = screen.height; +- cdx = 1.0; +- cdy = 1.0; +- } +-} +- +-- (void) updateUIInfo +-{ +- NSSize frameSize; +- QemuUIInfo info; +- +- if (!qemu_console_is_graphic(dcl.con)) { +- return; +- } +- +- if ([self window]) { +- NSDictionary *description = [[[self window] screen] deviceDescription]; +- CGDirectDisplayID display = [[description objectForKey:@"NSScreenNumber"] unsignedIntValue]; +- NSSize screenSize = [[[self window] screen] frame].size; +- CGSize screenPhysicalSize = CGDisplayScreenSize(display); +- +- frameSize = isFullscreen ? screenSize : [self frame].size; +- info.width_mm = frameSize.width / screenSize.width * screenPhysicalSize.width; +- info.height_mm = frameSize.height / screenSize.height * screenPhysicalSize.height; +- } else { +- frameSize = [self frame].size; +- info.width_mm = 0; +- info.height_mm = 0; +- } +- +- info.xoff = 0; +- info.yoff = 0; +- info.width = frameSize.width; +- info.height = frameSize.height; +- +- dpy_set_ui_info(dcl.con, &info); +-} +- +-- (void)viewDidMoveToWindow +-{ +- [self updateUIInfo]; +-} +- +-- (void) switchSurface:(pixman_image_t *)image +-{ +- COCOA_DEBUG("QemuCocoaView: switchSurface\n"); +- +- int w = pixman_image_get_width(image); +- int h = pixman_image_get_height(image); +- /* cdx == 0 means this is our very first surface, in which case we need +- * to recalculate the content dimensions even if it happens to be the size +- * of the initial empty window. 
+- */ +- bool isResize = (w != screen.width || h != screen.height || cdx == 0.0); +- +- int oldh = screen.height; +- if (isResize) { +- // Resize before we trigger the redraw, or we'll redraw at the wrong size +- COCOA_DEBUG("switchSurface: new size %d x %d\n", w, h); +- screen.width = w; +- screen.height = h; +- [self setContentDimensions]; +- [self setFrame:NSMakeRect(cx, cy, cw, ch)]; +- } +- +- // update screenBuffer +- if (pixman_image) { +- pixman_image_unref(pixman_image); +- } +- +- pixman_image = image; +- +- // update windows +- if (isFullscreen) { +- [[fullScreenWindow contentView] setFrame:[[NSScreen mainScreen] frame]]; +- [normalWindow setFrame:NSMakeRect([normalWindow frame].origin.x, [normalWindow frame].origin.y - h + oldh, w, h + [normalWindow frame].size.height - oldh) display:NO animate:NO]; +- } else { +- if (qemu_name) +- [normalWindow setTitle:[NSString stringWithFormat:@"QEMU %s", qemu_name]]; +- [normalWindow setFrame:NSMakeRect([normalWindow frame].origin.x, [normalWindow frame].origin.y - h + oldh, w, h + [normalWindow frame].size.height - oldh) display:YES animate:NO]; +- } +- +- if (isResize) { +- [normalWindow center]; +- } +-} +- +-- (void) toggleFullScreen:(id)sender +-{ +- COCOA_DEBUG("QemuCocoaView: toggleFullScreen\n"); +- +- if (isFullscreen) { // switch from fullscreen to desktop +- isFullscreen = FALSE; +- [self ungrabMouse]; +- [self setContentDimensions]; +- [fullScreenWindow close]; +- [normalWindow setContentView: self]; +- [normalWindow makeKeyAndOrderFront: self]; +- [NSMenu setMenuBarVisible:YES]; +- } else { // switch from desktop to fullscreen +- isFullscreen = TRUE; +- [normalWindow orderOut: nil]; /* Hide the window */ +- [self grabMouse]; +- [self setContentDimensions]; +- [NSMenu setMenuBarVisible:NO]; +- fullScreenWindow = [[NSWindow alloc] initWithContentRect:[[NSScreen mainScreen] frame] +- styleMask:NSWindowStyleMaskBorderless +- backing:NSBackingStoreBuffered +- defer:NO]; +- [fullScreenWindow setAcceptsMouseMovedEvents: YES]; +- [fullScreenWindow setHasShadow:NO]; +- [fullScreenWindow setBackgroundColor: [NSColor blackColor]]; +- [self setFrame:NSMakeRect(cx, cy, cw, ch)]; +- [[fullScreenWindow contentView] addSubview: self]; +- [fullScreenWindow makeKeyAndOrderFront:self]; +- } +-} +- +-- (void) toggleKey: (int)keycode { +- qkbd_state_key_event(kbd, keycode, !qkbd_state_key_get(kbd, keycode)); +-} +- +-// Does the work of sending input to the monitor +-- (void) handleMonitorInput:(NSEvent *)event +-{ +- int keysym = 0; +- int control_key = 0; +- +- // if the control key is down +- if ([event modifierFlags] & NSEventModifierFlagControl) { +- control_key = 1; +- } +- +- /* translates Macintosh keycodes to QEMU's keysym */ +- +- int without_control_translation[] = { +- [0 ... 0xff] = 0, // invalid key +- +- [kVK_UpArrow] = QEMU_KEY_UP, +- [kVK_DownArrow] = QEMU_KEY_DOWN, +- [kVK_RightArrow] = QEMU_KEY_RIGHT, +- [kVK_LeftArrow] = QEMU_KEY_LEFT, +- [kVK_Home] = QEMU_KEY_HOME, +- [kVK_End] = QEMU_KEY_END, +- [kVK_PageUp] = QEMU_KEY_PAGEUP, +- [kVK_PageDown] = QEMU_KEY_PAGEDOWN, +- [kVK_ForwardDelete] = QEMU_KEY_DELETE, +- [kVK_Delete] = QEMU_KEY_BACKSPACE, +- }; +- +- int with_control_translation[] = { +- [0 ... 
0xff] = 0, // invalid key +- +- [kVK_UpArrow] = QEMU_KEY_CTRL_UP, +- [kVK_DownArrow] = QEMU_KEY_CTRL_DOWN, +- [kVK_RightArrow] = QEMU_KEY_CTRL_RIGHT, +- [kVK_LeftArrow] = QEMU_KEY_CTRL_LEFT, +- [kVK_Home] = QEMU_KEY_CTRL_HOME, +- [kVK_End] = QEMU_KEY_CTRL_END, +- [kVK_PageUp] = QEMU_KEY_CTRL_PAGEUP, +- [kVK_PageDown] = QEMU_KEY_CTRL_PAGEDOWN, +- }; +- +- if (control_key != 0) { /* If the control key is being used */ +- if ([event keyCode] < ARRAY_SIZE(with_control_translation)) { +- keysym = with_control_translation[[event keyCode]]; +- } +- } else { +- if ([event keyCode] < ARRAY_SIZE(without_control_translation)) { +- keysym = without_control_translation[[event keyCode]]; +- } +- } +- +- // if not a key that needs translating +- if (keysym == 0) { +- NSString *ks = [event characters]; +- if ([ks length] > 0) { +- keysym = [ks characterAtIndex:0]; +- } +- } +- +- if (keysym) { +- kbd_put_keysym(keysym); +- } +-} +- +-- (bool) handleEvent:(NSEvent *)event +-{ +- if(!allow_events) { +- /* +- * Just let OSX have all events that arrive before +- * applicationDidFinishLaunching. +- * This avoids a deadlock on the iothread lock, which cocoa_display_init() +- * will not drop until after the app_started_sem is posted. (In theory +- * there should not be any such events, but OSX Catalina now emits some.) +- */ +- return false; +- } +- return bool_with_iothread_lock(^{ +- return [self handleEventLocked:event]; +- }); +-} +- +-- (bool) handleEventLocked:(NSEvent *)event +-{ +- /* Return true if we handled the event, false if it should be given to OSX */ +- COCOA_DEBUG("QemuCocoaView: handleEvent\n"); +- int buttons = 0; +- int keycode = 0; +- bool mouse_event = false; +- static bool switched_to_fullscreen = false; +- // Location of event in virtual screen coordinates +- NSPoint p = [self screenLocationOfEvent:event]; +- NSUInteger modifiers = [event modifierFlags]; +- +- /* +- * Check -[NSEvent modifierFlags] here. +- * +- * There is an NSEventType for an event notifying the change of +- * -[NSEvent modifierFlags], NSEventTypeFlagsChanged, but these operations +- * are performed for all events because a modifier state may change while +- * the application is inactive (i.e. no events fire) and we don't want to +- * wait for another modifier state change to detect such a change. +- * +- * NSEventModifierFlagCapsLock requires special treatment. The other flags +- * are handled in a similar manner. +- * +- * NSEventModifierFlagCapsLock +- * --------------------------- +- * +- * If the CapsLock state is changed, "up" and "down" events will be fired in +- * sequence, effectively updating the CapsLock state on the guest. +- * +- * The other flags +- * --------------- +- * +- * If a flag is not set, fire "up" events for all keys which correspond to +- * the flag. Note that "down" events are not fired here because the flags +- * checked here do not tell which exact keys are down. +- * +- * If one of the keys corresponding to a flag is down, we rely on +- * -[NSEvent keyCode] of an event whose -[NSEvent type] is +- * NSEventTypeFlagsChanged to know the exact key which is down, which has +- * the following two downsides: +- * - It does not work when the application is inactive as described above. +- * - It malfunctions *after* the modifier state is changed while the +- * application is inactive. This is because -[NSEvent keyCode] does not tell +- * whether the key is up or down, and requires us to infer the current state from +- * the previous state.
It is still possible to fix such a malfunction by +- * completely taking your hands off the keyboard, which hopefully makes +- * this implementation usable enough. +- */ +- if (!!(modifiers & NSEventModifierFlagCapsLock) != +- qkbd_state_modifier_get(kbd, QKBD_MOD_CAPSLOCK)) { +- qkbd_state_key_event(kbd, Q_KEY_CODE_CAPS_LOCK, true); +- qkbd_state_key_event(kbd, Q_KEY_CODE_CAPS_LOCK, false); +- } +- +- if (!(modifiers & NSEventModifierFlagShift)) { +- qkbd_state_key_event(kbd, Q_KEY_CODE_SHIFT, false); +- qkbd_state_key_event(kbd, Q_KEY_CODE_SHIFT_R, false); +- } +- if (!(modifiers & NSEventModifierFlagControl)) { +- qkbd_state_key_event(kbd, Q_KEY_CODE_CTRL, false); +- qkbd_state_key_event(kbd, Q_KEY_CODE_CTRL_R, false); +- } +- if (!(modifiers & NSEventModifierFlagOption)) { +- qkbd_state_key_event(kbd, Q_KEY_CODE_ALT, false); +- qkbd_state_key_event(kbd, Q_KEY_CODE_ALT_R, false); +- } +- if (!(modifiers & NSEventModifierFlagCommand)) { +- qkbd_state_key_event(kbd, Q_KEY_CODE_META_L, false); +- qkbd_state_key_event(kbd, Q_KEY_CODE_META_R, false); +- } +- +- switch ([event type]) { +- case NSEventTypeFlagsChanged: +- switch ([event keyCode]) { +- case kVK_Shift: +- if (!!(modifiers & NSEventModifierFlagShift)) { +- [self toggleKey:Q_KEY_CODE_SHIFT]; +- } +- break; +- +- case kVK_RightShift: +- if (!!(modifiers & NSEventModifierFlagShift)) { +- [self toggleKey:Q_KEY_CODE_SHIFT_R]; +- } +- break; +- +- case kVK_Control: +- if (!!(modifiers & NSEventModifierFlagControl)) { +- [self toggleKey:Q_KEY_CODE_CTRL]; +- } +- break; +- +- case kVK_RightControl: +- if (!!(modifiers & NSEventModifierFlagControl)) { +- [self toggleKey:Q_KEY_CODE_CTRL_R]; +- } +- break; +- +- case kVK_Option: +- if (!!(modifiers & NSEventModifierFlagOption)) { +- [self toggleKey:Q_KEY_CODE_ALT]; +- } +- break; +- +- case kVK_RightOption: +- if (!!(modifiers & NSEventModifierFlagOption)) { +- [self toggleKey:Q_KEY_CODE_ALT_R]; +- } +- break; +- +- /* Don't pass command key changes to guest unless mouse is grabbed */ +- case kVK_Command: +- if (isMouseGrabbed && +- !!(modifiers & NSEventModifierFlagCommand)) { +- [self toggleKey:Q_KEY_CODE_META_L]; +- } +- break; +- +- case kVK_RightCommand: +- if (isMouseGrabbed && +- !!(modifiers & NSEventModifierFlagCommand)) { +- [self toggleKey:Q_KEY_CODE_META_R]; +- } +- break; +- } +- break; +- case NSEventTypeKeyDown: +- keycode = cocoa_keycode_to_qemu([event keyCode]); +- +- // forward command key combos to the host UI unless the mouse is grabbed +- if (!isMouseGrabbed && ([event modifierFlags] & NSEventModifierFlagCommand)) { +- /* +- * Prevent the command key from being stuck down in the guest +- * when using Command-F to switch to full screen mode. +- */ +- if (keycode == Q_KEY_CODE_F) { +- switched_to_fullscreen = true; +- } +- return false; +- } +- +- // default +- +- // handle control + alt Key Combos (ctrl+alt+[1..9,g] is reserved for QEMU) +- if (([event modifierFlags] & NSEventModifierFlagControl) && ([event modifierFlags] & NSEventModifierFlagOption)) { +- NSString *keychar = [event charactersIgnoringModifiers]; +- if ([keychar length] == 1) { +- char key = [keychar characterAtIndex:0]; +- switch (key) { +- +- // enable graphic console +- case '1' ...
'9': +- console_select(key - '0' - 1); /* ascii math */ +- return true; +- +- // release the mouse grab +- case 'g': +- [self ungrabMouse]; +- return true; +- } +- } +- } +- +- if (qemu_console_is_graphic(NULL)) { +- qkbd_state_key_event(kbd, keycode, true); +- } else { +- [self handleMonitorInput: event]; +- } +- break; +- case NSEventTypeKeyUp: +- keycode = cocoa_keycode_to_qemu([event keyCode]); +- +- // don't pass the guest a spurious key-up if we treated this +- // command-key combo as a host UI action +- if (!isMouseGrabbed && ([event modifierFlags] & NSEventModifierFlagCommand)) { +- return true; +- } +- +- if (qemu_console_is_graphic(NULL)) { +- qkbd_state_key_event(kbd, keycode, false); +- } +- break; +- case NSEventTypeMouseMoved: +- if (isAbsoluteEnabled) { +- // A cursor that has re-entered the window might generate events that are bound +- // to screen coordinates and have a `nil` window property; in full screen mode +- // the current window might not be the key window, so the event location alone +- // has to suffice. +- if (![self screenContainsPoint:p] || !([[self window] isKeyWindow] || isFullscreen)) { +- if (isMouseGrabbed) { +- [self ungrabMouse]; +- } +- } else { +- if (!isMouseGrabbed) { +- [self grabMouse]; +- } +- } +- } +- mouse_event = true; +- break; +- case NSEventTypeLeftMouseDown: +- buttons |= MOUSE_EVENT_LBUTTON; +- mouse_event = true; +- break; +- case NSEventTypeRightMouseDown: +- buttons |= MOUSE_EVENT_RBUTTON; +- mouse_event = true; +- break; +- case NSEventTypeOtherMouseDown: +- buttons |= MOUSE_EVENT_MBUTTON; +- mouse_event = true; +- break; +- case NSEventTypeLeftMouseDragged: +- buttons |= MOUSE_EVENT_LBUTTON; +- mouse_event = true; +- break; +- case NSEventTypeRightMouseDragged: +- buttons |= MOUSE_EVENT_RBUTTON; +- mouse_event = true; +- break; +- case NSEventTypeOtherMouseDragged: +- buttons |= MOUSE_EVENT_MBUTTON; +- mouse_event = true; +- break; +- case NSEventTypeLeftMouseUp: +- mouse_event = true; +- if (!isMouseGrabbed && [self screenContainsPoint:p]) { +- /* +- * In fullscreen mode, the window of cocoaView may not be the +- * key window, therefore the position relative to the virtual +- * screen alone will be sufficient. +- */ +- if(isFullscreen || [[self window] isKeyWindow]) { +- [self grabMouse]; +- } +- } +- break; +- case NSEventTypeRightMouseUp: +- mouse_event = true; +- break; +- case NSEventTypeOtherMouseUp: +- mouse_event = true; +- break; +- case NSEventTypeScrollWheel: +- /* +- * Send wheel events to the guest regardless of window focus. +- * This is in line with standard Mac OS X UI behaviour. +- */ +- +- /* +- * When deltaY is zero, it means that this scrolling event was +- * either horizontal, or so fine that it only appears in +- * scrollingDeltaY. So we drop the event. +- */ +- if ([event deltaY] != 0) { +- /* Determine if this is a scroll up or scroll down event */ +- buttons = ([event deltaY] > 0) ? +- INPUT_BUTTON_WHEEL_UP : INPUT_BUTTON_WHEEL_DOWN; +- qemu_input_queue_btn(dcl.con, buttons, true); +- qemu_input_event_sync(); +- qemu_input_queue_btn(dcl.con, buttons, false); +- qemu_input_event_sync(); +- } +- /* +- * Since deltaY also reports scroll wheel events, we prevent the mouse +- * movement code from executing. +- */ +- mouse_event = false; +- break; +- default: +- return false; +- } +- +- if (mouse_event) { +- /* Don't send button events to the guest unless we've got a +- * mouse grab or window focus.
If we have neither then this event +- * is the user clicking on the background window to activate and +- * bring us to the front, which will be done by the sendEvent +- * call below. We definitely don't want to pass that click through +- * to the guest. +- */ +- if ((isMouseGrabbed || [[self window] isKeyWindow]) && +- (last_buttons != buttons)) { +- static uint32_t bmap[INPUT_BUTTON__MAX] = { +- [INPUT_BUTTON_LEFT] = MOUSE_EVENT_LBUTTON, +- [INPUT_BUTTON_MIDDLE] = MOUSE_EVENT_MBUTTON, +- [INPUT_BUTTON_RIGHT] = MOUSE_EVENT_RBUTTON +- }; +- qemu_input_update_buttons(dcl.con, bmap, last_buttons, buttons); +- last_buttons = buttons; +- } +- if (isMouseGrabbed) { +- if (isAbsoluteEnabled) { +- /* Note that the origin for Cocoa mouse coords is bottom left, not top left. +- * The check on screenContainsPoint is to avoid sending out of range values for +- * clicks in the titlebar. +- */ +- if ([self screenContainsPoint:p]) { +- qemu_input_queue_abs(dcl.con, INPUT_AXIS_X, p.x, 0, screen.width); +- qemu_input_queue_abs(dcl.con, INPUT_AXIS_Y, screen.height - p.y, 0, screen.height); +- } +- } else { +- qemu_input_queue_rel(dcl.con, INPUT_AXIS_X, (int)[event deltaX]); +- qemu_input_queue_rel(dcl.con, INPUT_AXIS_Y, (int)[event deltaY]); +- } +- } else { +- return false; +- } +- qemu_input_event_sync(); +- } +- return true; +-} +- +-- (void) grabMouse +-{ +- COCOA_DEBUG("QemuCocoaView: grabMouse\n"); +- +- if (!isFullscreen) { +- if (qemu_name) +- [normalWindow setTitle:[NSString stringWithFormat:@"QEMU %s - (Press ctrl + alt + g to release Mouse)", qemu_name]]; +- else +- [normalWindow setTitle:@"QEMU - (Press ctrl + alt + g to release Mouse)"]; +- } +- [self hideCursor]; +- CGAssociateMouseAndMouseCursorPosition(isAbsoluteEnabled); +- isMouseGrabbed = TRUE; // while isMouseGrabbed = TRUE, QemuCocoaApp sends all events to [cocoaView handleEvent:] +-} +- +-- (void) ungrabMouse +-{ +- COCOA_DEBUG("QemuCocoaView: ungrabMouse\n"); +- +- if (!isFullscreen) { +- if (qemu_name) +- [normalWindow setTitle:[NSString stringWithFormat:@"QEMU %s", qemu_name]]; +- else +- [normalWindow setTitle:@"QEMU"]; +- } +- [self unhideCursor]; +- CGAssociateMouseAndMouseCursorPosition(TRUE); +- isMouseGrabbed = FALSE; +-} +- +-- (void) setAbsoluteEnabled:(BOOL)tIsAbsoluteEnabled { +- isAbsoluteEnabled = tIsAbsoluteEnabled; +- if (isMouseGrabbed) { +- CGAssociateMouseAndMouseCursorPosition(isAbsoluteEnabled); +- } +-} +-- (BOOL) isMouseGrabbed {return isMouseGrabbed;} +-- (BOOL) isAbsoluteEnabled {return isAbsoluteEnabled;} +-- (float) cdx {return cdx;} +-- (float) cdy {return cdy;} +-- (QEMUScreen) gscreen {return screen;} +- +-/* +- * Makes the target think all down keys are being released. +- * This prevents a stuck key problem, since we will not see +- * key up events for those keys after we have lost focus. 
+- */ +-- (void) raiseAllKeys +-{ +- with_iothread_lock(^{ +- qkbd_state_lift_all_keys(kbd); +- }); +-} +-@end +- +- +- +-/* +- ------------------------------------------------------ +- QemuCocoaAppController +- ------------------------------------------------------ +-*/ +-@interface QemuCocoaAppController : NSObject +- +-{ +-} +-- (void)doToggleFullScreen:(id)sender; +-- (void)toggleFullScreen:(id)sender; +-- (void)showQEMUDoc:(id)sender; +-- (void)zoomToFit:(id) sender; +-- (void)displayConsole:(id)sender; +-- (void)pauseQEMU:(id)sender; +-- (void)resumeQEMU:(id)sender; +-- (void)displayPause; +-- (void)removePause; +-- (void)restartQEMU:(id)sender; +-- (void)powerDownQEMU:(id)sender; +-- (void)ejectDeviceMedia:(id)sender; +-- (void)changeDeviceMedia:(id)sender; +-- (BOOL)verifyQuit; +-- (void)openDocumentation:(NSString *)filename; +-- (IBAction) do_about_menu_item: (id) sender; +-- (void)make_about_window; +-- (void)adjustSpeed:(id)sender; +-@end +- +-@implementation QemuCocoaAppController +-- (id) init +-{ +- COCOA_DEBUG("QemuCocoaAppController: init\n"); +- +- self = [super init]; +- if (self) { +- +- // create a view and add it to the window +- cocoaView = [[QemuCocoaView alloc] initWithFrame:NSMakeRect(0.0, 0.0, 640.0, 480.0)]; +- if(!cocoaView) { +- error_report("(cocoa) can't create a view"); +- exit(1); +- } +- +- // create a window +- normalWindow = [[NSWindow alloc] initWithContentRect:[cocoaView frame] +- styleMask:NSWindowStyleMaskTitled|NSWindowStyleMaskMiniaturizable|NSWindowStyleMaskClosable +- backing:NSBackingStoreBuffered defer:NO]; +- if(!normalWindow) { +- error_report("(cocoa) can't create window"); +- exit(1); +- } +- [normalWindow setAcceptsMouseMovedEvents:YES]; +- [normalWindow setTitle:@"QEMU"]; +- [normalWindow setContentView:cocoaView]; +- [normalWindow makeKeyAndOrderFront:self]; +- [normalWindow center]; +- [normalWindow setDelegate: self]; +- stretch_video = false; +- +- /* Used for displaying pause on the screen */ +- pauseLabel = [NSTextField new]; +- [pauseLabel setBezeled:YES]; +- [pauseLabel setDrawsBackground:YES]; +- [pauseLabel setBackgroundColor: [NSColor whiteColor]]; +- [pauseLabel setEditable:NO]; +- [pauseLabel setSelectable:NO]; +- [pauseLabel setStringValue: @"Paused"]; +- [pauseLabel setFont: [NSFont fontWithName: @"Helvetica" size: 90]]; +- [pauseLabel setTextColor: [NSColor blackColor]]; +- [pauseLabel sizeToFit]; +- +- // set the supported image file types that can be opened +- supportedImageFileTypes = [NSArray arrayWithObjects: @"img", @"iso", @"dmg", +- @"qcow", @"qcow2", @"cloop", @"vmdk", @"cdr", +- @"toast", nil]; +- [self make_about_window]; +- } +- return self; +-} +- +-- (void) dealloc +-{ +- COCOA_DEBUG("QemuCocoaAppController: dealloc\n"); +- +- if (cocoaView) +- [cocoaView release]; +- [super dealloc]; +-} +- +-- (void)applicationDidFinishLaunching: (NSNotification *) note +-{ +- COCOA_DEBUG("QemuCocoaAppController: applicationDidFinishLaunching\n"); +- allow_events = true; +- /* Tell cocoa_display_init to proceed */ +- qemu_sem_post(&app_started_sem); +-} +- +-- (void)applicationWillTerminate:(NSNotification *)aNotification +-{ +- COCOA_DEBUG("QemuCocoaAppController: applicationWillTerminate\n"); +- +- qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_UI); +- +- /* +- * Sleep here, because returning will cause OSX to kill us +- * immediately; the QEMU main loop will handle the shutdown +- * request and terminate the process. 
+- */ +- [NSThread sleepForTimeInterval:INFINITY]; +-} +- +-- (BOOL)applicationShouldTerminateAfterLastWindowClosed:(NSApplication *)theApplication +-{ +- return YES; +-} +- +-- (NSApplicationTerminateReply)applicationShouldTerminate: +- (NSApplication *)sender +-{ +- COCOA_DEBUG("QemuCocoaAppController: applicationShouldTerminate\n"); +- return [self verifyQuit]; +-} +- +-- (void)windowDidChangeScreen:(NSNotification *)notification +-{ +- [cocoaView updateUIInfo]; +-} +- +-- (void)windowDidResize:(NSNotification *)notification +-{ +- [cocoaView updateUIInfo]; +-} +- +-/* Called when the user clicks on a window's close button */ +-- (BOOL)windowShouldClose:(id)sender +-{ +- COCOA_DEBUG("QemuCocoaAppController: windowShouldClose\n"); +- [NSApp terminate: sender]; +- /* If the user allows the application to quit then the call to +- * NSApp terminate will never return. If we get here then the user +- * cancelled the quit, so we should return NO to not permit the +- * closing of this window. +- */ +- return NO; +-} +- +-/* Called when QEMU goes into the background */ +-- (void) applicationWillResignActive: (NSNotification *)aNotification +-{ +- COCOA_DEBUG("QemuCocoaAppController: applicationWillResignActive\n"); +- [cocoaView raiseAllKeys]; +-} +- +-/* We abstract the method called by the Enter Fullscreen menu item +- * because Mac OS 10.7 and higher disables it on account of the menu +- * item's old selector name, toggleFullScreen: +- */ +-- (void) doToggleFullScreen:(id)sender +-{ +- [self toggleFullScreen:(id)sender]; +-} +- +-- (void)toggleFullScreen:(id)sender +-{ +- COCOA_DEBUG("QemuCocoaAppController: toggleFullScreen\n"); +- +- [cocoaView toggleFullScreen:sender]; +-} +- +-/* Tries to find and open the specified filename */ +-- (void) openDocumentation: (NSString *) filename +-{ +- /* Where to look for local files */ +- NSString *path_array[] = {@"../share/doc/qemu/", @"../doc/qemu/", @"docs/"}; +- NSString *full_file_path; +- NSURL *full_file_url; +- +- /* iterate through the possible paths until the file is found */ +- int index; +- for (index = 0; index < ARRAY_SIZE(path_array); index++) { +- full_file_path = [[NSBundle mainBundle] executablePath]; +- full_file_path = [full_file_path stringByDeletingLastPathComponent]; +- full_file_path = [NSString stringWithFormat: @"%@/%@%@", full_file_path, +- path_array[index], filename]; +- full_file_url = [NSURL fileURLWithPath: full_file_path +- isDirectory: false]; +- if ([[NSWorkspace sharedWorkspace] openURL: full_file_url] == YES) { +- return; +- } +- } +- +- /* If none of the paths opened a file */ +- NSBeep(); +- QEMU_Alert(@"Failed to open file"); +-} +- +-- (void)showQEMUDoc:(id)sender +-{ +- COCOA_DEBUG("QemuCocoaAppController: showQEMUDoc\n"); +- +- [self openDocumentation: @"index.html"]; +-} +- +-/* Stretches video to fit host monitor size */ +-- (void)zoomToFit:(id) sender +-{ +- stretch_video = !stretch_video; +- if (stretch_video == true) { +- [sender setState: NSControlStateValueOn]; +- } else { +- [sender setState: NSControlStateValueOff]; +- } +-} +- +-/* Displays the console on the screen */ +-- (void)displayConsole:(id)sender +-{ +- console_select([sender tag]); +-} +- +-/* Pause the guest */ +-- (void)pauseQEMU:(id)sender +-{ +- with_iothread_lock(^{ +- qmp_stop(NULL); +- }); +- [sender setEnabled: NO]; +- [[[sender menu] itemWithTitle: @"Resume"] setEnabled: YES]; +- [self displayPause]; +-} +- +-/* Resume running the guest operating system */ +-- (void)resumeQEMU:(id) sender +-{ +- with_iothread_lock(^{ +-
qmp_cont(NULL); +- }); +- [sender setEnabled: NO]; +- [[[sender menu] itemWithTitle: @"Pause"] setEnabled: YES]; +- [self removePause]; +-} +- +-/* Displays the word pause on the screen */ +-- (void)displayPause +-{ +- /* Coordinates have to be calculated each time because the window can change its size */ +- int xCoord, yCoord, width, height; +- xCoord = ([normalWindow frame].size.width - [pauseLabel frame].size.width)/2; +- yCoord = [normalWindow frame].size.height - [pauseLabel frame].size.height - ([pauseLabel frame].size.height * .5); +- width = [pauseLabel frame].size.width; +- height = [pauseLabel frame].size.height; +- [pauseLabel setFrame: NSMakeRect(xCoord, yCoord, width, height)]; +- [cocoaView addSubview: pauseLabel]; +-} +- +-/* Removes the word pause from the screen */ +-- (void)removePause +-{ +- [pauseLabel removeFromSuperview]; +-} +- +-/* Restarts QEMU */ +-- (void)restartQEMU:(id)sender +-{ +- with_iothread_lock(^{ +- qmp_system_reset(NULL); +- }); +-} +- +-/* Powers down QEMU */ +-- (void)powerDownQEMU:(id)sender +-{ +- with_iothread_lock(^{ +- qmp_system_powerdown(NULL); +- }); +-} +- +-/* Ejects the media. +- * Uses sender's tag to figure out the device to eject. +- */ +-- (void)ejectDeviceMedia:(id)sender +-{ +- NSString * drive; +- drive = [sender representedObject]; +- if(drive == nil) { +- NSBeep(); +- QEMU_Alert(@"Failed to find drive to eject!"); +- return; +- } +- +- __block Error *err = NULL; +- with_iothread_lock(^{ +- qmp_eject(true, [drive cStringUsingEncoding: NSASCIIStringEncoding], +- false, NULL, false, false, &err); +- }); +- handleAnyDeviceErrors(err); +-} +- +-/* Displays a dialog box asking the user to select an image file to load. +- * Uses sender's represented object value to figure out which drive to use. 
+- */ +-- (void)changeDeviceMedia:(id)sender +-{ +- /* Find the drive name */ +- NSString * drive; +- drive = [sender representedObject]; +- if(drive == nil) { +- NSBeep(); +- QEMU_Alert(@"Could not find drive!"); +- return; +- } +- +- /* Display the file open dialog */ +- NSOpenPanel * openPanel; +- openPanel = [NSOpenPanel openPanel]; +- [openPanel setCanChooseFiles: YES]; +- [openPanel setAllowsMultipleSelection: NO]; +- [openPanel setAllowedFileTypes: supportedImageFileTypes]; +- if([openPanel runModal] == NSModalResponseOK) { +- NSString * file = [[[openPanel URLs] objectAtIndex: 0] path]; +- if(file == nil) { +- NSBeep(); +- QEMU_Alert(@"Failed to convert URL to file path!"); +- return; +- } +- +- __block Error *err = NULL; +- with_iothread_lock(^{ +- qmp_blockdev_change_medium(true, +- [drive cStringUsingEncoding: +- NSASCIIStringEncoding], +- false, NULL, +- [file cStringUsingEncoding: +- NSASCIIStringEncoding], +- true, "raw", +- false, 0, +- &err); +- }); +- handleAnyDeviceErrors(err); +- } +-} +- +-/* Verifies if the user really wants to quit */ +-- (BOOL)verifyQuit +-{ +- NSAlert *alert = [NSAlert new]; +- [alert autorelease]; +- [alert setMessageText: @"Are you sure you want to quit QEMU?"]; +- [alert addButtonWithTitle: @"Cancel"]; +- [alert addButtonWithTitle: @"Quit"]; +- if([alert runModal] == NSAlertSecondButtonReturn) { +- return YES; +- } else { +- return NO; +- } +-} +- +-/* The action method for the About menu item */ +-- (IBAction) do_about_menu_item: (id) sender +-{ +- [about_window makeKeyAndOrderFront: nil]; +-} +- +-/* Create and display the about dialog */ +-- (void)make_about_window +-{ +- /* Make the window */ +- int x = 0, y = 0, about_width = 400, about_height = 200; +- NSRect window_rect = NSMakeRect(x, y, about_width, about_height); +- about_window = [[NSWindow alloc] initWithContentRect:window_rect +- styleMask:NSWindowStyleMaskTitled | NSWindowStyleMaskClosable | +- NSWindowStyleMaskMiniaturizable +- backing:NSBackingStoreBuffered +- defer:NO]; +- [about_window setTitle: @"About"]; +- [about_window setReleasedWhenClosed: NO]; +- [about_window center]; +- NSView *superView = [about_window contentView]; +- +- /* Create the dimensions of the picture */ +- int picture_width = 80, picture_height = 80; +- x = (about_width - picture_width)/2; +- y = about_height - picture_height - 10; +- NSRect picture_rect = NSMakeRect(x, y, picture_width, picture_height); +- +- /* Make the picture of QEMU */ +- NSImageView *picture_view = [[NSImageView alloc] initWithFrame: +- picture_rect]; +- char *qemu_image_path_c = get_relocated_path(CONFIG_QEMU_ICONDIR "/hicolor/512x512/apps/qemu.png"); +- NSString *qemu_image_path = [NSString stringWithUTF8String:qemu_image_path_c]; +- g_free(qemu_image_path_c); +- NSImage *qemu_image = [[NSImage alloc] initWithContentsOfFile:qemu_image_path]; +- [picture_view setImage: qemu_image]; +- [picture_view setImageScaling: NSImageScaleProportionallyUpOrDown]; +- [superView addSubview: picture_view]; +- +- /* Make the name label */ +- NSBundle *bundle = [NSBundle mainBundle]; +- if (bundle) { +- x = 0; +- y = y - 25; +- int name_width = about_width, name_height = 20; +- NSRect name_rect = NSMakeRect(x, y, name_width, name_height); +- NSTextField *name_label = [[NSTextField alloc] initWithFrame: name_rect]; +- [name_label setEditable: NO]; +- [name_label setBezeled: NO]; +- [name_label setDrawsBackground: NO]; +- [name_label setAlignment: NSTextAlignmentCenter]; +- NSString *qemu_name = [[bundle executablePath] lastPathComponent]; +- 
[name_label setStringValue: qemu_name]; +- [superView addSubview: name_label]; +- } +- +- /* Set the version label's attributes */ +- x = 0; +- y = 50; +- int version_width = about_width, version_height = 20; +- NSRect version_rect = NSMakeRect(x, y, version_width, version_height); +- NSTextField *version_label = [[NSTextField alloc] initWithFrame: +- version_rect]; +- [version_label setEditable: NO]; +- [version_label setBezeled: NO]; +- [version_label setAlignment: NSTextAlignmentCenter]; +- [version_label setDrawsBackground: NO]; +- +- /* Create the version string*/ +- NSString *version_string; +- version_string = [[NSString alloc] initWithFormat: +- @"QEMU emulator version %s", QEMU_FULL_VERSION]; +- [version_label setStringValue: version_string]; +- [superView addSubview: version_label]; +- +- /* Make copyright label */ +- x = 0; +- y = 35; +- int copyright_width = about_width, copyright_height = 20; +- NSRect copyright_rect = NSMakeRect(x, y, copyright_width, copyright_height); +- NSTextField *copyright_label = [[NSTextField alloc] initWithFrame: +- copyright_rect]; +- [copyright_label setEditable: NO]; +- [copyright_label setBezeled: NO]; +- [copyright_label setDrawsBackground: NO]; +- [copyright_label setAlignment: NSTextAlignmentCenter]; +- [copyright_label setStringValue: [NSString stringWithFormat: @"%s", +- QEMU_COPYRIGHT]]; +- [superView addSubview: copyright_label]; +-} +- +-/* Used by the Speed menu items */ +-- (void)adjustSpeed:(id)sender +-{ +- int throttle_pct; /* throttle percentage */ +- NSMenu *menu; +- +- menu = [sender menu]; +- if (menu != nil) +- { +- /* Unselect the currently selected item */ +- for (NSMenuItem *item in [menu itemArray]) { +- if (item.state == NSControlStateValueOn) { +- [item setState: NSControlStateValueOff]; +- break; +- } +- } +- } +- +- // check the menu item +- [sender setState: NSControlStateValueOn]; +- +- // get the throttle percentage +- throttle_pct = [sender tag]; +- +- with_iothread_lock(^{ +- cpu_throttle_set(throttle_pct); +- }); +- COCOA_DEBUG("cpu throttling at %d%c\n", cpu_throttle_get_percentage(), '%'); +-} +- +-@end +- +-@interface QemuApplication : NSApplication +-@end +- +-@implementation QemuApplication +-- (void)sendEvent:(NSEvent *)event +-{ +- COCOA_DEBUG("QemuApplication: sendEvent\n"); +- if (![cocoaView handleEvent:event]) { +- [super sendEvent: event]; +- } +-} +-@end +- +-static void create_initial_menus(void) +-{ +- // Add menus +- NSMenu *menu; +- NSMenuItem *menuItem; +- +- [NSApp setMainMenu:[[NSMenu alloc] init]]; +- +- // Application menu +- menu = [[NSMenu alloc] initWithTitle:@""]; +- [menu addItemWithTitle:@"About QEMU" action:@selector(do_about_menu_item:) keyEquivalent:@""]; // About QEMU +- [menu addItem:[NSMenuItem separatorItem]]; //Separator +- [menu addItemWithTitle:@"Hide QEMU" action:@selector(hide:) keyEquivalent:@"h"]; //Hide QEMU +- menuItem = (NSMenuItem *)[menu addItemWithTitle:@"Hide Others" action:@selector(hideOtherApplications:) keyEquivalent:@"h"]; // Hide Others +- [menuItem setKeyEquivalentModifierMask:(NSEventModifierFlagOption|NSEventModifierFlagCommand)]; +- [menu addItemWithTitle:@"Show All" action:@selector(unhideAllApplications:) keyEquivalent:@""]; // Show All +- [menu addItem:[NSMenuItem separatorItem]]; //Separator +- [menu addItemWithTitle:@"Quit QEMU" action:@selector(terminate:) keyEquivalent:@"q"]; +- menuItem = [[NSMenuItem alloc] initWithTitle:@"Apple" action:nil keyEquivalent:@""]; +- [menuItem setSubmenu:menu]; +- [[NSApp mainMenu] addItem:menuItem]; +- [NSApp 
performSelector:@selector(setAppleMenu:) withObject:menu]; // Workaround (this method is private since 10.4+) +- +- // Machine menu +- menu = [[NSMenu alloc] initWithTitle: @"Machine"]; +- [menu setAutoenablesItems: NO]; +- [menu addItem: [[[NSMenuItem alloc] initWithTitle: @"Pause" action: @selector(pauseQEMU:) keyEquivalent: @""] autorelease]]; +- menuItem = [[[NSMenuItem alloc] initWithTitle: @"Resume" action: @selector(resumeQEMU:) keyEquivalent: @""] autorelease]; +- [menu addItem: menuItem]; +- [menuItem setEnabled: NO]; +- [menu addItem: [NSMenuItem separatorItem]]; +- [menu addItem: [[[NSMenuItem alloc] initWithTitle: @"Reset" action: @selector(restartQEMU:) keyEquivalent: @""] autorelease]]; +- [menu addItem: [[[NSMenuItem alloc] initWithTitle: @"Power Down" action: @selector(powerDownQEMU:) keyEquivalent: @""] autorelease]]; +- menuItem = [[[NSMenuItem alloc] initWithTitle: @"Machine" action:nil keyEquivalent:@""] autorelease]; +- [menuItem setSubmenu:menu]; +- [[NSApp mainMenu] addItem:menuItem]; +- +- // View menu +- menu = [[NSMenu alloc] initWithTitle:@"View"]; +- [menu addItem: [[[NSMenuItem alloc] initWithTitle:@"Enter Fullscreen" action:@selector(doToggleFullScreen:) keyEquivalent:@"f"] autorelease]]; // Fullscreen +- [menu addItem: [[[NSMenuItem alloc] initWithTitle:@"Zoom To Fit" action:@selector(zoomToFit:) keyEquivalent:@""] autorelease]]; +- menuItem = [[[NSMenuItem alloc] initWithTitle:@"View" action:nil keyEquivalent:@""] autorelease]; +- [menuItem setSubmenu:menu]; +- [[NSApp mainMenu] addItem:menuItem]; +- +- // Speed menu +- menu = [[NSMenu alloc] initWithTitle:@"Speed"]; +- +- // Add the rest of the Speed menu items +- int p, percentage, throttle_pct; +- for (p = 10; p >= 0; p--) +- { +- percentage = p * 10 > 1 ? p * 10 : 1; // prevent a 0% menu item +- +- menuItem = [[[NSMenuItem alloc] +- initWithTitle: [NSString stringWithFormat: @"%d%%", percentage] action:@selector(adjustSpeed:) keyEquivalent:@""] autorelease]; +- +- if (percentage == 100) { +- [menuItem setState: NSControlStateValueOn]; +- } +- +- /* Calculate the throttle percentage */ +- throttle_pct = -1 * percentage + 100; +- +- [menuItem setTag: throttle_pct]; +- [menu addItem: menuItem]; +- } +- menuItem = [[[NSMenuItem alloc] initWithTitle:@"Speed" action:nil keyEquivalent:@""] autorelease]; +- [menuItem setSubmenu:menu]; +- [[NSApp mainMenu] addItem:menuItem]; +- +- // Window menu +- menu = [[NSMenu alloc] initWithTitle:@"Window"]; +- [menu addItem: [[[NSMenuItem alloc] initWithTitle:@"Minimize" action:@selector(performMiniaturize:) keyEquivalent:@"m"] autorelease]]; // Miniaturize +- menuItem = [[[NSMenuItem alloc] initWithTitle:@"Window" action:nil keyEquivalent:@""] autorelease]; +- [menuItem setSubmenu:menu]; +- [[NSApp mainMenu] addItem:menuItem]; +- [NSApp setWindowsMenu:menu]; +- +- // Help menu +- menu = [[NSMenu alloc] initWithTitle:@"Help"]; +- [menu addItem: [[[NSMenuItem alloc] initWithTitle:@"QEMU Documentation" action:@selector(showQEMUDoc:) keyEquivalent:@"?"] autorelease]]; // QEMU Help +- menuItem = [[[NSMenuItem alloc] initWithTitle:@"Window" action:nil keyEquivalent:@""] autorelease]; +- [menuItem setSubmenu:menu]; +- [[NSApp mainMenu] addItem:menuItem]; +-} +- +-/* Returns a name for a given console */ +-static NSString * getConsoleName(QemuConsole * console) +-{ +- return [NSString stringWithFormat: @"%s", qemu_console_get_label(console)]; +-} +- +-/* Add an entry to the View menu for each console */ +-static void add_console_menu_entries(void) +-{ +- NSMenu *menu; +- 
NSMenuItem *menuItem; +- int index = 0; +- +- menu = [[[NSApp mainMenu] itemWithTitle:@"View"] submenu]; +- +- [menu addItem:[NSMenuItem separatorItem]]; +- +- while (qemu_console_lookup_by_index(index) != NULL) { +- menuItem = [[[NSMenuItem alloc] initWithTitle: getConsoleName(qemu_console_lookup_by_index(index)) +- action: @selector(displayConsole:) keyEquivalent: @""] autorelease]; +- [menuItem setTag: index]; +- [menu addItem: menuItem]; +- index++; +- } +-} +- +-/* Make menu items for all removable devices. +- * Each device is given an 'Eject' and 'Change' menu item. +- */ +-static void addRemovableDevicesMenuItems(void) +-{ +- NSMenu *menu; +- NSMenuItem *menuItem; +- BlockInfoList *currentDevice, *pointerToFree; +- NSString *deviceName; +- +- currentDevice = qmp_query_block(NULL); +- pointerToFree = currentDevice; +- if(currentDevice == NULL) { +- NSBeep(); +- QEMU_Alert(@"Failed to query for block devices!"); +- return; +- } +- +- menu = [[[NSApp mainMenu] itemWithTitle:@"Machine"] submenu]; +- +- // Add a separator between related groups of menu items +- [menu addItem:[NSMenuItem separatorItem]]; +- +- // Set the attributes to the "Removable Media" menu item +- NSString *titleString = @"Removable Media"; +- NSMutableAttributedString *attString=[[NSMutableAttributedString alloc] initWithString:titleString]; +- NSColor *newColor = [NSColor blackColor]; +- NSFontManager *fontManager = [NSFontManager sharedFontManager]; +- NSFont *font = [fontManager fontWithFamily:@"Helvetica" +- traits:NSBoldFontMask|NSItalicFontMask +- weight:0 +- size:14]; +- [attString addAttribute:NSFontAttributeName value:font range:NSMakeRange(0, [titleString length])]; +- [attString addAttribute:NSForegroundColorAttributeName value:newColor range:NSMakeRange(0, [titleString length])]; +- [attString addAttribute:NSUnderlineStyleAttributeName value:[NSNumber numberWithInt: 1] range:NSMakeRange(0, [titleString length])]; +- +- // Add the "Removable Media" menu item +- menuItem = [NSMenuItem new]; +- [menuItem setAttributedTitle: attString]; +- [menuItem setEnabled: NO]; +- [menu addItem: menuItem]; +- +- /* Loop through all the block devices in the emulator */ +- while (currentDevice) { +- deviceName = [[NSString stringWithFormat: @"%s", currentDevice->value->device] retain]; +- +- if(currentDevice->value->removable) { +- menuItem = [[NSMenuItem alloc] initWithTitle: [NSString stringWithFormat: @"Change %s...", currentDevice->value->device] +- action: @selector(changeDeviceMedia:) +- keyEquivalent: @""]; +- [menu addItem: menuItem]; +- [menuItem setRepresentedObject: deviceName]; +- [menuItem autorelease]; +- +- menuItem = [[NSMenuItem alloc] initWithTitle: [NSString stringWithFormat: @"Eject %s", currentDevice->value->device] +- action: @selector(ejectDeviceMedia:) +- keyEquivalent: @""]; +- [menu addItem: menuItem]; +- [menuItem setRepresentedObject: deviceName]; +- [menuItem autorelease]; +- } +- currentDevice = currentDevice->next; +- } +- qapi_free_BlockInfoList(pointerToFree); +-} +- +-@interface QemuCocoaPasteboardTypeOwner : NSObject +-@end +- +-@implementation QemuCocoaPasteboardTypeOwner +- +-- (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)type +-{ +- if (type != NSPasteboardTypeString) { +- return; +- } +- +- with_iothread_lock(^{ +- QemuClipboardInfo *info = qemu_clipboard_info_ref(cbinfo); +- qemu_event_reset(&cbevent); +- qemu_clipboard_request(info, QEMU_CLIPBOARD_TYPE_TEXT); +- +- while (info == cbinfo && +- info->types[QEMU_CLIPBOARD_TYPE_TEXT].available && +- 
info->types[QEMU_CLIPBOARD_TYPE_TEXT].data == NULL) { +- qemu_mutex_unlock_iothread(); +- qemu_event_wait(&cbevent); +- qemu_mutex_lock_iothread(); +- } +- +- if (info == cbinfo) { +- NSData *data = [[NSData alloc] initWithBytes:info->types[QEMU_CLIPBOARD_TYPE_TEXT].data +- length:info->types[QEMU_CLIPBOARD_TYPE_TEXT].size]; +- [sender setData:data forType:NSPasteboardTypeString]; +- [data release]; +- } +- +- qemu_clipboard_info_unref(info); +- }); +-} +- +-@end +- +-static QemuCocoaPasteboardTypeOwner *cbowner; +- +-static void cocoa_clipboard_notify(Notifier *notifier, void *data); +-static void cocoa_clipboard_request(QemuClipboardInfo *info, +- QemuClipboardType type); +- +-static QemuClipboardPeer cbpeer = { +- .name = "cocoa", +- .update = { .notify = cocoa_clipboard_notify }, +- .request = cocoa_clipboard_request +-}; +- +-static void cocoa_clipboard_notify(Notifier *notifier, void *data) +-{ +- QemuClipboardInfo *info = data; +- +- if (info->owner == &cbpeer || info->selection != QEMU_CLIPBOARD_SELECTION_CLIPBOARD) { +- return; +- } +- +- if (info != cbinfo) { +- NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; +- qemu_clipboard_info_unref(cbinfo); +- cbinfo = qemu_clipboard_info_ref(info); +- cbchangecount = [[NSPasteboard generalPasteboard] declareTypes:@[NSPasteboardTypeString] owner:cbowner]; +- [pool release]; +- } +- +- qemu_event_set(&cbevent); +-} +- +-static void cocoa_clipboard_request(QemuClipboardInfo *info, +- QemuClipboardType type) +-{ +- NSData *text; +- +- switch (type) { +- case QEMU_CLIPBOARD_TYPE_TEXT: +- text = [[NSPasteboard generalPasteboard] dataForType:NSPasteboardTypeString]; +- if (text) { +- qemu_clipboard_set_data(&cbpeer, info, type, +- [text length], [text bytes], true); +- [text release]; +- } +- break; +- default: +- break; +- } +-} +- +-/* +- * The startup process for the OSX/Cocoa UI is complicated, because +- * OSX insists that the UI runs on the initial main thread, and so we +- * need to start a second thread which runs the vl.c qemu_main(): +- * +- * Initial thread: 2nd thread: +- * in main(): +- * create qemu-main thread +- * wait on display_init semaphore +- * call qemu_main() +- * ... +- * in cocoa_display_init(): +- * post the display_init semaphore +- * wait on app_started semaphore +- * create application, menus, etc +- * enter OSX run loop +- * in applicationDidFinishLaunching: +- * post app_started semaphore +- * tell main thread to fullscreen if needed +- * [...] +- * run qemu main-loop +- * +- * We do this in two stages so that we don't do the creation of the +- * GUI application menus and so on for command line options like --help +- * where we want to just print text to stdout and exit immediately. 
+- */ +- +-static void *call_qemu_main(void *opaque) +-{ +- int status; +- +- COCOA_DEBUG("Second thread: calling qemu_main()\n"); +- status = qemu_main(gArgc, gArgv, *_NSGetEnviron()); +- COCOA_DEBUG("Second thread: qemu_main() returned, exiting\n"); +- [cbowner release]; +- exit(status); +-} +- +-int main (int argc, const char * argv[]) { +- QemuThread thread; +- +- COCOA_DEBUG("Entered main()\n"); +- gArgc = argc; +- gArgv = (char **)argv; +- +- qemu_sem_init(&display_init_sem, 0); +- qemu_sem_init(&app_started_sem, 0); +- +- qemu_thread_create(&thread, "qemu_main", call_qemu_main, +- NULL, QEMU_THREAD_DETACHED); +- +- COCOA_DEBUG("Main thread: waiting for display_init_sem\n"); +- qemu_sem_wait(&display_init_sem); +- COCOA_DEBUG("Main thread: initializing app\n"); +- +- NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; +- +- // Pull this console process up to being a fully-fledged graphical +- // app with a menubar and Dock icon +- ProcessSerialNumber psn = { 0, kCurrentProcess }; +- TransformProcessType(&psn, kProcessTransformToForegroundApplication); +- +- [QemuApplication sharedApplication]; +- +- create_initial_menus(); +- +- /* +- * Create the menu entries which depend on QEMU state (for consoles +- * and removable devices). These make calls back into QEMU functions, +- * which is OK because at this point we know that the second thread +- * holds the iothread lock and is synchronously waiting for us to +- * finish. +- */ +- add_console_menu_entries(); +- addRemovableDevicesMenuItems(); +- +- // Create an Application controller +- QemuCocoaAppController *appController = [[QemuCocoaAppController alloc] init]; +- [NSApp setDelegate:appController]; +- +- // Start the main event loop +- COCOA_DEBUG("Main thread: entering OSX run loop\n"); +- [NSApp run]; +- COCOA_DEBUG("Main thread: left OSX run loop, exiting\n"); +- +- [appController release]; +- [pool release]; +- +- return 0; +-} +- +- +- +-#pragma mark qemu +-static void cocoa_update(DisplayChangeListener *dcl, +- int x, int y, int w, int h) +-{ +- NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; +- +- COCOA_DEBUG("qemu_cocoa: cocoa_update\n"); +- +- dispatch_async(dispatch_get_main_queue(), ^{ +- NSRect rect; +- if ([cocoaView cdx] == 1.0) { +- rect = NSMakeRect(x, [cocoaView gscreen].height - y - h, w, h); +- } else { +- rect = NSMakeRect( +- x * [cocoaView cdx], +- ([cocoaView gscreen].height - y - h) * [cocoaView cdy], +- w * [cocoaView cdx], +- h * [cocoaView cdy]); +- } +- [cocoaView setNeedsDisplayInRect:rect]; +- }); +- +- [pool release]; +-} +- +-static void cocoa_switch(DisplayChangeListener *dcl, +- DisplaySurface *surface) +-{ +- NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; +- pixman_image_t *image = surface->image; +- +- COCOA_DEBUG("qemu_cocoa: cocoa_switch\n"); +- +- [cocoaView updateUIInfo]; +- +- // The DisplaySurface will be freed as soon as this callback returns. +- // We take a reference to the underlying pixman image here so it does +- // not disappear from under our feet; the switchSurface method will +- // deref the old image when it is done with it.
+- pixman_image_ref(image); +- +- dispatch_async(dispatch_get_main_queue(), ^{ +- [cocoaView switchSurface:image]; +- }); +- [pool release]; +-} +- +-static void cocoa_refresh(DisplayChangeListener *dcl) +-{ +- NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; +- +- COCOA_DEBUG("qemu_cocoa: cocoa_refresh\n"); +- graphic_hw_update(NULL); +- +- if (qemu_input_is_absolute()) { +- dispatch_async(dispatch_get_main_queue(), ^{ +- if (![cocoaView isAbsoluteEnabled]) { +- if ([cocoaView isMouseGrabbed]) { +- [cocoaView ungrabMouse]; +- } +- } +- [cocoaView setAbsoluteEnabled:YES]; +- }); +- } +- +- if (cbchangecount != [[NSPasteboard generalPasteboard] changeCount]) { +- qemu_clipboard_info_unref(cbinfo); +- cbinfo = qemu_clipboard_info_new(&cbpeer, QEMU_CLIPBOARD_SELECTION_CLIPBOARD); +- if ([[NSPasteboard generalPasteboard] availableTypeFromArray:@[NSPasteboardTypeString]]) { +- cbinfo->types[QEMU_CLIPBOARD_TYPE_TEXT].available = true; +- } +- qemu_clipboard_update(cbinfo); +- cbchangecount = [[NSPasteboard generalPasteboard] changeCount]; +- qemu_event_set(&cbevent); +- } +- +- [pool release]; +-} +- +-static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) +-{ +- COCOA_DEBUG("qemu_cocoa: cocoa_display_init\n"); +- +- /* Tell main thread to go ahead and create the app and enter the run loop */ +- qemu_sem_post(&display_init_sem); +- qemu_sem_wait(&app_started_sem); +- COCOA_DEBUG("cocoa_display_init: app start completed\n"); +- +- /* if fullscreen mode is to be used */ +- if (opts->has_full_screen && opts->full_screen) { +- dispatch_async(dispatch_get_main_queue(), ^{ +- [NSApp activateIgnoringOtherApps: YES]; +- [(QemuCocoaAppController *)[[NSApplication sharedApplication] delegate] toggleFullScreen: nil]; +- }); +- } +- if (opts->has_show_cursor && opts->show_cursor) { +- cursor_hide = 0; +- } +- +- // register vga output callbacks +- register_displaychangelistener(&dcl); +- +- qemu_event_init(&cbevent, false); +- cbowner = [[QemuCocoaPasteboardTypeOwner alloc] init]; +- qemu_clipboard_peer_register(&cbpeer); +-} +- +-static QemuDisplay qemu_display_cocoa = { +- .type = DISPLAY_TYPE_COCOA, +- .init = cocoa_display_init, +-}; +- +-static void register_cocoa(void) +-{ +- qemu_display_register(&qemu_display_cocoa); +-} +- +-type_init(register_cocoa); +diff --git a/ui/cocoa/app_controller.m b/ui/cocoa/app_controller.m +new file mode 100644 +index 0000000000..d79ffd4f7b +--- /dev/null ++++ b/ui/cocoa/app_controller.m +@@ -0,0 +1,676 @@ ++/* ++ * QEMU Cocoa CG display driver ++ * ++ * Copyright (c) 2008 Mike Kronenberg ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++ * THE SOFTWARE. ++ */ ++ ++#include "qemu/osdep.h" ++ ++#include "qemu-common.h" ++#include "ui/cocoa.h" ++#include "sysemu/sysemu.h" ++#include "sysemu/runstate.h" ++#include "sysemu/cpu-throttle.h" ++#include "qapi/error.h" ++#include "qapi/qapi-commands-block.h" ++#include "qapi/qapi-commands-machine.h" ++#include "qapi/qapi-commands-misc.h" ++#include "sysemu/blockdev.h" ++#include "qemu-version.h" ++#include "qemu/cutils.h" ++#include "qemu/main-loop.h" ++#include "qemu/module.h" ++#include "hw/core/cpu.h" ++ ++#ifndef MAC_OS_X_VERSION_10_13 ++#define MAC_OS_X_VERSION_10_13 101300 ++#endif ++ ++/* 10.14 deprecates NSOnState and NSOffState in favor of ++ * NSControlStateValueOn/Off, which were introduced in 10.13. ++ * Define for older versions ++ */ ++#if MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_13 ++#define NSControlStateValueOn NSOnState ++#define NSControlStateValueOff NSOffState ++#endif ++ ++// Utility function to run specified code block with iothread lock held ++static void with_iothread_lock(CodeBlock block) ++{ ++ bool locked = qemu_mutex_iothread_locked(); ++ if (!locked) { ++ qemu_mutex_lock_iothread(); ++ } ++ block(); ++ if (!locked) { ++ qemu_mutex_unlock_iothread(); ++ } ++} ++ ++/* Displays an alert dialog box with the specified message */ ++static void QEMU_Alert(NSString *message) ++{ ++ NSAlert *alert; ++ alert = [NSAlert new]; ++ [alert setMessageText: message]; ++ [alert runModal]; ++} ++ ++/* Handles any errors that happen with a device transaction */ ++static void handleAnyDeviceErrors(Error * err) ++{ ++ if (err) { ++ QEMU_Alert([NSString stringWithCString: error_get_pretty(err) ++ encoding: NSASCIIStringEncoding]); ++ error_free(err); ++ } ++} ++ ++static void create_initial_menus(void) ++{ ++ // Add menus ++ NSMenu *menu; ++ NSMenuItem *menuItem; ++ ++ [NSApp setMainMenu:[[NSMenu alloc] init]]; ++ ++ // Application menu ++ menu = [[NSMenu alloc] initWithTitle:@""]; ++ [menu addItemWithTitle:@"About QEMU" action:@selector(do_about_menu_item:) keyEquivalent:@""]; // About QEMU ++ [menu addItem:[NSMenuItem separatorItem]]; //Separator ++ [menu addItemWithTitle:@"Hide QEMU" action:@selector(hide:) keyEquivalent:@"h"]; //Hide QEMU ++ menuItem = (NSMenuItem *)[menu addItemWithTitle:@"Hide Others" action:@selector(hideOtherApplications:) keyEquivalent:@"h"]; // Hide Others ++ [menuItem setKeyEquivalentModifierMask:(NSEventModifierFlagOption|NSEventModifierFlagCommand)]; ++ [menu addItemWithTitle:@"Show All" action:@selector(unhideAllApplications:) keyEquivalent:@""]; // Show All ++ [menu addItem:[NSMenuItem separatorItem]]; //Separator ++ [menu addItemWithTitle:@"Quit QEMU" action:@selector(terminate:) keyEquivalent:@"q"]; ++ menuItem = [[NSMenuItem alloc] initWithTitle:@"Apple" action:nil keyEquivalent:@""]; ++ [menuItem setSubmenu:menu]; ++ [[NSApp mainMenu] addItem:menuItem]; ++ [NSApp performSelector:@selector(setAppleMenu:) withObject:menu]; // Workaround (this method is private since 10.4+) ++ ++ // Machine menu ++ menu = [[NSMenu alloc] initWithTitle: @"Machine"]; ++ [menu setAutoenablesItems: NO]; ++ [menu addItem: [[[NSMenuItem alloc] initWithTitle: @"Pause" action: @selector(pauseQEMU:) keyEquivalent: @""] autorelease]]; ++ menuItem = [[[NSMenuItem alloc] initWithTitle: @"Resume" action: 
@selector(resumeQEMU:) keyEquivalent: @""] autorelease];
++    [menu addItem: menuItem];
++    [menuItem setEnabled: NO];
++    [menu addItem: [NSMenuItem separatorItem]];
++    [menu addItem: [[[NSMenuItem alloc] initWithTitle: @"Reset" action: @selector(restartQEMU:) keyEquivalent: @""] autorelease]];
++    [menu addItem: [[[NSMenuItem alloc] initWithTitle: @"Power Down" action: @selector(powerDownQEMU:) keyEquivalent: @""] autorelease]];
++    menuItem = [[[NSMenuItem alloc] initWithTitle: @"Machine" action:nil keyEquivalent:@""] autorelease];
++    [menuItem setSubmenu:menu];
++    [[NSApp mainMenu] addItem:menuItem];
++
++    // View menu
++    menu = [[NSMenu alloc] initWithTitle:@"View"];
++    [menu addItem: [[[NSMenuItem alloc] initWithTitle:@"Enter Fullscreen" action:@selector(doToggleFullScreen:) keyEquivalent:@"f"] autorelease]]; // Fullscreen
++    [menu addItem: [[[NSMenuItem alloc] initWithTitle:@"Zoom To Fit" action:@selector(zoomToFit:) keyEquivalent:@""] autorelease]];
++    menuItem = [[[NSMenuItem alloc] initWithTitle:@"View" action:nil keyEquivalent:@""] autorelease];
++    [menuItem setSubmenu:menu];
++    [[NSApp mainMenu] addItem:menuItem];
++
++    // Speed menu
++    menu = [[NSMenu alloc] initWithTitle:@"Speed"];
++
++    // Add the Speed menu items, from 100% down to 1%
++    int p, percentage, throttle_pct;
++    for (p = 10; p >= 0; p--)
++    {
++        percentage = p * 10 > 1 ? p * 10 : 1; // prevent a 0% menu item
++
++        menuItem = [[[NSMenuItem alloc]
++                     initWithTitle: [NSString stringWithFormat: @"%d%%", percentage] action:@selector(adjustSpeed:) keyEquivalent:@""] autorelease];
++
++        if (percentage == 100) {
++            [menuItem setState: NSControlStateValueOn];
++        }
++
++        /* Calculate the throttle percentage */
++        throttle_pct = 100 - percentage;
++
++        [menuItem setTag: throttle_pct];
++        [menu addItem: menuItem];
++    }
++    menuItem = [[[NSMenuItem alloc] initWithTitle:@"Speed" action:nil keyEquivalent:@""] autorelease];
++    [menuItem setSubmenu:menu];
++    [[NSApp mainMenu] addItem:menuItem];
++
++    // Window menu
++    menu = [[NSMenu alloc] initWithTitle:@"Window"];
++    [menu addItem: [[[NSMenuItem alloc] initWithTitle:@"Minimize" action:@selector(performMiniaturize:) keyEquivalent:@"m"] autorelease]]; // Miniaturize
++    menuItem = [[[NSMenuItem alloc] initWithTitle:@"Window" action:nil keyEquivalent:@""] autorelease];
++    [menuItem setSubmenu:menu];
++    [[NSApp mainMenu] addItem:menuItem];
++    [NSApp setWindowsMenu:menu];
++
++    // Help menu
++    menu = [[NSMenu alloc] initWithTitle:@"Help"];
++    [menu addItem: [[[NSMenuItem alloc] initWithTitle:@"QEMU Documentation" action:@selector(showQEMUDoc:) keyEquivalent:@"?"] autorelease]]; // QEMU Help
++    menuItem = [[[NSMenuItem alloc] initWithTitle:@"Help" action:nil keyEquivalent:@""] autorelease];
++    [menuItem setSubmenu:menu];
++    [[NSApp mainMenu] addItem:menuItem];
++}
++
++/* Returns a name for a given console */
++static NSString * getConsoleName(QemuConsole * console)
++{
++    return [NSString stringWithFormat: @"%s", qemu_console_get_label(console)];
++}
++
++/* Add an entry to the View menu for each console */
++static void add_console_menu_entries(void)
++{
++    NSMenu *menu;
++    NSMenuItem *menuItem;
++    int index = 0;
++
++    menu = [[[NSApp mainMenu] itemWithTitle:@"View"] submenu];
++
++    [menu addItem:[NSMenuItem separatorItem]];
++
++    while (qemu_console_lookup_by_index(index) != NULL) {
++        menuItem = [[[NSMenuItem alloc] initWithTitle: getConsoleName(qemu_console_lookup_by_index(index))
++                                               action: @selector(displayConsole:) keyEquivalent: @""] autorelease];
++        [menuItem setTag: index];
++        [menu addItem: menuItem];
++        index++;
++    }
++}
++
++/* Make menu items for all removable devices.
++ * Each device is given an 'Eject' and 'Change' menu item.
++ */
++static void addRemovableDevicesMenuItems(void)
++{
++    NSMenu *menu;
++    NSMenuItem *menuItem;
++    BlockInfoList *currentDevice, *pointerToFree;
++    NSString *deviceName;
++
++    currentDevice = qmp_query_block(NULL);
++    pointerToFree = currentDevice;
++    if (currentDevice == NULL) {
++        NSBeep();
++        QEMU_Alert(@"Failed to query for block devices!");
++        return;
++    }
++
++    menu = [[[NSApp mainMenu] itemWithTitle:@"Machine"] submenu];
++
++    // Add a separator between related groups of menu items
++    [menu addItem:[NSMenuItem separatorItem]];
++
++    // Set the attributes of the "Removable Media" menu item
++    NSString *titleString = @"Removable Media";
++    NSMutableAttributedString *attString = [[NSMutableAttributedString alloc] initWithString:titleString];
++    NSColor *newColor = [NSColor blackColor];
++    NSFontManager *fontManager = [NSFontManager sharedFontManager];
++    NSFont *font = [fontManager fontWithFamily:@"Helvetica"
++                                        traits:NSBoldFontMask|NSItalicFontMask
++                                        weight:0
++                                          size:14];
++    [attString addAttribute:NSFontAttributeName value:font range:NSMakeRange(0, [titleString length])];
++    [attString addAttribute:NSForegroundColorAttributeName value:newColor range:NSMakeRange(0, [titleString length])];
++    [attString addAttribute:NSUnderlineStyleAttributeName value:[NSNumber numberWithInt: 1] range:NSMakeRange(0, [titleString length])];
++
++    // Add the "Removable Media" menu item
++    menuItem = [NSMenuItem new];
++    [menuItem setAttributedTitle: attString];
++    [menuItem setEnabled: NO];
++    [menu addItem: menuItem];
++
++    /* Loop through all the block devices in the emulator */
++    while (currentDevice) {
++        deviceName = [[NSString stringWithFormat: @"%s", currentDevice->value->device] retain];
++
++        if (currentDevice->value->removable) {
++            menuItem = [[NSMenuItem alloc] initWithTitle: [NSString stringWithFormat: @"Change %s...", currentDevice->value->device]
++                                                  action: @selector(changeDeviceMedia:)
++                                           keyEquivalent: @""];
++            [menu addItem: menuItem];
++            [menuItem setRepresentedObject: deviceName];
++            [menuItem autorelease];
++
++            menuItem = [[NSMenuItem alloc] initWithTitle: [NSString stringWithFormat: @"Eject %s", currentDevice->value->device]
++                                                  action: @selector(ejectDeviceMedia:)
++                                           keyEquivalent: @""];
++            [menu addItem: menuItem];
++            [menuItem setRepresentedObject: deviceName];
++            [menuItem autorelease];
++        }
++        currentDevice = currentDevice->next;
++    }
++    qapi_free_BlockInfoList(pointerToFree);
++}
++
++@implementation QemuCocoaAppController
++- (id) initWithStartedSem:(QemuSemaphore *)given_started_sem
++                   screen:(QEMUScreen *)screen
++{
++    COCOA_DEBUG("%s\n", __func__);
++
++    self = [super init];
++    if (self) {
++
++        started_sem = given_started_sem;
++
++        create_initial_menus();
++
++        /*
++         * Create the menu entries which depend on QEMU state (for consoles
++         * and removable devices). These make calls back into QEMU functions,
++         * which is OK because at this point we know that the second thread
++         * holds the iothread lock and is synchronously waiting for us to
++         * finish.
++ */ ++ add_console_menu_entries(); ++ addRemovableDevicesMenuItems(); ++ ++ // create a view and add it to the window ++ cocoaView = [[QemuCocoaView alloc] initWithFrame:NSMakeRect(0.0, 0.0, 640.0, 480.0) ++ screen:screen]; ++ if(!cocoaView) { ++ error_report("(cocoa) can't create a view"); ++ exit(1); ++ } ++ ++ // create a window ++ NSWindow *normalWindow = [[NSWindow alloc] initWithContentRect:[cocoaView frame] ++ styleMask:NSWindowStyleMaskTitled|NSWindowStyleMaskMiniaturizable|NSWindowStyleMaskClosable ++ backing:NSBackingStoreBuffered defer:NO]; ++ if(!normalWindow) { ++ error_report("(cocoa) can't create window"); ++ exit(1); ++ } ++ [normalWindow setAcceptsMouseMovedEvents:YES]; ++ [normalWindow setCollectionBehavior:NSWindowCollectionBehaviorFullScreenPrimary]; ++ [normalWindow setTitle:qemu_name ? [NSString stringWithFormat:@"QEMU %s", qemu_name] : @"QEMU"]; ++ [normalWindow setContentView:cocoaView]; ++ [normalWindow makeKeyAndOrderFront:self]; ++ [normalWindow center]; ++ [normalWindow setDelegate: self]; ++ [normalWindow release]; ++ ++ // set the supported image file types that can be opened ++ supportedImageFileTypes = [NSArray arrayWithObjects: @"img", @"iso", @"dmg", ++ @"qcow", @"qcow2", @"cloop", @"vmdk", @"cdr", ++ @"toast", nil]; ++ } ++ return self; ++} ++ ++- (void) dealloc ++{ ++ COCOA_DEBUG("QemuCocoaAppController: dealloc\n"); ++ ++ if (cocoaView) ++ [cocoaView release]; ++ [super dealloc]; ++} ++ ++- (void)applicationDidFinishLaunching: (NSNotification *) note ++{ ++ COCOA_DEBUG("QemuCocoaAppController: applicationDidFinishLaunching\n"); ++ /* Tell cocoa_display_init to proceed */ ++ qemu_sem_post(started_sem); ++} ++ ++- (void)applicationWillTerminate:(NSNotification *)aNotification ++{ ++ COCOA_DEBUG("QemuCocoaAppController: applicationWillTerminate\n"); ++ ++ qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_UI); ++ ++ /* ++ * Sleep here, because returning will cause OSX to kill us ++ * immediately; the QEMU main loop will handle the shutdown ++ * request and terminate the process. ++ */ ++ [NSThread sleepForTimeInterval:INFINITY]; ++} ++ ++- (BOOL)applicationShouldTerminateAfterLastWindowClosed:(NSApplication *)theApplication ++{ ++ return YES; ++} ++ ++- (NSApplicationTerminateReply)applicationShouldTerminate: ++ (NSApplication *)sender ++{ ++ COCOA_DEBUG("QemuCocoaAppController: applicationShouldTerminate\n"); ++ return [self verifyQuit]; ++} ++ ++- (void)windowDidChangeScreen:(NSNotification *)notification ++{ ++ [cocoaView updateUIInfo]; ++} ++ ++- (void)windowDidEnterFullScreen:(NSNotification *)notification ++{ ++ [cocoaView grabMouse]; ++} ++ ++- (void)windowDidExitFullScreen:(NSNotification *)notification ++{ ++ [cocoaView resizeWindow]; ++ [cocoaView ungrabMouse]; ++} ++ ++- (void)windowDidResize:(NSNotification *)notification ++{ ++ [cocoaView frameUpdated]; ++} ++ ++- (void)windowDidResignKey:(NSNotification *)notification ++{ ++ [cocoaView ungrabMouse]; ++} ++ ++- (void)windowDidBecomeKey:(NSNotification *)notification ++{ ++ /* If we became key and are fullscreen there is no point in waiting for ++ * a click to grab the mouse. ++ */ ++ if (([[cocoaView window] styleMask] & NSWindowStyleMaskFullScreen) != 0) { ++ [cocoaView grabMouse]; ++ } ++} ++ ++/* Called when the user clicks on a window's close button */ ++- (BOOL)windowShouldClose:(id)sender ++{ ++ COCOA_DEBUG("QemuCocoaAppController: windowShouldClose\n"); ++ [NSApp terminate: sender]; ++ /* If the user allows the application to quit then the call to ++ * NSApp terminate will never return. 
If we get here then the user
++ * cancelled the quit, so we should return NO to not permit the
++ * closing of this window.
++ */
++    return NO;
++}
++
++- (NSSize) window:(NSWindow *)window willUseFullScreenContentSize:(NSSize)proposedSize
++{
++    if (([[cocoaView window] styleMask] & NSWindowStyleMaskResizable) == 0) {
++        return [cocoaView computeUnzoomedSize];
++    }
++
++    return [cocoaView fixZoomedFullScreenSize:proposedSize];
++}
++
++- (NSApplicationPresentationOptions) window:(NSWindow *)window
++     willUseFullScreenPresentationOptions:(NSApplicationPresentationOptions)proposedOptions
++{
++    return (proposedOptions & ~(NSApplicationPresentationAutoHideDock | NSApplicationPresentationAutoHideMenuBar)) |
++           NSApplicationPresentationHideDock | NSApplicationPresentationHideMenuBar;
++}
++
++/* We wrap the method called by the Enter Fullscreen menu item, because
++ * Mac OS 10.7 and higher disables any menu item whose action is the
++ * well-known selector toggleFullScreen:
++ */
++- (void) doToggleFullScreen:(id)sender
++{
++    [[cocoaView window] toggleFullScreen:sender];
++}
++
++/* Tries to find and then open the specified filename */
++- (void) openDocumentation: (NSString *) filename
++{
++    /* Where to look for local files */
++    NSString *path_array[] = {@"../share/doc/qemu/", @"../doc/qemu/", @"docs/"};
++    NSString *full_file_path;
++    NSURL *full_file_url;
++
++    /* iterate through the possible paths until the file is found */
++    int index;
++    for (index = 0; index < ARRAY_SIZE(path_array); index++) {
++        full_file_path = [[NSBundle mainBundle] executablePath];
++        full_file_path = [full_file_path stringByDeletingLastPathComponent];
++        full_file_path = [NSString stringWithFormat: @"%@/%@%@", full_file_path,
++                          path_array[index], filename];
++        full_file_url = [NSURL fileURLWithPath: full_file_path
++                                   isDirectory: false];
++        if ([[NSWorkspace sharedWorkspace] openURL: full_file_url] == YES) {
++            return;
++        }
++    }
++
++    /* If none of the paths opened a file */
++    NSBeep();
++    QEMU_Alert(@"Failed to open file");
++}
++
++- (void)showQEMUDoc:(id)sender
++{
++    COCOA_DEBUG("QemuCocoaAppController: showQEMUDoc\n");
++
++    [self openDocumentation: @"index.html"];
++}
++
++/* Toggles the flag which stretches video to fit host window size */
++- (void)zoomToFit:(id) sender
++{
++    if (([[cocoaView window] styleMask] & NSWindowStyleMaskResizable) == 0) {
++        [[cocoaView window] setStyleMask:[[cocoaView window] styleMask] | NSWindowStyleMaskResizable];
++        [sender setState: NSControlStateValueOn];
++    } else {
++        [[cocoaView window] setStyleMask:[[cocoaView window] styleMask] & ~NSWindowStyleMaskResizable];
++        [cocoaView resizeWindow];
++        [sender setState: NSControlStateValueOff];
++    }
++}
++
++/* Displays the console on the screen */
++- (void)displayConsole:(id)sender
++{
++    with_iothread_lock(^{
++        console_select([sender tag]);
++    });
++}
++
++/* Pause the guest */
++- (void)pauseQEMU:(id)sender
++{
++    with_iothread_lock(^{
++        qmp_stop(NULL);
++    });
++    [sender setEnabled: NO];
++    [[[sender menu] itemWithTitle: @"Resume"] setEnabled: YES];
++    [cocoaView displayPause];
++}
++
++/* Resume running the guest operating system */
++- (void)resumeQEMU:(id) sender
++{
++    with_iothread_lock(^{
++        qmp_cont(NULL);
++    });
++    [sender setEnabled: NO];
++    [[[sender menu] itemWithTitle: @"Pause"] setEnabled: YES];
++    [cocoaView removePause];
++}
++
++/* Resets the guest */
++- (void)restartQEMU:(id)sender
++{
++    with_iothread_lock(^{
++        qmp_system_reset(NULL);
++    });
++}
++
++/* Powers down the guest */
++-
(void)powerDownQEMU:(id)sender ++{ ++ with_iothread_lock(^{ ++ qmp_system_powerdown(NULL); ++ }); ++} ++ ++/* Ejects the media. ++ * Uses sender's tag to figure out the device to eject. ++ */ ++- (void)ejectDeviceMedia:(id)sender ++{ ++ NSString * drive; ++ drive = [sender representedObject]; ++ if(drive == nil) { ++ NSBeep(); ++ QEMU_Alert(@"Failed to find drive to eject!"); ++ return; ++ } ++ ++ __block Error *err = NULL; ++ with_iothread_lock(^{ ++ qmp_eject(true, [drive cStringUsingEncoding: NSASCIIStringEncoding], ++ false, NULL, false, false, &err); ++ }); ++ handleAnyDeviceErrors(err); ++} ++ ++/* Displays a dialog box asking the user to select an image file to load. ++ * Uses sender's represented object value to figure out which drive to use. ++ */ ++- (void)changeDeviceMedia:(id)sender ++{ ++ /* Find the drive name */ ++ NSString * drive; ++ drive = [sender representedObject]; ++ if(drive == nil) { ++ NSBeep(); ++ QEMU_Alert(@"Could not find drive!"); ++ return; ++ } ++ ++ /* Display the file open dialog */ ++ NSOpenPanel * openPanel; ++ openPanel = [NSOpenPanel openPanel]; ++ [openPanel setCanChooseFiles: YES]; ++ [openPanel setAllowsMultipleSelection: NO]; ++ [openPanel setAllowedFileTypes: supportedImageFileTypes]; ++ if([openPanel runModal] == NSModalResponseOK) { ++ NSString * file = [[[openPanel URLs] objectAtIndex: 0] path]; ++ if(file == nil) { ++ NSBeep(); ++ QEMU_Alert(@"Failed to convert URL to file path!"); ++ return; ++ } ++ ++ __block Error *err = NULL; ++ with_iothread_lock(^{ ++ qmp_blockdev_change_medium(true, ++ [drive cStringUsingEncoding: ++ NSASCIIStringEncoding], ++ false, NULL, ++ [file cStringUsingEncoding: ++ NSASCIIStringEncoding], ++ true, "raw", ++ false, 0, ++ &err); ++ }); ++ handleAnyDeviceErrors(err); ++ } ++} ++ ++/* Verifies if the user really wants to quit */ ++- (BOOL)verifyQuit ++{ ++ NSAlert *alert = [NSAlert new]; ++ [alert autorelease]; ++ [alert setMessageText: @"Are you sure you want to quit QEMU?"]; ++ [alert addButtonWithTitle: @"Cancel"]; ++ [alert addButtonWithTitle: @"Quit"]; ++ if([alert runModal] == NSAlertSecondButtonReturn) { ++ return YES; ++ } else { ++ return NO; ++ } ++} ++ ++/* The action method for the About menu item */ ++- (IBAction) do_about_menu_item: (id) sender ++{ ++ NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; ++ char *icon_path_c = get_relocated_path(CONFIG_QEMU_ICONDIR "/hicolor/512x512/apps/qemu.png"); ++ NSString *icon_path = [NSString stringWithUTF8String:icon_path_c]; ++ g_free(icon_path_c); ++ NSImage *icon = [[NSImage alloc] initWithContentsOfFile:icon_path]; ++ NSString *version = @"QEMU emulator version " QEMU_FULL_VERSION; ++ NSString *copyright = @QEMU_COPYRIGHT; ++ NSDictionary *options; ++ if (icon) { ++ options = @{ ++ NSAboutPanelOptionApplicationIcon : icon, ++ NSAboutPanelOptionApplicationVersion : version, ++ @"Copyright" : copyright, ++ }; ++ [icon release]; ++ } else { ++ options = @{ ++ NSAboutPanelOptionApplicationVersion : version, ++ @"Copyright" : copyright, ++ }; ++ } ++ [NSApp orderFrontStandardAboutPanelWithOptions:options]; ++ [pool release]; ++} ++ ++/* Used by the Speed menu items */ ++- (void)adjustSpeed:(id)sender ++{ ++ int throttle_pct; /* throttle percentage */ ++ NSMenu *menu; ++ ++ menu = [sender menu]; ++ if (menu != nil) ++ { ++ /* Unselect the currently selected item */ ++ for (NSMenuItem *item in [menu itemArray]) { ++ if (item.state == NSControlStateValueOn) { ++ [item setState: NSControlStateValueOff]; ++ break; ++ } ++ } ++ } ++ ++ // check the menu item 
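++    /*
++     * The menu item tag was set up in create_initial_menus() as
++     * 100 - percentage, e.g. the "40%" item carries tag 60: the guest
++     * is throttled for 60% of its CPU time.
++     */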
++    [sender setState: NSControlStateValueOn];
++
++    // get the throttle percentage
++    throttle_pct = [sender tag];
++
++    with_iothread_lock(^{
++        cpu_throttle_set(throttle_pct);
++    });
++    COCOA_DEBUG("cpu throttling at %d%c\n", cpu_throttle_get_percentage(), '%');
++}
++
++- (QemuCocoaView *)cocoaView
++{
++    return cocoaView;
++}
++
++@end
+diff --git a/ui/cocoa/main.m b/ui/cocoa/main.m
+new file mode 100644
+index 0000000000..16bb4ace82
+--- /dev/null
++++ b/ui/cocoa/main.m
+@@ -0,0 +1,852 @@
++/*
++ * QEMU Cocoa CG display driver
++ *
++ * Copyright (c) 2008 Mike Kronenberg
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to deal
++ * in the Software without restriction, including without limitation the rights
++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++ * copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++ * THE SOFTWARE.
++ */
++
++#include "qemu/osdep.h"
++
++#include <crt_externs.h>
++
++#include "qemu-common.h"
++#include "ui/cocoa.h"
++#include "ui/input.h"
++#include "sysemu/sysemu.h"
++
++#ifdef CONFIG_EGL
++#include "ui/egl-context.h"
++#endif
++
++static QEMUScreen screen;
++static QemuCocoaAppController *appController;
++
++static int gArgc;
++static char **gArgv;
++
++static QemuSemaphore display_init_sem;
++static QemuSemaphore app_started_sem;
++
++static NSInteger cbchangecount = -1;
++static QemuCocoaClipboard qemucb;
++static QemuCocoaPasteboardTypeOwner *cbowner;
++
++#ifdef CONFIG_OPENGL
++
++typedef struct {
++    uint32_t scanout_id;
++    DisplayGLTextureBorrower scanout_borrow;
++    bool surface_dirty;
++} DisplayGL;
++
++static DisplayGL *dgs;
++static QEMUGLContext view_ctx;
++static QemuGLShader *gls;
++static GLuint cursor_texture;
++static int cursor_texture_width;
++static int cursor_texture_height;
++
++#ifdef CONFIG_EGL
++static EGLSurface egl_surface;
++#endif
++
++static void cocoa_gl_destroy_context(void *dg, QEMUGLContext ctx);
++
++#endif
++
++@interface QemuApplication : NSApplication
++@end
++
++@implementation QemuApplication
++- (void)sendEvent:(NSEvent *)event
++{
++    COCOA_DEBUG("QemuApplication: sendEvent\n");
++    if (![[appController cocoaView] handleEvent:event]) {
++        [super sendEvent: event];
++    }
++}
++@end
++
++static void cocoa_clipboard_notify(Notifier *notifier, void *data);
++static void cocoa_clipboard_request(QemuClipboardInfo *info,
++                                    QemuClipboardType type);
++
++static QemuClipboardPeer cbpeer = {
++    .name = "cocoa",
++    .update = { .notify = cocoa_clipboard_notify },
++    .request = cocoa_clipboard_request
++};
++
++static void cocoa_clipboard_notify(Notifier *notifier, void *data)
++{
++    QemuClipboardInfo *info = data;
++
++    if (info->owner == &cbpeer || info->selection != QEMU_CLIPBOARD_SELECTION_CLIPBOARD) {
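++        /*
++         * Ignore updates we triggered ourselves and any selection other
++         * than the clipboard, so the pasteboard is only redeclared for
++         * foreign clipboard changes.
++         */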
return; ++ } ++ ++ if (info != qemucb.info) { ++ NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; ++ qemu_clipboard_info_unref(qemucb.info); ++ qemucb.info = qemu_clipboard_info_ref(info); ++ cbchangecount = [[NSPasteboard generalPasteboard] declareTypes:@[NSPasteboardTypeString] owner:cbowner]; ++ [pool release]; ++ } ++ ++ qemu_event_set(&qemucb.event); ++} ++ ++static void cocoa_clipboard_request(QemuClipboardInfo *info, ++ QemuClipboardType type) ++{ ++ NSData *text; ++ ++ switch (type) { ++ case QEMU_CLIPBOARD_TYPE_TEXT: ++ text = [[NSPasteboard generalPasteboard] dataForType:NSPasteboardTypeString]; ++ if (text) { ++ qemu_clipboard_set_data(&cbpeer, info, type, ++ [text length], [text bytes], true); ++ [text release]; ++ } ++ break; ++ default: ++ break; ++ } ++} ++ ++/* ++ * The startup process for the OSX/Cocoa UI is complicated, because ++ * OSX insists that the UI runs on the initial main thread, and so we ++ * need to start a second thread which runs the vl.c qemu_main(): ++ * ++ * Initial thread: 2nd thread: ++ * in main(): ++ * create qemu-main thread ++ * wait on display_init semaphore ++ * call qemu_main() ++ * ... ++ * in cocoa_display_init(): ++ * post the display_init semaphore ++ * wait on app_started semaphore ++ * create application, menus, etc ++ * enter OSX run loop ++ * in applicationDidFinishLaunching: ++ * post app_started semaphore ++ * tell main thread to fullscreen if needed ++ * [...] ++ * run qemu main-loop ++ * ++ * We do this in two stages so that we don't do the creation of the ++ * GUI application menus and so on for command line options like --help ++ * where we want to just print text to stdout and exit immediately. ++ */ ++ ++static void *call_qemu_main(void *opaque) ++{ ++ int status; ++ ++ COCOA_DEBUG("Second thread: calling qemu_main()\n"); ++ status = qemu_main(gArgc, gArgv, *_NSGetEnviron()); ++ COCOA_DEBUG("Second thread: qemu_main() returned, exiting\n"); ++ [cbowner release]; ++ CGImageRelease(screen.cursor_cgimage); ++#ifdef CONFIG_OPENGL ++ g_free(dgs); ++ qemu_gl_fini_shader(gls); ++ if (view_ctx) { ++ cocoa_gl_destroy_context(NULL, view_ctx); ++ } ++ if (appController) { ++ [appController release]; ++ } ++#endif ++ exit(status); ++} ++ ++int main (int argc, char **argv) { ++ QemuThread thread; ++ ++ COCOA_DEBUG("Entered main()\n"); ++ gArgc = argc; ++ gArgv = argv; ++ ++ qemu_sem_init(&display_init_sem, 0); ++ qemu_sem_init(&app_started_sem, 0); ++ ++ qemu_thread_create(&thread, "qemu_main", call_qemu_main, ++ NULL, QEMU_THREAD_DETACHED); ++ ++ qemu_mutex_init(&screen.draw_mutex); ++ ++ COCOA_DEBUG("Main thread: waiting for display_init_sem\n"); ++ qemu_sem_wait(&display_init_sem); ++ COCOA_DEBUG("Main thread: initializing app\n"); ++ ++ NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; ++ ++ // Pull this console process up to being a fully-fledged graphical ++ // app with a menubar and Dock icon ++ ProcessSerialNumber psn = { 0, kCurrentProcess }; ++ TransformProcessType(&psn, kProcessTransformToForegroundApplication); ++ ++ [QemuApplication sharedApplication]; ++ ++ // Create an Application controller ++ appController = [[QemuCocoaAppController alloc] initWithStartedSem:&app_started_sem ++ screen:&screen]; ++ [NSApp setDelegate:appController]; ++ ++ // Start the main event loop ++ COCOA_DEBUG("Main thread: entering OSX run loop\n"); ++ [NSApp run]; ++ COCOA_DEBUG("Main thread: left OSX run loop, exiting\n"); ++ ++ [pool release]; ++ ++ return 0; ++} ++ ++ ++ ++#pragma mark qemu ++static void 
cocoa_update(DisplayChangeListener *dcl, ++ int x, int y, int w, int h) ++{ ++ NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; ++ DisplaySurface *updated = screen.surface; ++ ++ COCOA_DEBUG("qemu_cocoa: cocoa_update\n"); ++ ++ dispatch_async(dispatch_get_main_queue(), ^{ ++ qemu_mutex_lock(&screen.draw_mutex); ++ if (updated != screen.surface) { ++ qemu_mutex_unlock(&screen.draw_mutex); ++ return; ++ } ++ int full_height = surface_height(screen.surface); ++ qemu_mutex_unlock(&screen.draw_mutex); ++ ++ CGFloat d = [[appController cocoaView] frame].size.height / full_height; ++ NSRect rect = NSMakeRect(x * d, (full_height - y - h) * d, w * d, h * d); ++ [[appController cocoaView] setNeedsDisplayInRect:rect]; ++ }); ++ ++ [pool release]; ++} ++ ++static void cocoa_switch(DisplayChangeListener *dcl, ++ DisplaySurface *new_surface) ++{ ++ NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; ++ static bool updating_screen; ++ ++ COCOA_DEBUG("qemu_cocoa: cocoa_switch\n"); ++ ++ [[appController cocoaView] updateUIInfo]; ++ ++ qemu_mutex_lock(&screen.draw_mutex); ++ screen.surface = new_surface; ++ if (!updating_screen) { ++ updating_screen = true; ++ ++ dispatch_async(dispatch_get_main_queue(), ^{ ++ qemu_mutex_lock(&screen.draw_mutex); ++ updating_screen = false; ++ int w = surface_width(screen.surface); ++ int h = surface_height(screen.surface); ++ qemu_mutex_unlock(&screen.draw_mutex); ++ ++ [[appController cocoaView] updateScreenWidth:w height:h]; ++ }); ++ } ++ qemu_mutex_unlock(&screen.draw_mutex); ++ [pool release]; ++} ++ ++static void cocoa_refresh(DisplayChangeListener *dcl) ++{ ++ NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; ++ ++ COCOA_DEBUG("qemu_cocoa: cocoa_refresh\n"); ++ graphic_hw_update(NULL); ++ ++ if (qemu_input_is_absolute()) { ++ dispatch_async(dispatch_get_main_queue(), ^{ ++ if (![[appController cocoaView] isAbsoluteEnabled]) { ++ if ([[appController cocoaView] isMouseGrabbed]) { ++ [[appController cocoaView] ungrabMouse]; ++ } ++ } ++ [[appController cocoaView] setAbsoluteEnabled:YES]; ++ }); ++ } ++ ++ if (cbchangecount != [[NSPasteboard generalPasteboard] changeCount]) { ++ qemu_clipboard_info_unref(qemucb.info); ++ qemucb.info = qemu_clipboard_info_new(&cbpeer, QEMU_CLIPBOARD_SELECTION_CLIPBOARD); ++ if ([[NSPasteboard generalPasteboard] availableTypeFromArray:@[NSPasteboardTypeString]]) { ++ qemucb.info->types[QEMU_CLIPBOARD_TYPE_TEXT].available = true; ++ } ++ qemu_clipboard_update(qemucb.info); ++ cbchangecount = [[NSPasteboard generalPasteboard] changeCount]; ++ qemu_event_set(&qemucb.event); ++ } ++ ++ [pool release]; ++} ++ ++static void cocoa_mouse_set(DisplayChangeListener *dcl, int x, int y, int on) ++{ ++ qemu_mutex_lock(&screen.draw_mutex); ++ int full_height = surface_height(screen.surface); ++ size_t cursor_width = CGImageGetWidth(screen.cursor_cgimage); ++ size_t cursor_height = CGImageGetHeight(screen.cursor_cgimage); ++ int old_x = screen.mouse_x; ++ int old_y = screen.mouse_y; ++ int old_on = screen.mouse_on; ++ screen.mouse_x = x; ++ screen.mouse_y = y; ++ screen.mouse_on = on; ++ qemu_mutex_unlock(&screen.draw_mutex); ++ ++ dispatch_async(dispatch_get_main_queue(), ^{ ++ if (old_on) { ++ [[appController cocoaView] setNeedsDisplayForCursorX:old_x ++ y:old_y ++ width:cursor_width ++ height:cursor_height ++ screenHeight:full_height]; ++ } ++ ++ if (on) { ++ [[appController cocoaView] setNeedsDisplayForCursorX:x ++ y:y ++ width:cursor_width ++ height:cursor_height ++ screenHeight:full_height]; ++ } ++ }); ++} ++ 
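++/*
++ * Guest coordinates are top-left-origin while AppKit view coordinates are
++ * bottom-left-origin, so every rectangle handed to Cocoa is flipped and
++ * scaled by d = view_height / guest_height. A minimal sketch of the
++ * conversion used throughout this file (the names here are illustrative,
++ * not part of the original patch):
++ *
++ *     static NSRect guest_rect_to_view(int gx, int gy, int gw, int gh,
++ *                                      int guest_height, CGFloat d)
++ *     {
++ *         return NSMakeRect(gx * d, (guest_height - gy - gh) * d,
++ *                           gw * d, gh * d);
++ *     }
++ *
++ * e.g. with guest_height = 480 and a 16x16 cursor at guest y = 0, the
++ * view rectangle starts at y = (480 - 0 - 16) * d.
++ */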
++static void cocoa_cursor_define(DisplayChangeListener *dcl, QEMUCursor *cursor) ++{ ++ int width = cursor->width; ++ int height = cursor->height; ++ ++ CGDataProviderRef dataProviderRef = CGDataProviderCreateWithData( ++ NULL, ++ cursor->data, ++ width * height * 4, ++ NULL ++ ); ++ ++ CGImageRef imageRef = CGImageCreate( ++ width, //width ++ height, //height ++ 8, //bitsPerComponent ++ 32, //bitsPerPixel ++ width * 4, //bytesPerRow ++ CGColorSpaceCreateWithName(kCGColorSpaceSRGB), //colorspace ++ kCGBitmapByteOrder32Little | kCGImageAlphaFirst, //bitmapInfo ++ dataProviderRef, //provider ++ NULL, //decode ++ 0, //interpolate ++ kCGRenderingIntentDefault //intent ++ ); ++ ++ qemu_mutex_lock(&screen.draw_mutex); ++ int full_height = surface_height(screen.surface); ++ int x = screen.mouse_x; ++ int y = screen.mouse_y; ++ int on = screen.mouse_on; ++ size_t old_width; ++ size_t old_height; ++ if (screen.cursor_cgimage) { ++ old_width = CGImageGetWidth(screen.cursor_cgimage); ++ old_height = CGImageGetHeight(screen.cursor_cgimage); ++ } else { ++ old_width = 0; ++ old_height = 0; ++ } ++ screen.cursor_cgimage = CGImageCreateCopy(imageRef); ++ qemu_mutex_unlock(&screen.draw_mutex); ++ ++ CGImageRelease(imageRef); ++ CGDataProviderRelease(dataProviderRef); ++ ++ if (on) { ++ dispatch_async(dispatch_get_main_queue(), ^{ ++ CGFloat d = [[appController cocoaView] frame].size.height / full_height; ++ NSRect rect; ++ ++ rect.origin.x = d * x; ++ rect.origin.y = d * (full_height - y - old_height); ++ rect.size.width = d * old_width; ++ rect.size.height = d * old_height; ++ [[appController cocoaView] setNeedsDisplayInRect:rect]; ++ ++ rect.origin.x = d * x; ++ rect.origin.y = d * (full_height - y - height); ++ rect.size.width = d * width; ++ rect.size.height = d * height; ++ [[appController cocoaView] setNeedsDisplayInRect:rect]; ++ }); ++ } ++} ++ ++static const DisplayChangeListenerOps dcl_ops = { ++ .dpy_name = "cocoa", ++ .dpy_gfx_update = cocoa_update, ++ .dpy_gfx_switch = cocoa_switch, ++ .dpy_refresh = cocoa_refresh, ++ .dpy_mouse_set = cocoa_mouse_set, ++ .dpy_cursor_define = cocoa_cursor_define, ++}; ++ ++#ifdef CONFIG_OPENGL ++ ++static void with_view_ctx(CodeBlock block) ++{ ++#ifdef CONFIG_EGL ++ if (egl_surface) { ++ eglMakeCurrent(qemu_egl_display, egl_surface, egl_surface, view_ctx); ++ block(); ++ return; ++ } ++#endif ++ ++#pragma clang diagnostic push ++#pragma clang diagnostic ignored "-Wdeprecated-declarations" ++ [(NSOpenGLContext *)view_ctx lock]; ++ [(NSOpenGLContext *)view_ctx makeCurrentContext]; ++ block(); ++ [(NSOpenGLContext *)view_ctx unlock]; ++#pragma clang diagnostic pop ++} ++ ++#pragma clang diagnostic push ++#pragma clang diagnostic ignored "-Wdeprecated-declarations" ++static NSOpenGLContext *cocoa_gl_create_context_ns(NSOpenGLContext *share_context, ++ int bpp) ++{ ++ NSOpenGLPixelFormatAttribute attributes[] = { ++ NSOpenGLPFAOpenGLProfile, ++ NSOpenGLProfileVersion4_1Core, ++ NSOpenGLPFAColorSize, ++ bpp, ++ NSOpenGLPFADoubleBuffer, ++ 0, ++ }; ++ NSOpenGLPixelFormat *format; ++ NSOpenGLContext *ctx; ++ ++ format = [[NSOpenGLPixelFormat alloc] initWithAttributes:attributes]; ++ ctx = [[NSOpenGLContext alloc] initWithFormat:format shareContext:share_context]; ++ [format release]; ++ ++ [ctx retain]; ++ dispatch_async(dispatch_get_main_queue(), ^{ ++ [ctx setView:[appController cocoaView]]; ++ [ctx release]; ++ }); ++ ++ return (QEMUGLContext)ctx; ++} ++#pragma clang diagnostic pop ++ ++static int cocoa_gl_make_context_current(void *dg, QEMUGLContext ctx) ++{ 
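++    /*
++     * Two context flavours are handled here: when an EGL surface exists
++     * (the gl=es path) ctx is an EGLContext, otherwise it is a
++     * (deprecated) NSOpenGLContext.
++     */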
++#ifdef CONFIG_EGL ++ if (egl_surface) { ++ EGLSurface surface = ctx == EGL_NO_CONTEXT ? EGL_NO_SURFACE : egl_surface; ++ return eglMakeCurrent(qemu_egl_display, surface, surface, ctx); ++ } ++#endif ++ ++#pragma clang diagnostic push ++#pragma clang diagnostic ignored "-Wdeprecated-declarations" ++ if (ctx) { ++ [(NSOpenGLContext *)ctx makeCurrentContext]; ++ } else { ++ [NSOpenGLContext clearCurrentContext]; ++ } ++#pragma clang diagnostic pop ++ ++ return 0; ++} ++ ++static QEMUGLContext cocoa_gl_create_context(void *dg, QEMUGLParams *params) ++{ ++#ifdef CONFIG_EGL ++ if (egl_surface) { ++ eglMakeCurrent(qemu_egl_display, egl_surface, egl_surface, view_ctx); ++ return qemu_egl_create_context(dg, params); ++ } ++#endif ++ ++ int bpp = PIXMAN_FORMAT_BPP(surface_format(screen.surface)); ++ return cocoa_gl_create_context_ns(view_ctx, bpp); ++} ++ ++static void cocoa_gl_destroy_context(void *dg, QEMUGLContext ctx) ++{ ++#ifdef CONFIG_EGL ++ if (egl_surface) { ++ eglDestroyContext(qemu_egl_display, ctx); ++ return; ++ } ++#endif ++ ++#pragma clang diagnostic push ++#pragma clang diagnostic ignored "-Wdeprecated-declarations" ++ [(NSOpenGLContext *)ctx release]; ++#pragma clang diagnostic pop ++} ++ ++static void cocoa_gl_flush() ++{ ++#ifdef CONFIG_EGL ++ if (egl_surface) { ++ eglSwapBuffers(qemu_egl_display, egl_surface); ++ return; ++ } ++#endif ++ ++#pragma clang diagnostic push ++#pragma clang diagnostic ignored "-Wdeprecated-declarations" ++ [[NSOpenGLContext currentContext] flushBuffer]; ++ ++ dispatch_async(dispatch_get_main_queue(), ^{ ++ [(NSOpenGLContext *)view_ctx update]; ++ }); ++#pragma clang diagnostic pop ++} ++ ++static void cocoa_scanout_disable(DisplayGL *dg) ++{ ++ if (!dg->scanout_id) { ++ return; ++ } ++ ++ dg->scanout_id = 0; ++ ++ if (screen.surface) { ++ surface_gl_destroy_texture(gls, screen.surface); ++ surface_gl_create_texture(gls, screen.surface); ++ } ++} ++ ++static void cocoa_gl_render_cursor() ++{ ++ if (!screen.mouse_on) { ++ return; ++ } ++ ++ QemuCocoaView *cocoaView = [appController cocoaView]; ++ NSSize size = [cocoaView convertSizeToBacking:[cocoaView frame].size]; ++ int full_height = surface_height(screen.surface); ++ CGFloat d = size.height / full_height; ++ ++ glViewport( ++ d * screen.mouse_x, ++ d * (full_height - screen.mouse_y - cursor_texture_height), ++ d * cursor_texture_width, ++ d * cursor_texture_height ++ ); ++ glBindTexture(GL_TEXTURE_2D, cursor_texture); ++ glEnable(GL_BLEND); ++ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); ++ qemu_gl_run_texture_blit(gls, false); ++ glDisable(GL_BLEND); ++} ++ ++static void cocoa_gl_render_surface(DisplayGL *dg) ++{ ++ cocoa_scanout_disable(dg); ++ ++ QemuCocoaView *cocoaView = [appController cocoaView]; ++ NSSize size = [cocoaView convertSizeToBacking:[cocoaView frame].size]; ++ ++ surface_gl_setup_viewport(gls, screen.surface, size.width, size.height); ++ glBindTexture(GL_TEXTURE_2D, screen.surface->texture); ++ surface_gl_render_texture(gls, screen.surface); ++ ++ cocoa_gl_render_cursor(); ++ ++ cocoa_gl_flush(); ++} ++ ++static void cocoa_gl_update(DisplayChangeListener *dcl, ++ int x, int y, int w, int h) ++{ ++ with_view_ctx(^{ ++ surface_gl_update_texture(gls, screen.surface, x, y, w, h); ++ dgs[qemu_console_get_index(dcl->con)].surface_dirty = true; ++ }); ++} ++ ++static void cocoa_gl_switch(DisplayChangeListener *dcl, ++ DisplaySurface *new_surface) ++{ ++ cocoa_switch(dcl, new_surface); ++ ++ with_view_ctx(^{ ++ surface_gl_create_texture(gls, new_surface); ++ }); ++} ++ ++static void 
cocoa_gl_refresh(DisplayChangeListener *dcl) ++{ ++ cocoa_refresh(dcl); ++ ++ with_view_ctx(^{ ++ DisplayGL *dg = dgs + qemu_console_get_index(dcl->con); ++ ++ if (dg->surface_dirty && screen.surface) { ++ dg->surface_dirty = false; ++ cocoa_gl_render_surface(dg); ++ } ++ }); ++} ++ ++static bool cocoa_gl_scanout_get_enabled(void *dg) ++{ ++ return ((DisplayGL *)dg)->scanout_id != 0; ++} ++ ++static void cocoa_gl_scanout_disable(void *dg) ++{ ++ with_view_ctx(^{ ++ cocoa_scanout_disable((DisplayGL *)dg); ++ }); ++} ++ ++static void cocoa_gl_scanout_texture(void *dg, ++ uint32_t backing_id, ++ DisplayGLTextureBorrower backing_borrow, ++ uint32_t x, uint32_t y, ++ uint32_t w, uint32_t h) ++{ ++ ((DisplayGL *)dg)->scanout_id = backing_id; ++ ((DisplayGL *)dg)->scanout_borrow = backing_borrow; ++} ++ ++static void cocoa_gl_scanout_flush(DisplayChangeListener *dcl, ++ uint32_t x, uint32_t y, uint32_t w, uint32_t h) ++{ ++ DisplayGL *dg = dgs + qemu_console_get_index(dcl->con); ++ bool y0_top; ++ ++ if (!dg->scanout_id) { ++ return; ++ } ++ ++ GLint texture = dg->scanout_borrow(dg->scanout_id, &y0_top, NULL, NULL); ++ if (!texture) { ++ return; ++ } ++ ++ with_view_ctx(^{ ++ QemuCocoaView *cocoaView = [appController cocoaView]; ++ NSSize size = [cocoaView convertSizeToBacking:[cocoaView frame].size]; ++ ++ glBindFramebuffer(GL_FRAMEBUFFER_EXT, 0); ++ glViewport(0, 0, size.width, size.height); ++ glBindTexture(GL_TEXTURE_2D, texture); ++ qemu_gl_run_texture_blit(gls, y0_top); ++ ++ cocoa_gl_render_cursor(); ++ ++ cocoa_gl_flush(); ++ }); ++} ++ ++static void cocoa_gl_mouse_set(DisplayChangeListener *dcl, int x, int y, int on) ++{ ++ screen.mouse_x = x; ++ screen.mouse_y = y; ++ screen.mouse_on = on; ++ ++ DisplayGL *dg = dgs + qemu_console_get_index(dcl->con); ++ ++ if (dg->scanout_id) { ++ cocoa_gl_scanout_flush(dcl, 0, 0, 0, 0); ++ } else { ++ with_view_ctx(^{ ++ cocoa_gl_render_surface(dg); ++ }); ++ } ++} ++ ++static void cocoa_gl_cursor_define(DisplayChangeListener *dcl, QEMUCursor *cursor) ++{ ++ cursor_texture_width = cursor->width; ++ cursor_texture_height = cursor->height; ++ ++ with_view_ctx(^{ ++ glBindTexture(GL_TEXTURE_2D, cursor_texture); ++ glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, cursor->width); ++ glTexImage2D(GL_TEXTURE_2D, 0, ++ epoxy_is_desktop_gl() ? 
GL_RGBA : GL_BGRA, ++ cursor->width, ++ cursor->height, ++ 0, GL_BGRA, GL_UNSIGNED_BYTE, ++ cursor->data); ++ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); ++ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); ++ }); ++} ++ ++static const DisplayGLOps dg_ops = { ++ .dpy_gl_ctx_create = cocoa_gl_create_context, ++ .dpy_gl_ctx_destroy = cocoa_gl_destroy_context, ++ .dpy_gl_ctx_make_current = cocoa_gl_make_context_current, ++ .dpy_gl_scanout_get_enabled = cocoa_gl_scanout_get_enabled, ++ .dpy_gl_scanout_disable = cocoa_gl_scanout_disable, ++ .dpy_gl_scanout_texture = cocoa_gl_scanout_texture, ++}; ++ ++static const DisplayChangeListenerOps dcl_gl_ops = { ++ .dpy_name = "cocoa-gl", ++ .dpy_gfx_update = cocoa_gl_update, ++ .dpy_gfx_switch = cocoa_gl_switch, ++ .dpy_gfx_check_format = console_gl_check_format, ++ .dpy_refresh = cocoa_gl_refresh, ++ .dpy_mouse_set = cocoa_gl_mouse_set, ++ .dpy_cursor_define = cocoa_gl_cursor_define, ++ ++ .dpy_gl_update = cocoa_gl_scanout_flush, ++}; ++ ++#endif ++ ++static void cocoa_display_early_init(DisplayOptions *o) ++{ ++ assert(o->type == DISPLAY_TYPE_COCOA); ++ if (o->has_gl && o->gl) { ++ display_opengl = 1; ++ } ++} ++ ++static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) ++{ ++ COCOA_DEBUG("qemu_cocoa: cocoa_display_init\n"); ++ ++ screen.cursor_show = opts->has_show_cursor && opts->show_cursor; ++ screen.swap_option_command = opts->u.cocoa.has_swap_option_command && ++ opts->u.cocoa.swap_option_command; ++ ++ /* Tell main thread to go ahead and create the app and enter the run loop */ ++ qemu_sem_post(&display_init_sem); ++ qemu_sem_wait(&app_started_sem); ++ COCOA_DEBUG("cocoa_display_init: app start completed\n"); ++ ++ /* if fullscreen mode is to be used */ ++ if (opts->has_full_screen && opts->full_screen) { ++ dispatch_async(dispatch_get_main_queue(), ^{ ++ [[[appController cocoaView] window] toggleFullScreen: nil]; ++ }); ++ } ++ if (opts->u.cocoa.has_full_grab && opts->u.cocoa.full_grab) { ++ dispatch_async(dispatch_get_main_queue(), ^{ ++ [[appController cocoaView] setFullGrab: nil]; ++ }); ++ } ++ ++ if (display_opengl) { ++#ifdef CONFIG_OPENGL ++ unsigned int console_count = 0; ++ while (qemu_console_lookup_by_index(console_count)) { ++ console_count++; ++ } ++ ++ dgs = g_new0(DisplayGL, console_count); ++ ++ for (unsigned int index = 0; index < console_count; index++) { ++ QemuConsole *con = qemu_console_lookup_by_index(index); ++ console_set_displayglcontext(con, dgs + index); ++ } ++ ++ if (opts->gl == DISPLAYGL_MODE_ES) { ++#ifdef CONFIG_EGL ++ if (qemu_egl_init_dpy_cocoa(DISPLAYGL_MODE_ES)) { ++ exit(1); ++ } ++ view_ctx = qemu_egl_init_ctx(); ++ if (!view_ctx) { ++ exit(1); ++ } ++ dispatch_sync(dispatch_get_main_queue(), ^{ ++ CALayer *layer = [[appController cocoaView] layer]; ++ egl_surface = qemu_egl_init_surface(view_ctx, layer); ++ }); ++#else ++ error_report("OpenGLES without EGL is not supported - exiting"); ++ exit(1); ++#endif ++ } else { ++ view_ctx = cocoa_gl_create_context_ns(nil, 32); ++#ifdef CONFIG_EGL ++ egl_surface = EGL_NO_SURFACE; ++#endif ++ cocoa_gl_make_context_current(NULL, view_ctx); ++ } ++ ++ gls = qemu_gl_init_shader(); ++ glGenTextures(1, &cursor_texture); ++ ++ // register vga output callbacks ++ screen.dcl.ops = &dcl_gl_ops; ++ ++ register_displayglops(&dg_ops); ++#else ++ error_report("OpenGL is not enabled - exiting"); ++ exit(1); ++#endif ++ } else { ++ // register vga output callbacks ++ screen.dcl.ops = &dcl_ops; ++ } ++ ++ 
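++    /*
++     * Register the listener first, then publish screen.inited; the
++     * store_release below pairs with the load_acquire in updateUIInfo
++     * (and the qatomic_read in handleEvent) so the main thread does not
++     * act on a partially initialized QEMUScreen.
++     */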
register_displaychangelistener(&screen.dcl); ++ qatomic_store_release(&screen.inited, true); ++ ++ qemu_event_init(&qemucb.event, false); ++ cbowner = [[QemuCocoaPasteboardTypeOwner alloc] initWith:&qemucb]; ++ qemu_clipboard_peer_register(&cbpeer); ++} ++ ++static QemuDisplay qemu_display_cocoa = { ++ .type = DISPLAY_TYPE_COCOA, ++ .early_init = cocoa_display_early_init, ++ .init = cocoa_display_init, ++}; ++ ++static void register_cocoa(void) ++{ ++ qemu_display_register(&qemu_display_cocoa); ++} ++ ++type_init(register_cocoa); ++ ++#ifdef CONFIG_OPENGL ++module_dep("ui-opengl"); ++#endif +diff --git a/ui/cocoa/pasteboard_type_owner.m b/ui/cocoa/pasteboard_type_owner.m +new file mode 100644 +index 0000000000..1dd1e987d7 +--- /dev/null ++++ b/ui/cocoa/pasteboard_type_owner.m +@@ -0,0 +1,75 @@ ++/* ++ * QEMU Cocoa CG display driver ++ * ++ * Copyright (c) 2008 Mike Kronenberg ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++ * THE SOFTWARE. 
++ */ ++ ++#include "qemu/osdep.h" ++ ++#include "ui/cocoa.h" ++#include "qemu/main-loop.h" ++ ++@implementation QemuCocoaPasteboardTypeOwner ++ ++- (id)initWith:(QemuCocoaClipboard *)aCb ++{ ++ COCOA_DEBUG("QemuCocoaView: initWithFrame\n"); ++ ++ self = [super init]; ++ if (self) { ++ cb = aCb; ++ } ++ return self; ++} ++ ++- (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)type ++{ ++ if (type != NSPasteboardTypeString) { ++ return; ++ } ++ ++ qemu_mutex_lock_iothread(); ++ ++ QemuClipboardInfo *info = qemu_clipboard_info_ref(cb->info); ++ qemu_event_reset(&cb->event); ++ qemu_clipboard_request(info, QEMU_CLIPBOARD_TYPE_TEXT); ++ ++ while (info == cb->info && ++ info->types[QEMU_CLIPBOARD_TYPE_TEXT].available && ++ info->types[QEMU_CLIPBOARD_TYPE_TEXT].data == NULL) { ++ qemu_mutex_unlock_iothread(); ++ qemu_event_wait(&cb->event); ++ qemu_mutex_lock_iothread(); ++ } ++ ++ if (info == cb->info) { ++ NSData *data = [[NSData alloc] initWithBytes:info->types[QEMU_CLIPBOARD_TYPE_TEXT].data ++ length:info->types[QEMU_CLIPBOARD_TYPE_TEXT].size]; ++ [sender setData:data forType:NSPasteboardTypeString]; ++ [data release]; ++ } ++ ++ qemu_clipboard_info_unref(info); ++ ++ qemu_mutex_unlock_iothread(); ++} ++ ++@end +diff --git a/ui/cocoa/view.m b/ui/cocoa/view.m +new file mode 100644 +index 0000000000..8beb963cbd +--- /dev/null ++++ b/ui/cocoa/view.m +@@ -0,0 +1,921 @@ ++/* ++ * QEMU Cocoa CG display driver ++ * ++ * Copyright (c) 2008 Mike Kronenberg ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++ * THE SOFTWARE. 
++ */
++
++#include "qemu/osdep.h"
++
++#include "ui/cocoa.h"
++#include "ui/input.h"
++#include "sysemu/sysemu.h"
++#include "qemu/main-loop.h"
++#include "qemu/error-report.h"
++#include <Carbon/Carbon.h>
++
++#define cgrect(nsrect) (*(CGRect *)&(nsrect))
++
++static int cocoa_keycode_to_qemu(int keycode)
++{
++    if (qemu_input_map_osx_to_qcode_len <= keycode) {
++        error_report("(cocoa) warning unknown keycode 0x%x", keycode);
++        return 0;
++    }
++    return qemu_input_map_osx_to_qcode[keycode];
++}
++
++static CGRect compute_cursor_clip_rect(int screen_height,
++                                       int given_mouse_x, int given_mouse_y,
++                                       int cursor_width, int cursor_height)
++{
++    CGRect rect;
++
++    rect.origin.x = MAX(0, -given_mouse_x);
++    rect.origin.y = 0;
++    rect.size.width = MIN(cursor_width, cursor_width + given_mouse_x);
++    rect.size.height = cursor_height - rect.origin.x;
++
++    return rect;
++}
++
++static CGEventRef handleTapEvent(CGEventTapProxy proxy, CGEventType type, CGEventRef cgEvent, void *userInfo)
++{
++    QemuCocoaView *cocoaView = (QemuCocoaView*) userInfo;
++    NSEvent* event = [NSEvent eventWithCGEvent:cgEvent];
++    if ([cocoaView isMouseGrabbed] && [cocoaView handleEvent:event]) {
++        COCOA_DEBUG("Global events tap: qemu handled the event, capturing!\n");
++        return NULL;
++    }
++    COCOA_DEBUG("Global events tap: qemu did not handle the event, letting it through...\n");
++
++    return cgEvent;
++}
++
++@implementation QemuCocoaView
++- (id)initWithFrame:(NSRect)frameRect
++             screen:(QEMUScreen *)given_screen
++{
++    COCOA_DEBUG("QemuCocoaView: initWithFrame\n");
++
++    self = [super initWithFrame:frameRect];
++    if (self) {
++
++        screen = given_screen;
++        screen_width = frameRect.size.width;
++        screen_height = frameRect.size.height;
++        kbd = qkbd_state_init(screen->dcl.con);
++
++        /* Used for displaying pause on the screen */
++        pauseLabel = [NSTextField new];
++        [pauseLabel setBezeled:YES];
++        [pauseLabel setDrawsBackground:YES];
++        [pauseLabel setBackgroundColor: [NSColor whiteColor]];
++        [pauseLabel setEditable:NO];
++        [pauseLabel setSelectable:NO];
++        [pauseLabel setStringValue: @"Paused"];
++        [pauseLabel setFont: [NSFont fontWithName: @"Helvetica" size: 90]];
++        [pauseLabel setTextColor: [NSColor blackColor]];
++        [pauseLabel sizeToFit];
++
++    }
++    return self;
++}
++
++- (void) dealloc
++{
++    COCOA_DEBUG("QemuCocoaView: dealloc\n");
++
++    if (pauseLabel) {
++        [pauseLabel release];
++    }
++
++    qkbd_state_free(kbd);
++
++    if (eventsTap) {
++        CFRelease(eventsTap);
++    }
++
++    [super dealloc];
++}
++
++- (BOOL) isOpaque
++{
++    return YES;
++}
++
++- (void) removeTrackingRect
++{
++    if (trackingArea) {
++        [self removeTrackingArea:trackingArea];
++        [trackingArea release];
++        trackingArea = nil;
++    }
++}
++
++- (void) frameUpdated
++{
++    [self removeTrackingRect];
++
++    if ([self window]) {
++        NSTrackingAreaOptions options = NSTrackingActiveInKeyWindow |
++                                        NSTrackingMouseEnteredAndExited |
++                                        NSTrackingMouseMoved;
++        trackingArea = [[NSTrackingArea alloc] initWithRect:[self frame]
++                                                    options:options
++                                                      owner:self
++                                                   userInfo:nil];
++        [self addTrackingArea:trackingArea];
++        [self updateUIInfo];
++    }
++}
++
++- (void) viewDidMoveToWindow
++{
++    [self resizeWindow];
++    [self frameUpdated];
++}
++
++- (void) viewWillMoveToWindow:(NSWindow *)newWindow
++{
++    [self removeTrackingRect];
++}
++
++- (void) hideCursor
++{
++    if (screen->cursor_show) {
++        return;
++    }
++    [NSCursor hide];
++}
++
++- (void) unhideCursor
++{
++    if (screen->cursor_show) {
++        return;
++    }
++    [NSCursor unhide];
++}
++
++- (CGRect) convertCursorClipRectToDraw:(CGRect)rect
++ screenHeight:(int)given_screen_height ++ mouseX:(int)mouse_x ++ mouseY:(int)mouse_y ++{ ++ CGFloat d = [self frame].size.height / (CGFloat)given_screen_height; ++ ++ rect.origin.x = (rect.origin.x + mouse_x) * d; ++ rect.origin.y = (given_screen_height - rect.origin.y - mouse_y - rect.size.height) * d; ++ rect.size.width *= d; ++ rect.size.height *= d; ++ ++ return rect; ++} ++ ++- (void) drawRect:(NSRect) rect ++{ ++ COCOA_DEBUG("QemuCocoaView: drawRect\n"); ++ ++#ifdef CONFIG_OPENGL ++ if (display_opengl) { ++ return; ++ } ++#endif ++ ++ // get CoreGraphic context ++ CGContextRef viewContextRef = [[NSGraphicsContext currentContext] CGContext]; ++ ++ CGContextSetInterpolationQuality (viewContextRef, kCGInterpolationNone); ++ CGContextSetShouldAntialias (viewContextRef, NO); ++ ++ qemu_mutex_lock(&screen->draw_mutex); ++ ++ // draw screen bitmap directly to Core Graphics context ++ if (!screen->surface) { ++ // Draw request before any guest device has set up a framebuffer: ++ // just draw an opaque black rectangle ++ CGContextSetRGBFillColor(viewContextRef, 0, 0, 0, 1.0); ++ CGContextFillRect(viewContextRef, NSRectToCGRect(rect)); ++ } else { ++ int w = surface_width(screen->surface); ++ int h = surface_height(screen->surface); ++ int bitsPerPixel = PIXMAN_FORMAT_BPP(surface_format(screen->surface)); ++ int stride = surface_stride(screen->surface); ++ ++ CGDataProviderRef dataProviderRef = CGDataProviderCreateWithData( ++ NULL, ++ surface_data(screen->surface), ++ stride * h, ++ NULL ++ ); ++ ++ CGImageRef imageRef = CGImageCreate( ++ w, //width ++ h, //height ++ DIV_ROUND_UP(bitsPerPixel, 8) * 2, //bitsPerComponent ++ bitsPerPixel, //bitsPerPixel ++ stride, //bytesPerRow ++ CGColorSpaceCreateWithName(kCGColorSpaceSRGB), //colorspace ++ kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst, //bitmapInfo ++ dataProviderRef, //provider ++ NULL, //decode ++ 0, //interpolate ++ kCGRenderingIntentDefault //intent ++ ); ++ // selective drawing code (draws only dirty rectangles) (OS X >= 10.4) ++ const NSRect *rectList; ++ NSInteger rectCount; ++ int i; ++ CGImageRef clipImageRef; ++ CGRect clipRect; ++ CGFloat d = (CGFloat)h / [self frame].size.height; ++ ++ [self getRectsBeingDrawn:&rectList count:&rectCount]; ++ for (i = 0; i < rectCount; i++) { ++ clipRect.origin.x = rectList[i].origin.x * d; ++ clipRect.origin.y = (float)h - (rectList[i].origin.y + rectList[i].size.height) * d; ++ clipRect.size.width = rectList[i].size.width * d; ++ clipRect.size.height = rectList[i].size.height * d; ++ clipImageRef = CGImageCreateWithImageInRect( ++ imageRef, ++ clipRect ++ ); ++ CGContextDrawImage (viewContextRef, cgrect(rectList[i]), clipImageRef); ++ CGImageRelease (clipImageRef); ++ } ++ CGImageRelease (imageRef); ++ CGDataProviderRelease(dataProviderRef); ++ ++ if (screen->mouse_on) { ++ size_t cursor_width = CGImageGetWidth(screen->cursor_cgimage); ++ size_t cursor_height = CGImageGetHeight(screen->cursor_cgimage); ++ clipRect = compute_cursor_clip_rect(h, screen->mouse_x, screen->mouse_y, ++ cursor_width, ++ cursor_height); ++ CGRect drawRect = [self convertCursorClipRectToDraw:clipRect ++ screenHeight:h ++ mouseX:screen->mouse_x ++ mouseY:screen->mouse_y]; ++ clipImageRef = CGImageCreateWithImageInRect( ++ screen->cursor_cgimage, ++ clipRect ++ ); ++ CGContextDrawImage(viewContextRef, drawRect, clipImageRef); ++ CGImageRelease (clipImageRef); ++ } ++ } ++ ++ qemu_mutex_unlock(&screen->draw_mutex); ++} ++ ++- (NSSize) computeUnzoomedSize ++{ ++ CGFloat width = screen_width / [[self window] 
backingScaleFactor]; ++ CGFloat height = screen_height / [[self window] backingScaleFactor]; ++ ++ return NSMakeSize(width, height); ++} ++ ++- (NSSize) fixZoomedFullScreenSize:(NSSize)proposedSize ++{ ++ NSSize size; ++ ++ size.width = (CGFloat)screen_width * proposedSize.height; ++ size.height = (CGFloat)screen_height * proposedSize.width; ++ ++ if (size.width < size.height) { ++ size.width /= screen_height; ++ size.height = proposedSize.height; ++ } else { ++ size.width = proposedSize.width; ++ size.height /= screen_width; ++ } ++ ++ return size; ++} ++ ++- (void) resizeWindow ++{ ++ [[self window] setContentAspectRatio:NSMakeSize(screen_width, screen_height)]; ++ ++ if (([[self window] styleMask] & NSWindowStyleMaskResizable) == 0) { ++ [[self window] setContentSize:[self computeUnzoomedSize]]; ++ [[self window] center]; ++ } else if (([[self window] styleMask] & NSWindowStyleMaskFullScreen) != 0) { ++ [[self window] setContentSize:[self fixZoomedFullScreenSize:[[[self window] screen] frame].size]]; ++ [[self window] center]; ++ } ++} ++ ++- (void) updateUIInfo ++{ ++ NSSize frameSize; ++ QemuUIInfo info = {}; ++ ++ if (!qatomic_load_acquire(&screen->inited)) { ++ return; ++ } ++ ++ if ([self window]) { ++ NSDictionary *description = [[[self window] screen] deviceDescription]; ++ CGDirectDisplayID display = [[description objectForKey:@"NSScreenNumber"] unsignedIntValue]; ++ NSSize screenSize = [[[self window] screen] frame].size; ++ CGSize screenPhysicalSize = CGDisplayScreenSize(display); ++ CVDisplayLinkRef displayLink; ++ ++ if (([[self window] styleMask] & NSWindowStyleMaskFullScreen) == 0) { ++ frameSize = [self frame].size; ++ } else { ++ frameSize = screenSize; ++ } ++ ++ if (!CVDisplayLinkCreateWithCGDisplay(display, &displayLink)) { ++ CVTime period = CVDisplayLinkGetNominalOutputVideoRefreshPeriod(displayLink); ++ CVDisplayLinkRelease(displayLink); ++ if (!(period.flags & kCVTimeIsIndefinite)) { ++ update_displaychangelistener(&screen->dcl, ++ 1000 * period.timeValue / period.timeScale); ++ info.refresh_rate = (int64_t)1000 * period.timeScale / period.timeValue; ++ } ++ } ++ ++ info.width_mm = frameSize.width / screenSize.width * screenPhysicalSize.width; ++ info.height_mm = frameSize.height / screenSize.height * screenPhysicalSize.height; ++ } else { ++ frameSize = [self frame].size; ++ } ++ ++ NSSize frameBackingSize = [self convertSizeToBacking:frameSize]; ++ ++ info.width = frameBackingSize.width; ++ info.height = frameBackingSize.height; ++ ++ dpy_set_ui_info(screen->dcl.con, &info); ++} ++ ++- (void) updateScreenWidth:(int)w height:(int)h ++{ ++ COCOA_DEBUG("QemuCocoaView: updateScreenWidth:height:\n"); ++ ++ if (w != screen_width || h != screen_height) { ++ COCOA_DEBUG("updateScreenWidth:height: new size %d x %d\n", w, h); ++ screen_width = w; ++ screen_height = h; ++ [self resizeWindow]; ++ } ++} ++ ++- (void) setFullGrab:(id)sender ++{ ++ COCOA_DEBUG("QemuCocoaView: setFullGrab\n"); ++ ++ CGEventMask mask = CGEventMaskBit(kCGEventKeyDown) | CGEventMaskBit(kCGEventKeyUp) | CGEventMaskBit(kCGEventFlagsChanged); ++ eventsTap = CGEventTapCreate(kCGHIDEventTap, kCGHeadInsertEventTap, kCGEventTapOptionDefault, ++ mask, handleTapEvent, self); ++ if (!eventsTap) { ++ warn_report("Could not create event tap, system key combos will not be captured.\n"); ++ return; ++ } else { ++ COCOA_DEBUG("Global events tap created! 
Will capture system key combos.\n");
++    }
++
++    CFRunLoopRef runLoop = CFRunLoopGetCurrent();
++    if (!runLoop) {
++        warn_report("Could not obtain current CF RunLoop, system key combos will not be captured.\n");
++        return;
++    }
++
++    CFRunLoopSourceRef tapEventsSrc = CFMachPortCreateRunLoopSource(kCFAllocatorDefault, eventsTap, 0);
++    if (!tapEventsSrc) {
++        warn_report("Could not create RunLoop source for the event tap, system key combos will not be captured.\n");
++        return;
++    }
++
++    CFRunLoopAddSource(runLoop, tapEventsSrc, kCFRunLoopDefaultMode);
++    CFRelease(tapEventsSrc);
++}
++
++- (void) toggleKey: (int)keycode {
++    qkbd_state_key_event(kbd, keycode, !qkbd_state_key_get(kbd, keycode));
++}
++
++// Does the work of sending input to the monitor
++- (void) handleMonitorInput:(NSEvent *)event
++{
++    int keysym = 0;
++    int control_key = 0;
++
++    // if the control key is down
++    if ([event modifierFlags] & NSEventModifierFlagControl) {
++        control_key = 1;
++    }
++
++    /* translates Macintosh keycodes to QEMU's keysym */
++
++    int without_control_translation[] = {
++        [0 ... 0xff] = 0,   // invalid key
++
++        [kVK_UpArrow]       = QEMU_KEY_UP,
++        [kVK_DownArrow]     = QEMU_KEY_DOWN,
++        [kVK_RightArrow]    = QEMU_KEY_RIGHT,
++        [kVK_LeftArrow]     = QEMU_KEY_LEFT,
++        [kVK_Home]          = QEMU_KEY_HOME,
++        [kVK_End]           = QEMU_KEY_END,
++        [kVK_PageUp]        = QEMU_KEY_PAGEUP,
++        [kVK_PageDown]      = QEMU_KEY_PAGEDOWN,
++        [kVK_ForwardDelete] = QEMU_KEY_DELETE,
++        [kVK_Delete]        = QEMU_KEY_BACKSPACE,
++    };
++
++    int with_control_translation[] = {
++        [0 ... 0xff] = 0,   // invalid key
++
++        [kVK_UpArrow]       = QEMU_KEY_CTRL_UP,
++        [kVK_DownArrow]     = QEMU_KEY_CTRL_DOWN,
++        [kVK_RightArrow]    = QEMU_KEY_CTRL_RIGHT,
++        [kVK_LeftArrow]     = QEMU_KEY_CTRL_LEFT,
++        [kVK_Home]          = QEMU_KEY_CTRL_HOME,
++        [kVK_End]           = QEMU_KEY_CTRL_END,
++        [kVK_PageUp]        = QEMU_KEY_CTRL_PAGEUP,
++        [kVK_PageDown]      = QEMU_KEY_CTRL_PAGEDOWN,
++    };
++
++    if (control_key != 0) { /* If the control key is being used */
++        if ([event keyCode] < ARRAY_SIZE(with_control_translation)) {
++            keysym = with_control_translation[[event keyCode]];
++        }
++    } else {
++        if ([event keyCode] < ARRAY_SIZE(without_control_translation)) {
++            keysym = without_control_translation[[event keyCode]];
++        }
++    }
++
++    // if not a key that needs translating
++    if (keysym == 0) {
++        NSString *ks = [event characters];
++        if ([ks length] > 0) {
++            keysym = [ks characterAtIndex:0];
++        }
++    }
++
++    if (keysym) {
++        kbd_put_keysym(keysym);
++    }
++}
++
++- (bool) handleEvent:(NSEvent *)event
++{
++    if (!qatomic_read(&screen->inited)) {
++        /*
++         * Just let OSX have all events that arrive before
++         * applicationDidFinishLaunching.
++         * This avoids a deadlock on the iothread lock, which cocoa_display_init()
++         * will not drop until after the app_started_sem is posted. (In theory
++         * there should not be any such events, but OSX Catalina now emits some.)
++         */
++        return false;
++    }
++
++    qemu_mutex_lock_iothread();
++    bool handled = [self handleEventLocked:event];
++    qemu_mutex_unlock_iothread();
++    return handled;
++}
++
++- (bool) handleEventLocked:(NSEvent *)event
++{
++    /* Return true if we handled the event, false if it should be given to OSX */
++    COCOA_DEBUG("QemuCocoaView: handleEvent\n");
++    int keycode = 0;
++    NSUInteger modifiers = [event modifierFlags];
++
++    /*
++     * Check -[NSEvent modifierFlags] here.
++     *
++     * There is an NSEventType for an event notifying the change of
++     * -[NSEvent modifierFlags], NSEventTypeFlagsChanged, but these operations
++     * are performed for all events because a modifier state may change while
++     * the application is inactive (i.e. no events fire) and we don't want to
++     * wait for another modifier state change to detect such a change.
++     *
++     * NSEventModifierFlagCapsLock requires special treatment. The other flags
++     * are handled in a similar manner.
++     *
++     * NSEventModifierFlagCapsLock
++     * ---------------------------
++     *
++     * If the CapsLock state changes, "up" and "down" events will be fired in
++     * sequence, effectively updating the CapsLock state on the guest.
++     *
++     * The other flags
++     * ---------------
++     *
++     * If a flag is not set, fire "up" events for all keys which correspond to
++     * the flag. Note that "down" events are not fired here because the flags
++     * checked here do not tell which exact keys are down.
++     *
++     * If one of the keys corresponding to a flag is down, we rely on
++     * -[NSEvent keyCode] of an event whose -[NSEvent type] is
++     * NSEventTypeFlagsChanged to know the exact key that is down, which has
++     * the following two downsides:
++     * - It does not work when the application is inactive as described above.
++     * - It malfunctions *after* the modifier state is changed while the
++     *   application is inactive. This is because -[NSEvent keyCode] does not
++     *   tell whether the key is up or down, and requires inferring the current
++     *   state from the previous state. It is still possible to recover from
++     *   such a malfunction by completely taking your hands off the keyboard,
++     *   which hopefully makes this implementation usable enough.
++     */
++    if (!!(modifiers & NSEventModifierFlagCapsLock) !=
++        qkbd_state_modifier_get(kbd, QKBD_MOD_CAPSLOCK)) {
++        qkbd_state_key_event(kbd, Q_KEY_CODE_CAPS_LOCK, true);
++        qkbd_state_key_event(kbd, Q_KEY_CODE_CAPS_LOCK, false);
++    }
++
++    if (!(modifiers & NSEventModifierFlagShift)) {
++        qkbd_state_key_event(kbd, Q_KEY_CODE_SHIFT, false);
++        qkbd_state_key_event(kbd, Q_KEY_CODE_SHIFT_R, false);
++    }
++    if (!(modifiers & NSEventModifierFlagControl)) {
++        qkbd_state_key_event(kbd, Q_KEY_CODE_CTRL, false);
++        qkbd_state_key_event(kbd, Q_KEY_CODE_CTRL_R, false);
++    }
++    if (!(modifiers & NSEventModifierFlagOption)) {
++        if ([self isSwapOptionCommandEnabled]) {
++            qkbd_state_key_event(kbd, Q_KEY_CODE_META_L, false);
++            qkbd_state_key_event(kbd, Q_KEY_CODE_META_R, false);
++        } else {
++            qkbd_state_key_event(kbd, Q_KEY_CODE_ALT, false);
++            qkbd_state_key_event(kbd, Q_KEY_CODE_ALT_R, false);
++        }
++    }
++    if (!(modifiers & NSEventModifierFlagCommand)) {
++        if ([self isSwapOptionCommandEnabled]) {
++            qkbd_state_key_event(kbd, Q_KEY_CODE_ALT, false);
++            qkbd_state_key_event(kbd, Q_KEY_CODE_ALT_R, false);
++        } else {
++            qkbd_state_key_event(kbd, Q_KEY_CODE_META_L, false);
++            qkbd_state_key_event(kbd, Q_KEY_CODE_META_R, false);
++        }
++    }
++
++    switch ([event type]) {
++        case NSEventTypeFlagsChanged:
++            switch ([event keyCode]) {
++                case kVK_Shift:
++                    if (!!(modifiers & NSEventModifierFlagShift)) {
++                        [self toggleKey:Q_KEY_CODE_SHIFT];
++                    }
++                    return true;
++
++                case kVK_RightShift:
++                    if (!!(modifiers & NSEventModifierFlagShift)) {
++                        [self toggleKey:Q_KEY_CODE_SHIFT_R];
++                    }
++                    return true;
++
++                case kVK_Control:
++                    if (!!(modifiers & NSEventModifierFlagControl)) {
++                        [self toggleKey:Q_KEY_CODE_CTRL];
++                    }
++                    return true;
++
++                case kVK_RightControl:
++                    if (!!(modifiers & NSEventModifierFlagControl)) {
++                        [self 
toggleKey:Q_KEY_CODE_CTRL_R]; ++ } ++ return true; ++ ++ case kVK_Option: ++ if (!!(modifiers & NSEventModifierFlagOption)) { ++ if ([self isSwapOptionCommandEnabled]) { ++ [self toggleKey:Q_KEY_CODE_META_L]; ++ } else { ++ [self toggleKey:Q_KEY_CODE_ALT]; ++ } ++ } ++ return true; ++ ++ case kVK_RightOption: ++ if (!!(modifiers & NSEventModifierFlagOption)) { ++ if ([self isSwapOptionCommandEnabled]) { ++ [self toggleKey:Q_KEY_CODE_META_R]; ++ } else { ++ [self toggleKey:Q_KEY_CODE_ALT_R]; ++ } ++ } ++ return true; ++ ++ /* Don't pass command key changes to guest unless mouse is grabbed */ ++ case kVK_Command: ++ if (isMouseGrabbed && ++ !!(modifiers & NSEventModifierFlagCommand)) { ++ if ([self isSwapOptionCommandEnabled]) { ++ [self toggleKey:Q_KEY_CODE_ALT]; ++ } else { ++ [self toggleKey:Q_KEY_CODE_META_L]; ++ } ++ } ++ return true; ++ ++ case kVK_RightCommand: ++ if (isMouseGrabbed && ++ !!(modifiers & NSEventModifierFlagCommand)) { ++ if ([self isSwapOptionCommandEnabled]) { ++ [self toggleKey:Q_KEY_CODE_ALT_R]; ++ } else { ++ [self toggleKey:Q_KEY_CODE_META_R]; ++ } ++ } ++ return true; ++ ++ default: ++ return true; ++ } ++ case NSEventTypeKeyDown: ++ keycode = cocoa_keycode_to_qemu([event keyCode]); ++ ++ // forward command key combos to the host UI unless the mouse is grabbed ++ if (!isMouseGrabbed && ([event modifierFlags] & NSEventModifierFlagCommand)) { ++ return false; ++ } ++ ++ // default ++ ++ // handle control + alt Key Combos (ctrl+alt+[1..9,g] is reserved for QEMU) ++ if (([event modifierFlags] & NSEventModifierFlagControl) && ([event modifierFlags] & NSEventModifierFlagOption)) { ++ NSString *keychar = [event charactersIgnoringModifiers]; ++ if ([keychar length] == 1) { ++ char key = [keychar characterAtIndex:0]; ++ switch (key) { ++ ++ // enable graphic console ++ case '1' ... '9': ++ console_select(key - '0' - 1); /* ascii math */ ++ return true; ++ ++ // release the mouse grab ++ case 'g': ++ [self ungrabMouseLocked]; ++ return true; ++ } ++ } ++ } ++ ++ if (qemu_console_is_graphic(NULL)) { ++ qkbd_state_key_event(kbd, keycode, true); ++ } else { ++ [self handleMonitorInput: event]; ++ } ++ return true; ++ case NSEventTypeKeyUp: ++ keycode = cocoa_keycode_to_qemu([event keyCode]); ++ ++ // don't pass the guest a spurious key-up if we treated this ++ // command-key combo as a host UI action ++ if (!isMouseGrabbed && ([event modifierFlags] & NSEventModifierFlagCommand)) { ++ return true; ++ } ++ ++ if (qemu_console_is_graphic(NULL)) { ++ qkbd_state_key_event(kbd, keycode, false); ++ } ++ return true; ++ case NSEventTypeScrollWheel: ++ /* ++ * Send wheel events to the guest regardless of window focus. ++ * This is in-line with standard Mac OS X UI behaviour. ++ */ ++ ++ /* ++ * When deltaY is zero, it means that this scrolling event was ++ * either horizontal, or so fine that it only appears in ++ * scrollingDeltaY. So we drop the event. ++ */ ++ if ([event deltaY] != 0) { ++ /* Determine if this is a scroll up or scroll down event */ ++ int buttons = ([event deltaY] > 0) ? ++ INPUT_BUTTON_WHEEL_UP : INPUT_BUTTON_WHEEL_DOWN; ++ qemu_input_queue_btn(screen->dcl.con, buttons, true); ++ qemu_input_event_sync(); ++ qemu_input_queue_btn(screen->dcl.con, buttons, false); ++ qemu_input_event_sync(); ++ } ++ /* ++ * Since deltaY also reports scroll wheel events we prevent mouse ++ * movement code from executing. 
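++     * Returning true reports the event as handled, so it is not passed
++     * on to macOS (see the contract at the top of this method).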
++ */ ++ return true; ++ default: ++ return false; ++ } ++} ++ ++- (void) handleMouseEvent:(NSEvent *)event ++{ ++ if (!isMouseGrabbed) { ++ return; ++ } ++ ++ qemu_mutex_lock_iothread(); ++ ++ if (isAbsoluteEnabled) { ++ CGFloat d = (CGFloat)screen_height / [self frame].size.height; ++ NSPoint p = [event locationInWindow]; ++ // Note that the origin for Cocoa mouse coords is bottom left, not top left. ++ qemu_input_queue_abs(screen->dcl.con, INPUT_AXIS_X, p.x * d, 0, screen_width); ++ qemu_input_queue_abs(screen->dcl.con, INPUT_AXIS_Y, screen_height - p.y * d, 0, screen_height); ++ } else { ++ CGFloat d = (CGFloat)screen_height / [self convertSizeToBacking:[self frame].size].height; ++ qemu_input_queue_rel(screen->dcl.con, INPUT_AXIS_X, [event deltaX] * d); ++ qemu_input_queue_rel(screen->dcl.con, INPUT_AXIS_Y, [event deltaY] * d); ++ } ++ ++ qemu_input_event_sync(); ++ ++ qemu_mutex_unlock_iothread(); ++} ++ ++- (void) handleMouseEvent:(NSEvent *)event button:(InputButton)button down:(bool)down ++{ ++ if (!isMouseGrabbed) { ++ return; ++ } ++ ++ qemu_mutex_lock_iothread(); ++ qemu_input_queue_btn(screen->dcl.con, button, down); ++ qemu_mutex_unlock_iothread(); ++ ++ [self handleMouseEvent:event]; ++} ++ ++- (void) mouseExited:(NSEvent *)event ++{ ++ if (isAbsoluteEnabled && isMouseGrabbed) { ++ [self ungrabMouse]; ++ } ++} ++ ++- (void) mouseEntered:(NSEvent *)event ++{ ++ if (isAbsoluteEnabled && !isMouseGrabbed) { ++ [self grabMouse]; ++ } ++} ++ ++- (void) mouseMoved:(NSEvent *)event ++{ ++ [self handleMouseEvent:event]; ++} ++ ++- (void) mouseDown:(NSEvent *)event ++{ ++ [self handleMouseEvent:event button:INPUT_BUTTON_LEFT down:true]; ++} ++ ++- (void) rightMouseDown:(NSEvent *)event ++{ ++ [self handleMouseEvent:event button:INPUT_BUTTON_RIGHT down:true]; ++} ++ ++- (void) otherMouseDown:(NSEvent *)event ++{ ++ [self handleMouseEvent:event button:INPUT_BUTTON_MIDDLE down:true]; ++} ++ ++- (void) mouseDragged:(NSEvent *)event ++{ ++ [self handleMouseEvent:event]; ++} ++ ++- (void) rightMouseDragged:(NSEvent *)event ++{ ++ [self handleMouseEvent:event]; ++} ++ ++- (void) otherMouseDragged:(NSEvent *)event ++{ ++ [self handleMouseEvent:event]; ++} ++ ++- (void) mouseUp:(NSEvent *)event ++{ ++ if (!isMouseGrabbed) { ++ [self grabMouse]; ++ } ++ ++ [self handleMouseEvent:event button:INPUT_BUTTON_LEFT down:false]; ++} ++ ++- (void) rightMouseUp:(NSEvent *)event ++{ ++ [self handleMouseEvent:event button:INPUT_BUTTON_RIGHT down:false]; ++} ++ ++- (void) otherMouseUp:(NSEvent *)event ++{ ++ [self handleMouseEvent:event button:INPUT_BUTTON_MIDDLE down:false]; ++} ++ ++- (void) grabMouse ++{ ++ COCOA_DEBUG("QemuCocoaView: grabMouse\n"); ++ ++ if (qemu_name) ++ [[self window] setTitle:[NSString stringWithFormat:@"QEMU %s - (Press ctrl + alt + g to release Mouse)", qemu_name]]; ++ else ++ [[self window] setTitle:@"QEMU - (Press ctrl + alt + g to release Mouse)"]; ++ [self hideCursor]; ++ CGAssociateMouseAndMouseCursorPosition(isAbsoluteEnabled); ++ isMouseGrabbed = TRUE; // while isMouseGrabbed = TRUE, QemuCocoaApp sends all events to [cocoaView handleEvent:] ++} ++ ++- (void) ungrabMouse ++{ ++ qemu_mutex_lock_iothread(); ++ [self ungrabMouseLocked]; ++ qemu_mutex_unlock_iothread(); ++} ++ ++- (void) ungrabMouseLocked ++{ ++ COCOA_DEBUG("QemuCocoaView: ungrabMouseLocked\n"); ++ ++ if (qemu_name) ++ [[self window] setTitle:[NSString stringWithFormat:@"QEMU %s", qemu_name]]; ++ else ++ [[self window] setTitle:@"QEMU"]; ++ [self unhideCursor]; ++ CGAssociateMouseAndMouseCursorPosition(TRUE); ++ 
isMouseGrabbed = FALSE; ++ [self raiseAllButtonsLocked]; ++} ++ ++- (void) setAbsoluteEnabled:(BOOL)tIsAbsoluteEnabled { ++ isAbsoluteEnabled = tIsAbsoluteEnabled; ++ if (isMouseGrabbed) { ++ CGAssociateMouseAndMouseCursorPosition(isAbsoluteEnabled); ++ } ++} ++- (BOOL) isMouseGrabbed {return isMouseGrabbed;} ++- (BOOL) isAbsoluteEnabled {return isAbsoluteEnabled;} ++- (BOOL) isSwapOptionCommandEnabled {return screen->swap_option_command;} ++ ++- (void) raiseAllButtonsLocked ++{ ++ qemu_input_queue_btn(screen->dcl.con, INPUT_BUTTON_LEFT, false); ++ qemu_input_queue_btn(screen->dcl.con, INPUT_BUTTON_RIGHT, false); ++ qemu_input_queue_btn(screen->dcl.con, INPUT_BUTTON_MIDDLE, false); ++} ++ ++- (void) setNeedsDisplayForCursorX:(int)x ++ y:(int)y ++ width:(int)width ++ height:(int)height ++ screenHeight:(int)given_screen_height ++{ ++ CGRect clip_rect = compute_cursor_clip_rect(given_screen_height, x, y, ++ width, height); ++ CGRect draw_rect = [self convertCursorClipRectToDraw:clip_rect ++ screenHeight:given_screen_height ++ mouseX:x ++ mouseY:y]; ++ [self setNeedsDisplayInRect:draw_rect]; ++} ++ ++/* Displays the word pause on the screen */ ++- (void)displayPause ++{ ++ /* Coordinates have to be calculated each time because the window can change its size */ ++ int xCoord, yCoord, width, height; ++ xCoord = ([[self window] frame].size.width - [pauseLabel frame].size.width)/2; ++ yCoord = [[self window] frame].size.height - [pauseLabel frame].size.height - ([pauseLabel frame].size.height * .5); ++ width = [pauseLabel frame].size.width; ++ height = [pauseLabel frame].size.height; ++ [pauseLabel setFrame: NSMakeRect(xCoord, yCoord, width, height)]; ++ [self addSubview: pauseLabel]; ++} ++ ++/* Removes the word pause from the screen */ ++- (void)removePause ++{ ++ [pauseLabel removeFromSuperview]; ++} ++@end +diff --git a/ui/console.c b/ui/console.c +index 1103b65314..7a66ece151 100644 +--- a/ui/console.c ++++ b/ui/console.c +@@ -127,7 +127,7 @@ struct QemuConsole { + DisplayState *ds; + DisplaySurface *surface; + int dcls; +- DisplayChangeListener *gl; ++ void *dg; + bool gl_block; + int window_id; + +@@ -184,6 +184,7 @@ struct DisplayState { + QLIST_HEAD(, DisplayChangeListener) listeners; + }; + ++static const DisplayGLOps *display_gl_ops; + static DisplayState *display_state; + static QemuConsole *active_console; + static QTAILQ_HEAD(, QemuConsole) consoles = +@@ -203,7 +204,6 @@ static void gui_update(void *opaque) + uint64_t dcl_interval; + DisplayState *ds = opaque; + DisplayChangeListener *dcl; +- QemuConsole *con; + + ds->refreshing = true; + dpy_refresh(ds); +@@ -218,11 +218,6 @@ static void gui_update(void *opaque) + } + if (ds->update_interval != interval) { + ds->update_interval = interval; +- QTAILQ_FOREACH(con, &consoles, next) { +- if (con->hw_ops->update_interval) { +- con->hw_ops->update_interval(con->hw, interval); +- } +- } + trace_console_refresh(interval); + } + ds->last_update = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); +@@ -1109,8 +1104,14 @@ void console_select(unsigned int index) + } + } + if (s->surface) { +- dpy_gfx_update(s, 0, 0, surface_width(s->surface), +- surface_height(s->surface)); ++ int width = surface_width(s->surface); ++ int height = surface_height(s->surface); ++ if (display_gl_ops && ++ display_gl_ops->dpy_gl_scanout_get_enabled(s->dg)) { ++ dpy_gl_update(s, 0, 0, width, height); ++ } else { ++ dpy_gfx_update(s, 0, 0, width, height); ++ } + } + } + if (ds->have_text) { +@@ -1460,26 +1461,30 @@ void qemu_free_displaysurface(DisplaySurface *surface) + 
g_free(surface); + } + +-bool console_has_gl(QemuConsole *con) ++bool console_has_gl(void) ++{ ++ return display_gl_ops != NULL; ++} ++ ++void console_set_displayglcontext(QemuConsole *con, void *dg) + { +- return con->gl != NULL; ++ con->dg = dg; + } + +-static bool displaychangelistener_has_dmabuf(DisplayChangeListener *dcl) ++static bool console_has_dmabuf(QemuConsole *con) + { +- if (dcl->ops->dpy_has_dmabuf) { +- return dcl->ops->dpy_has_dmabuf(dcl); ++ if (display_gl_ops->dpy_has_dmabuf) { ++ return display_gl_ops->dpy_has_dmabuf(con->dg); + } + +- if (dcl->ops->dpy_gl_scanout_dmabuf) { ++ if (display_gl_ops->dpy_gl_scanout_dmabuf) { + return true; + } + + return false; + } + +-static bool dpy_compatible_with(QemuConsole *con, +- DisplayChangeListener *dcl, Error **errp) ++static bool dpy_compatible_with(QemuConsole *con, Error **errp) + { + ERRP_GUARD(); + int flags; +@@ -1487,14 +1492,13 @@ static bool dpy_compatible_with(QemuConsole *con, + flags = con->hw_ops->get_flags ? con->hw_ops->get_flags(con->hw) : 0; + + if (flags & GRAPHIC_FLAGS_GL && +- !console_has_gl(con)) { ++ !console_has_gl()) { + error_setg(errp, "The console requires a GL context."); + return false; +- + } + + if (flags & GRAPHIC_FLAGS_DMABUF && +- !displaychangelistener_has_dmabuf(dcl)) { ++ !console_has_dmabuf(con)) { + error_setg(errp, "The console requires display DMABUF support."); + return false; + } +@@ -1502,6 +1506,16 @@ static bool dpy_compatible_with(QemuConsole *con, + return true; + } + ++void register_displayglops(const DisplayGLOps *dg_ops) ++{ ++ if (display_gl_ops) { ++ error_report("can't register two opengl operators"); ++ exit(1); ++ } ++ ++ display_gl_ops = dg_ops; ++} ++ + void register_displaychangelistener(DisplayChangeListener *dcl) + { + static const char nodev[] = +@@ -1512,20 +1526,18 @@ void register_displaychangelistener(DisplayChangeListener *dcl) + + assert(!dcl->ds); + +- if (dcl->ops->dpy_gl_ctx_create) { +- /* display has opengl support */ +- assert(dcl->con); +- if (dcl->con->gl) { +- fprintf(stderr, "can't register two opengl displays (%s, %s)\n", +- dcl->ops->dpy_name, dcl->con->gl->ops->dpy_name); ++ if (dcl->con) { ++ if (!dpy_compatible_with(dcl->con, &err)) { ++ error_report_err(err); + exit(1); + } +- dcl->con->gl = dcl; +- } +- +- if (dcl->con && !dpy_compatible_with(dcl->con, dcl, &err)) { +- error_report_err(err); +- exit(1); ++ } else { ++ QTAILQ_FOREACH(con, &consoles, next) { ++ if (!dpy_compatible_with(con, &err)) { ++ error_report_err(err); ++ exit(1); ++ } ++ } + } + + trace_displaychangelistener_register(dcl, dcl->ops->dpy_name); +@@ -1842,86 +1854,90 @@ bool dpy_cursor_define_supported(QemuConsole *con) + QEMUGLContext dpy_gl_ctx_create(QemuConsole *con, + struct QEMUGLParams *qparams) + { +- assert(con->gl); +- return con->gl->ops->dpy_gl_ctx_create(con->gl, qparams); ++ assert(display_gl_ops); ++ return display_gl_ops->dpy_gl_ctx_create(con->dg, qparams); + } + + void dpy_gl_ctx_destroy(QemuConsole *con, QEMUGLContext ctx) + { +- assert(con->gl); +- con->gl->ops->dpy_gl_ctx_destroy(con->gl, ctx); ++ assert(display_gl_ops); ++ display_gl_ops->dpy_gl_ctx_destroy(con->dg, ctx); + } + + int dpy_gl_ctx_make_current(QemuConsole *con, QEMUGLContext ctx) + { +- assert(con->gl); +- return con->gl->ops->dpy_gl_ctx_make_current(con->gl, ctx); ++ assert(display_gl_ops); ++ return display_gl_ops->dpy_gl_ctx_make_current(con->dg, ctx); + } + + void dpy_gl_scanout_disable(QemuConsole *con) + { +- assert(con->gl); +- con->gl->ops->dpy_gl_scanout_disable(con->gl); ++ 
assert(display_gl_ops); ++ display_gl_ops->dpy_gl_scanout_disable(con->dg); + } + + void dpy_gl_scanout_texture(QemuConsole *con, + uint32_t backing_id, +- bool backing_y_0_top, +- uint32_t backing_width, +- uint32_t backing_height, ++ DisplayGLTextureBorrower backing_borrow, + uint32_t x, uint32_t y, + uint32_t width, uint32_t height) + { +- assert(con->gl); +- con->gl->ops->dpy_gl_scanout_texture(con->gl, backing_id, +- backing_y_0_top, +- backing_width, backing_height, +- x, y, width, height); ++ assert(display_gl_ops); ++ display_gl_ops->dpy_gl_scanout_texture(con->dg, backing_id, backing_borrow, ++ x, y, width, height); + } + + void dpy_gl_scanout_dmabuf(QemuConsole *con, + QemuDmaBuf *dmabuf) + { +- assert(con->gl); +- con->gl->ops->dpy_gl_scanout_dmabuf(con->gl, dmabuf); ++ assert(display_gl_ops); ++ display_gl_ops->dpy_gl_scanout_dmabuf(con->dg, dmabuf); + } + + void dpy_gl_cursor_dmabuf(QemuConsole *con, QemuDmaBuf *dmabuf, + bool have_hot, uint32_t hot_x, uint32_t hot_y) + { +- assert(con->gl); ++ assert(display_gl_ops); + +- if (con->gl->ops->dpy_gl_cursor_dmabuf) { +- con->gl->ops->dpy_gl_cursor_dmabuf(con->gl, dmabuf, +- have_hot, hot_x, hot_y); ++ if (display_gl_ops->dpy_gl_cursor_dmabuf) { ++ display_gl_ops->dpy_gl_cursor_dmabuf(con->dg, dmabuf, ++ have_hot, hot_x, hot_y); + } + } + + void dpy_gl_cursor_position(QemuConsole *con, + uint32_t pos_x, uint32_t pos_y) + { +- assert(con->gl); ++ assert(display_gl_ops); + +- if (con->gl->ops->dpy_gl_cursor_position) { +- con->gl->ops->dpy_gl_cursor_position(con->gl, pos_x, pos_y); ++ if (display_gl_ops->dpy_gl_cursor_position) { ++ display_gl_ops->dpy_gl_cursor_position(con->dg, pos_x, pos_y); + } + } + + void dpy_gl_release_dmabuf(QemuConsole *con, + QemuDmaBuf *dmabuf) + { +- assert(con->gl); ++ assert(display_gl_ops); + +- if (con->gl->ops->dpy_gl_release_dmabuf) { +- con->gl->ops->dpy_gl_release_dmabuf(con->gl, dmabuf); ++ if (display_gl_ops->dpy_gl_release_dmabuf) { ++ display_gl_ops->dpy_gl_release_dmabuf(con->dg, dmabuf); + } + } + + void dpy_gl_update(QemuConsole *con, + uint32_t x, uint32_t y, uint32_t w, uint32_t h) + { +- assert(con->gl); +- con->gl->ops->dpy_gl_update(con->gl, x, y, w, h); ++ DisplayChangeListener *dcl; ++ ++ QLIST_FOREACH(dcl, &con->ds->listeners, next) { ++ if (con != (dcl->con ? 
dcl->con : active_console)) { ++ continue; ++ } ++ if (dcl->ops->dpy_gl_update) { ++ dcl->ops->dpy_gl_update(dcl, x, y, w, h); ++ } ++} + } + + /***********************************************************/ +@@ -2032,7 +2048,7 @@ void graphic_console_close(QemuConsole *con) + object_property_set_link(OBJECT(con), "device", NULL, &error_abort); + graphic_console_set_hwops(con, &unused_ops, NULL); + +- if (con->gl) { ++ if (display_gl_ops) { + dpy_gl_scanout_disable(con); + } + surface = qemu_create_placeholder_surface(width, height, unplugged); +diff --git a/ui/egl-context.c b/ui/egl-context.c +index 368ffa49d8..07c4c34ec4 100644 +--- a/ui/egl-context.c ++++ b/ui/egl-context.c +@@ -1,8 +1,7 @@ + #include "qemu/osdep.h" + #include "ui/egl-context.h" + +-QEMUGLContext qemu_egl_create_context(DisplayChangeListener *dcl, +- QEMUGLParams *params) ++QEMUGLContext qemu_egl_create_context(void *dg, QEMUGLParams *params) + { + EGLContext ctx; + EGLint ctx_att_core[] = { +@@ -24,13 +23,12 @@ QEMUGLContext qemu_egl_create_context(DisplayChangeListener *dcl, + return ctx; + } + +-void qemu_egl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx) ++void qemu_egl_destroy_context(void *dg, QEMUGLContext ctx) + { + eglDestroyContext(qemu_egl_display, ctx); + } + +-int qemu_egl_make_context_current(DisplayChangeListener *dcl, +- QEMUGLContext ctx) ++int qemu_egl_make_context_current(void *dg, QEMUGLContext ctx) + { + return eglMakeCurrent(qemu_egl_display, + EGL_NO_SURFACE, EGL_NO_SURFACE, ctx); +diff --git a/ui/egl-headless.c b/ui/egl-headless.c +index 75404e0e87..183e98a365 100644 +--- a/ui/egl-headless.c ++++ b/ui/egl-headless.c +@@ -38,37 +38,39 @@ static void egl_gfx_switch(DisplayChangeListener *dcl, + edpy->ds = new_surface; + } + +-static QEMUGLContext egl_create_context(DisplayChangeListener *dcl, ++static QEMUGLContext egl_create_context(void *dg, + QEMUGLParams *params) + { + eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE, + qemu_egl_rn_ctx); +- return qemu_egl_create_context(dcl, params); ++ return qemu_egl_create_context(dg, params); + } + +-static void egl_scanout_disable(DisplayChangeListener *dcl) ++static bool egl_scanout_get_enabled(void *dg) + { +- egl_dpy *edpy = container_of(dcl, egl_dpy, dcl); ++ return ((egl_dpy *)dg)->guest_fb.texture != 0; ++} + ++static void egl_scanout_disable(void *dg) ++{ ++ egl_dpy *edpy = dg; + egl_fb_destroy(&edpy->guest_fb); + egl_fb_destroy(&edpy->blit_fb); + } + +-static void egl_scanout_texture(DisplayChangeListener *dcl, +- uint32_t backing_id, +- bool backing_y_0_top, +- uint32_t backing_width, +- uint32_t backing_height, +- uint32_t x, uint32_t y, +- uint32_t w, uint32_t h) ++static void egl_scanout_imported_texture(void *dg, ++ uint32_t backing_texture, ++ bool backing_y_0_top, ++ uint32_t backing_width, ++ uint32_t backing_height) + { +- egl_dpy *edpy = container_of(dcl, egl_dpy, dcl); ++ egl_dpy *edpy = dg; + + edpy->y_0_top = backing_y_0_top; + + /* source framebuffer */ + egl_fb_setup_for_tex(&edpy->guest_fb, +- backing_width, backing_height, backing_id, false); ++ backing_width, backing_height, backing_texture, false); + + /* dest framebuffer */ + if (edpy->blit_fb.width != backing_width || +@@ -78,24 +80,40 @@ static void egl_scanout_texture(DisplayChangeListener *dcl, + } + } + +-static void egl_scanout_dmabuf(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf) ++static void egl_scanout_texture(void *dg, ++ uint32_t backing_id, ++ DisplayGLTextureBorrower backing_borrower, ++ uint32_t x, uint32_t y, ++ uint32_t w, 
uint32_t h) ++{ ++ bool backing_y_0_top; ++ uint32_t backing_width; ++ uint32_t backing_height; ++ ++ GLuint backing_texture = backing_borrower(backing_id, &backing_y_0_top, ++ &backing_width, &backing_height); ++ if (backing_texture) { ++ egl_scanout_imported_texture(dg, backing_texture, backing_y_0_top, ++ backing_width, backing_height); ++ } ++} ++ ++static void egl_scanout_dmabuf(void *dg, QemuDmaBuf *dmabuf) + { + egl_dmabuf_import_texture(dmabuf); + if (!dmabuf->texture) { + return; + } + +- egl_scanout_texture(dcl, dmabuf->texture, +- false, dmabuf->width, dmabuf->height, +- 0, 0, dmabuf->width, dmabuf->height); ++ egl_scanout_imported_texture(dg, dmabuf->texture, ++ false, dmabuf->width, dmabuf->height); + } + +-static void egl_cursor_dmabuf(DisplayChangeListener *dcl, ++static void egl_cursor_dmabuf(void *dg, + QemuDmaBuf *dmabuf, bool have_hot, + uint32_t hot_x, uint32_t hot_y) + { +- egl_dpy *edpy = container_of(dcl, egl_dpy, dcl); ++ egl_dpy *edpy = dg; + + if (dmabuf) { + egl_dmabuf_import_texture(dmabuf); +@@ -109,17 +127,16 @@ static void egl_cursor_dmabuf(DisplayChangeListener *dcl, + } + } + +-static void egl_cursor_position(DisplayChangeListener *dcl, ++static void egl_cursor_position(void *dg, + uint32_t pos_x, uint32_t pos_y) + { +- egl_dpy *edpy = container_of(dcl, egl_dpy, dcl); ++ egl_dpy *edpy = dg; + + edpy->pos_x = pos_x; + edpy->pos_y = pos_y; + } + +-static void egl_release_dmabuf(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf) ++static void egl_release_dmabuf(void *dg, QemuDmaBuf *dmabuf) + { + egl_dmabuf_release_texture(dmabuf); + } +@@ -151,22 +168,26 @@ static void egl_scanout_flush(DisplayChangeListener *dcl, + dpy_gfx_update(edpy->dcl.con, x, y, w, h); + } + +-static const DisplayChangeListenerOps egl_ops = { ++static const DisplayGLOps dg_egl_ops = { ++ .dpy_gl_ctx_create = egl_create_context, ++ .dpy_gl_ctx_destroy = qemu_egl_destroy_context, ++ .dpy_gl_ctx_make_current = qemu_egl_make_context_current, ++ ++ .dpy_gl_scanout_get_enabled = egl_scanout_get_enabled, ++ .dpy_gl_scanout_disable = egl_scanout_disable, ++ .dpy_gl_scanout_texture = egl_scanout_texture, ++ .dpy_gl_scanout_dmabuf = egl_scanout_dmabuf, ++ .dpy_gl_cursor_dmabuf = egl_cursor_dmabuf, ++ .dpy_gl_cursor_position = egl_cursor_position, ++ .dpy_gl_release_dmabuf = egl_release_dmabuf, ++}; ++ ++static const DisplayChangeListenerOps dcl_egl_ops = { + .dpy_name = "egl-headless", + .dpy_refresh = egl_refresh, + .dpy_gfx_update = egl_gfx_update, + .dpy_gfx_switch = egl_gfx_switch, + +- .dpy_gl_ctx_create = egl_create_context, +- .dpy_gl_ctx_destroy = qemu_egl_destroy_context, +- .dpy_gl_ctx_make_current = qemu_egl_make_context_current, +- +- .dpy_gl_scanout_disable = egl_scanout_disable, +- .dpy_gl_scanout_texture = egl_scanout_texture, +- .dpy_gl_scanout_dmabuf = egl_scanout_dmabuf, +- .dpy_gl_cursor_dmabuf = egl_cursor_dmabuf, +- .dpy_gl_cursor_position = egl_cursor_position, +- .dpy_gl_release_dmabuf = egl_release_dmabuf, + .dpy_gl_update = egl_scanout_flush, + }; + +@@ -187,6 +208,8 @@ static void egl_headless_init(DisplayState *ds, DisplayOptions *opts) + exit(1); + } + ++ register_displayglops(&dg_egl_ops); ++ + for (idx = 0;; idx++) { + con = qemu_console_lookup_by_index(idx); + if (!con || !qemu_console_is_graphic(con)) { +@@ -195,8 +218,9 @@ static void egl_headless_init(DisplayState *ds, DisplayOptions *opts) + + edpy = g_new0(egl_dpy, 1); + edpy->dcl.con = con; +- edpy->dcl.ops = &egl_ops; ++ edpy->dcl.ops = &dcl_egl_ops; + edpy->gls = qemu_gl_init_shader(); ++ 
console_set_displayglcontext(con, edpy); + register_displaychangelistener(&edpy->dcl); + } + } +@@ -214,6 +238,4 @@ static void register_egl(void) + + type_init(register_egl); + +-#ifdef CONFIG_OPENGL + module_dep("ui-opengl"); +-#endif +diff --git a/ui/egl-helpers.c b/ui/egl-helpers.c +index 6d0cb2b5cb..ac30d990ec 100644 +--- a/ui/egl-helpers.c ++++ b/ui/egl-helpers.c +@@ -291,7 +291,7 @@ void egl_dmabuf_release_texture(QemuDmaBuf *dmabuf) + + /* ---------------------------------------------------------------------- */ + +-EGLSurface qemu_egl_init_surface_x11(EGLContext ectx, EGLNativeWindowType win) ++EGLSurface qemu_egl_init_surface(EGLContext ectx, EGLNativeWindowType win) + { + EGLSurface esurface; + EGLBoolean b; +@@ -315,6 +315,70 @@ EGLSurface qemu_egl_init_surface_x11(EGLContext ectx, EGLNativeWindowType win) + + /* ---------------------------------------------------------------------- */ + ++static int qemu_egl_init_dpy(EGLDisplay dpy, DisplayGLMode mode) ++{ ++ static const EGLint conf_att_core[] = { ++ EGL_SURFACE_TYPE, EGL_WINDOW_BIT, ++ EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, ++ EGL_RED_SIZE, 5, ++ EGL_GREEN_SIZE, 5, ++ EGL_BLUE_SIZE, 5, ++ EGL_ALPHA_SIZE, 0, ++ EGL_NONE, ++ }; ++ static const EGLint conf_att_gles[] = { ++ EGL_SURFACE_TYPE, EGL_WINDOW_BIT, ++ EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, ++ EGL_RED_SIZE, 5, ++ EGL_GREEN_SIZE, 5, ++ EGL_BLUE_SIZE, 5, ++ EGL_ALPHA_SIZE, 0, ++ EGL_NONE, ++ }; ++ EGLint major, minor; ++ EGLBoolean b; ++ EGLint n; ++ bool gles = (mode == DISPLAYGL_MODE_ES); ++ ++ qemu_egl_display = dpy; ++ ++ b = eglInitialize(qemu_egl_display, &major, &minor); ++ if (b == EGL_FALSE) { ++ error_report("egl: eglInitialize failed"); ++ return -1; ++ } ++ ++ b = eglBindAPI(gles ? EGL_OPENGL_ES_API : EGL_OPENGL_API); ++ if (b == EGL_FALSE) { ++ error_report("egl: eglBindAPI failed (%s mode)", ++ gles ? "gles" : "core"); ++ return -1; ++ } ++ ++ b = eglChooseConfig(qemu_egl_display, ++ gles ? conf_att_gles : conf_att_core, ++ &qemu_egl_config, 1, &n); ++ if (b == EGL_FALSE || n != 1) { ++ error_report("egl: eglChooseConfig failed (%s mode)", ++ gles ? "gles" : "core"); ++ return -1; ++ } ++ ++ qemu_egl_mode = gles ? DISPLAYGL_MODE_ES : DISPLAYGL_MODE_CORE; ++ return 0; ++} ++ ++int qemu_egl_init_dpy_cocoa(DisplayGLMode mode) ++{ ++ EGLDisplay dpy = eglGetDisplay(EGL_DEFAULT_DISPLAY); ++ if (dpy == EGL_NO_DISPLAY) { ++ error_report("egl: eglGetDisplay failed"); ++ return -1; ++ } ++ ++ return qemu_egl_init_dpy(dpy, mode); ++} ++ + #if defined(CONFIG_X11) || defined(CONFIG_GBM) + + /* +@@ -345,8 +409,9 @@ EGLSurface qemu_egl_init_surface_x11(EGLContext ectx, EGLNativeWindowType win) + * platform extensions (EGL_KHR_platform_gbm and friends) yet it doesn't seem + * like mesa will be able to advertise these (even though it can do EGL 1.5). 
+ */ +-static EGLDisplay qemu_egl_get_display(EGLNativeDisplayType native, +- EGLenum platform) ++static int qemu_egl_init_dpy_platform(EGLNativeDisplayType native, ++ EGLenum platform, ++ DisplayGLMode mode) + { + EGLDisplay dpy = EGL_NO_DISPLAY; + +@@ -363,83 +428,30 @@ static EGLDisplay qemu_egl_get_display(EGLNativeDisplayType native, + /* fallback */ + dpy = eglGetDisplay(native); + } +- return dpy; +-} + +-static int qemu_egl_init_dpy(EGLNativeDisplayType dpy, +- EGLenum platform, +- DisplayGLMode mode) +-{ +- static const EGLint conf_att_core[] = { +- EGL_SURFACE_TYPE, EGL_WINDOW_BIT, +- EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, +- EGL_RED_SIZE, 5, +- EGL_GREEN_SIZE, 5, +- EGL_BLUE_SIZE, 5, +- EGL_ALPHA_SIZE, 0, +- EGL_NONE, +- }; +- static const EGLint conf_att_gles[] = { +- EGL_SURFACE_TYPE, EGL_WINDOW_BIT, +- EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, +- EGL_RED_SIZE, 5, +- EGL_GREEN_SIZE, 5, +- EGL_BLUE_SIZE, 5, +- EGL_ALPHA_SIZE, 0, +- EGL_NONE, +- }; +- EGLint major, minor; +- EGLBoolean b; +- EGLint n; +- bool gles = (mode == DISPLAYGL_MODE_ES); +- +- qemu_egl_display = qemu_egl_get_display(dpy, platform); +- if (qemu_egl_display == EGL_NO_DISPLAY) { ++ if (dpy == EGL_NO_DISPLAY) { + error_report("egl: eglGetDisplay failed"); + return -1; + } + +- b = eglInitialize(qemu_egl_display, &major, &minor); +- if (b == EGL_FALSE) { +- error_report("egl: eglInitialize failed"); +- return -1; +- } +- +- b = eglBindAPI(gles ? EGL_OPENGL_ES_API : EGL_OPENGL_API); +- if (b == EGL_FALSE) { +- error_report("egl: eglBindAPI failed (%s mode)", +- gles ? "gles" : "core"); +- return -1; +- } +- +- b = eglChooseConfig(qemu_egl_display, +- gles ? conf_att_gles : conf_att_core, +- &qemu_egl_config, 1, &n); +- if (b == EGL_FALSE || n != 1) { +- error_report("egl: eglChooseConfig failed (%s mode)", +- gles ? "gles" : "core"); +- return -1; +- } +- +- qemu_egl_mode = gles ? DISPLAYGL_MODE_ES : DISPLAYGL_MODE_CORE; +- return 0; ++ return qemu_egl_init_dpy(dpy, mode); + } + + int qemu_egl_init_dpy_x11(EGLNativeDisplayType dpy, DisplayGLMode mode) + { + #ifdef EGL_KHR_platform_x11 +- return qemu_egl_init_dpy(dpy, EGL_PLATFORM_X11_KHR, mode); ++ return qemu_egl_init_dpy_platform(dpy, EGL_PLATFORM_X11_KHR, mode); + #else +- return qemu_egl_init_dpy(dpy, 0, mode); ++ return qemu_egl_init_dpy_platform(dpy, 0, mode); + #endif + } + + int qemu_egl_init_dpy_mesa(EGLNativeDisplayType dpy, DisplayGLMode mode) + { + #ifdef EGL_MESA_platform_gbm +- return qemu_egl_init_dpy(dpy, EGL_PLATFORM_GBM_MESA, mode); ++ return qemu_egl_init_dpy_platform(dpy, EGL_PLATFORM_GBM_MESA, mode); + #else +- return qemu_egl_init_dpy(dpy, 0, mode); ++ return qemu_egl_init_dpy_platform(dpy, 0, mode); + #endif + } + +diff --git a/ui/gtk-egl.c b/ui/gtk-egl.c +index 2a2e6d3a17..43be2a3610 100644 +--- a/ui/gtk-egl.c ++++ b/ui/gtk-egl.c +@@ -53,7 +53,7 @@ void gd_egl_init(VirtualConsole *vc) + } + + vc->gfx.ectx = qemu_egl_init_ctx(); +- vc->gfx.esurface = qemu_egl_init_surface_x11 ++ vc->gfx.esurface = qemu_egl_init_surface + (vc->gfx.ectx, (EGLNativeWindowType)x11_window); + + assert(vc->gfx.esurface); +@@ -116,8 +116,8 @@ void gd_egl_refresh(DisplayChangeListener *dcl) + { + VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); + +- vc->gfx.dcl.update_interval = gd_monitor_update_interval( +- vc->window ? vc->window : vc->gfx.drawing_area); ++ gd_update_monitor_refresh_rate( ++ vc, vc->window ? 
vc->window : vc->gfx.drawing_area); + + if (!vc->gfx.esurface) { + gd_egl_init(vc); +@@ -164,33 +164,37 @@ void gd_egl_switch(DisplayChangeListener *dcl, + } + } + +-QEMUGLContext gd_egl_create_context(DisplayChangeListener *dcl, +- QEMUGLParams *params) ++QEMUGLContext gd_egl_create_context(void *dg, QEMUGLParams *params) + { +- VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); ++ VirtualConsole *vc = dg; + + eglMakeCurrent(qemu_egl_display, vc->gfx.esurface, + vc->gfx.esurface, vc->gfx.ectx); +- return qemu_egl_create_context(dcl, params); ++ return qemu_egl_create_context(dg, params); + } + +-void gd_egl_scanout_disable(DisplayChangeListener *dcl) ++bool gd_egl_scanout_get_enabled(void *dg) + { +- VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); ++ return ((VirtualConsole *)dg)->gfx.scanout_mode; ++} ++ ++void gd_egl_scanout_disable(void *dg) ++{ ++ VirtualConsole *vc = dg; + + vc->gfx.w = 0; + vc->gfx.h = 0; + gtk_egl_set_scanout_mode(vc, false); + } + +-void gd_egl_scanout_texture(DisplayChangeListener *dcl, +- uint32_t backing_id, bool backing_y_0_top, +- uint32_t backing_width, uint32_t backing_height, +- uint32_t x, uint32_t y, +- uint32_t w, uint32_t h) ++static void gd_egl_scanout_borrowed_texture(VirtualConsole *vc, ++ uint32_t backing_id, ++ bool backing_y_0_top, ++ uint32_t backing_width, ++ uint32_t backing_height, ++ uint32_t x, uint32_t y, ++ uint32_t w, uint32_t h) + { +- VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); +- + vc->gfx.x = x; + vc->gfx.y = y; + vc->gfx.w = w; +@@ -205,8 +209,25 @@ void gd_egl_scanout_texture(DisplayChangeListener *dcl, + backing_id, false); + } + +-void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf) ++void gd_egl_scanout_texture(void *dg, uint32_t backing_id, ++ DisplayGLTextureBorrower backing_borrow, ++ uint32_t x, uint32_t y, ++ uint32_t w, uint32_t h) ++{ ++ bool backing_y_0_top; ++ uint32_t backing_width; ++ uint32_t backing_height; ++ ++ GLuint backing_texture = backing_borrow(backing_id, &backing_y_0_top, ++ &backing_width, &backing_height); ++ if (backing_texture) { ++ gd_egl_scanout_borrowed_texture(dg, backing_texture, backing_y_0_top, ++ backing_width, backing_height, ++ x, y, w, h); ++ } ++} ++ ++void gd_egl_scanout_dmabuf(void *dg, QemuDmaBuf *dmabuf) + { + #ifdef CONFIG_GBM + egl_dmabuf_import_texture(dmabuf); +@@ -214,18 +235,18 @@ void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl, + return; + } + +- gd_egl_scanout_texture(dcl, dmabuf->texture, +- false, dmabuf->width, dmabuf->height, +- 0, 0, dmabuf->width, dmabuf->height); ++ gd_egl_scanout_borrowed_texture(dg, dmabuf->texture, ++ false, dmabuf->width, dmabuf->height, ++ 0, 0, dmabuf->width, dmabuf->height); + #endif + } + +-void gd_egl_cursor_dmabuf(DisplayChangeListener *dcl, ++void gd_egl_cursor_dmabuf(void *dg, + QemuDmaBuf *dmabuf, bool have_hot, + uint32_t hot_x, uint32_t hot_y) + { + #ifdef CONFIG_GBM +- VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); ++ VirtualConsole *vc = dg; + + if (dmabuf) { + egl_dmabuf_import_texture(dmabuf); +@@ -240,17 +261,15 @@ void gd_egl_cursor_dmabuf(DisplayChangeListener *dcl, + #endif + } + +-void gd_egl_cursor_position(DisplayChangeListener *dcl, +- uint32_t pos_x, uint32_t pos_y) ++void gd_egl_cursor_position(void *dg, uint32_t pos_x, uint32_t pos_y) + { +- VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); ++ VirtualConsole *vc = dg; + + vc->gfx.cursor_x = pos_x * vc->gfx.scale_x; + vc->gfx.cursor_y = pos_y * vc->gfx.scale_y; + 
} + +-void gd_egl_release_dmabuf(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf) ++void gd_egl_release_dmabuf(void *dg, QemuDmaBuf *dmabuf) + { + #ifdef CONFIG_GBM + egl_dmabuf_release_texture(dmabuf); +@@ -304,10 +323,9 @@ void gtk_egl_init(DisplayGLMode mode) + display_opengl = 1; + } + +-int gd_egl_make_current(DisplayChangeListener *dcl, +- QEMUGLContext ctx) ++int gd_egl_make_current(void *dg, QEMUGLContext ctx) + { +- VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); ++ VirtualConsole *vc = dg; + + return eglMakeCurrent(qemu_egl_display, vc->gfx.esurface, + vc->gfx.esurface, ctx); +diff --git a/ui/gtk-gl-area.c b/ui/gtk-gl-area.c +index dd5783fec7..e5b198ac53 100644 +--- a/ui/gtk-gl-area.c ++++ b/ui/gtk-gl-area.c +@@ -139,10 +139,9 @@ void gd_gl_area_switch(DisplayChangeListener *dcl, + } + } + +-QEMUGLContext gd_gl_area_create_context(DisplayChangeListener *dcl, +- QEMUGLParams *params) ++QEMUGLContext gd_gl_area_create_context(void *dg, QEMUGLParams *params) + { +- VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); ++ VirtualConsole *vc = dg; + GdkWindow *window; + GdkGLContext *ctx; + GError *err = NULL; +@@ -168,21 +167,24 @@ QEMUGLContext gd_gl_area_create_context(DisplayChangeListener *dcl, + return ctx; + } + +-void gd_gl_area_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx) ++void gd_gl_area_destroy_context(void *dg, QEMUGLContext ctx) + { + /* FIXME */ + } + +-void gd_gl_area_scanout_texture(DisplayChangeListener *dcl, +- uint32_t backing_id, +- bool backing_y_0_top, +- uint32_t backing_width, +- uint32_t backing_height, +- uint32_t x, uint32_t y, +- uint32_t w, uint32_t h) ++bool gd_gl_area_scanout_get_enabled(void *dg) + { +- VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); ++ return ((VirtualConsole *)dg)->gfx.scanout_mode; ++} + ++static void gd_gl_area_scanout_borrowed_texture(VirtualConsole *vc, ++ uint32_t backing_id, ++ bool backing_y_0_top, ++ uint32_t backing_width, ++ uint32_t backing_height, ++ uint32_t x, uint32_t y, ++ uint32_t w, uint32_t h) ++{ + vc->gfx.x = x; + vc->gfx.y = y; + vc->gfx.w = w; +@@ -201,11 +203,29 @@ void gd_gl_area_scanout_texture(DisplayChangeListener *dcl, + backing_id, false); + } + +-void gd_gl_area_scanout_disable(DisplayChangeListener *dcl) ++void gd_gl_area_scanout_texture(void *dg, ++ uint32_t backing_id, ++ DisplayGLTextureBorrower backing_borrow, ++ uint32_t x, uint32_t y, ++ uint32_t w, uint32_t h) + { +- VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); ++ bool backing_y_0_top; ++ uint32_t backing_width; ++ uint32_t backing_height; ++ ++ GLuint backing_texture = backing_borrow(backing_id, &backing_y_0_top, ++ &backing_width, &backing_height); ++ if (backing_texture) { ++ gd_gl_area_scanout_borrowed_texture(dg, backing_texture, ++ backing_y_0_top, ++ backing_width, backing_height, ++ x, y, w, h); ++ } ++} + +- gtk_gl_area_set_scanout_mode(vc, false); ++void gd_gl_area_scanout_disable(void *dg) ++{ ++ gtk_gl_area_set_scanout_mode(dg, false); + } + + void gd_gl_area_scanout_flush(DisplayChangeListener *dcl, +@@ -216,11 +236,10 @@ void gd_gl_area_scanout_flush(DisplayChangeListener *dcl, + gtk_gl_area_queue_render(GTK_GL_AREA(vc->gfx.drawing_area)); + } + +-void gd_gl_area_scanout_dmabuf(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf) ++void gd_gl_area_scanout_dmabuf(void *dg, QemuDmaBuf *dmabuf) + { + #ifdef CONFIG_GBM +- VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); ++ VirtualConsole *vc = dg; + + 
gtk_gl_area_make_current(GTK_GL_AREA(vc->gfx.drawing_area)); + egl_dmabuf_import_texture(dmabuf); +@@ -228,9 +247,9 @@ void gd_gl_area_scanout_dmabuf(DisplayChangeListener *dcl, + return; + } + +- gd_gl_area_scanout_texture(dcl, dmabuf->texture, +- false, dmabuf->width, dmabuf->height, +- 0, 0, dmabuf->width, dmabuf->height); ++ gd_gl_area_scanout_borrowed_texture(dg, dmabuf->texture, ++ false, dmabuf->width, dmabuf->height, ++ 0, 0, dmabuf->width, dmabuf->height); + #endif + } + +@@ -239,8 +258,7 @@ void gtk_gl_area_init(void) + display_opengl = 1; + } + +-int gd_gl_area_make_current(DisplayChangeListener *dcl, +- QEMUGLContext ctx) ++int gd_gl_area_make_current(void *dg, QEMUGLContext ctx) + { + gdk_gl_context_make_current(ctx); + return 0; +diff --git a/ui/gtk.c b/ui/gtk.c +index 376b4d528d..bb246dd74d 100644 +--- a/ui/gtk.c ++++ b/ui/gtk.c +@@ -341,7 +341,7 @@ static void gd_update_full_redraw(VirtualConsole *vc) + int ww, wh; + ww = gdk_window_get_width(gtk_widget_get_window(area)); + wh = gdk_window_get_height(gtk_widget_get_window(area)); +-#if defined(CONFIG_OPENGL) ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + if (vc->gfx.gls && gtk_use_gl_area) { + gtk_gl_area_queue_render(GTK_GL_AREA(vc->gfx.drawing_area)); + return; +@@ -560,11 +560,11 @@ static const DisplayChangeListenerOps dcl_ops = { + }; + + +-#if defined(CONFIG_OPENGL) ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + +-static bool gd_has_dmabuf(DisplayChangeListener *dcl) ++static bool gd_has_dmabuf(void *dg) + { +- VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); ++ VirtualConsole *vc = dg; + + if (gtk_use_gl_area && !gtk_widget_get_realized(vc->gfx.drawing_area)) { + /* FIXME: Assume it will work, actual check done after realize */ +@@ -577,6 +577,17 @@ static bool gd_has_dmabuf(DisplayChangeListener *dcl) + + /** DisplayState Callbacks (opengl version) **/ + ++static const DisplayGLOps dg_gl_area_ops = { ++ .dpy_gl_ctx_create = gd_gl_area_create_context, ++ .dpy_gl_ctx_destroy = gd_gl_area_destroy_context, ++ .dpy_gl_ctx_make_current = gd_gl_area_make_current, ++ .dpy_gl_scanout_get_enabled = gd_gl_area_scanout_get_enabled, ++ .dpy_gl_scanout_texture = gd_gl_area_scanout_texture, ++ .dpy_gl_scanout_disable = gd_gl_area_scanout_disable, ++ .dpy_gl_scanout_dmabuf = gd_gl_area_scanout_dmabuf, ++ .dpy_has_dmabuf = gd_has_dmabuf, ++}; ++ + static const DisplayChangeListenerOps dcl_gl_area_ops = { + .dpy_name = "gtk-egl", + .dpy_gfx_update = gd_gl_area_update, +@@ -586,14 +597,7 @@ static const DisplayChangeListenerOps dcl_gl_area_ops = { + .dpy_mouse_set = gd_mouse_set, + .dpy_cursor_define = gd_cursor_define, + +- .dpy_gl_ctx_create = gd_gl_area_create_context, +- .dpy_gl_ctx_destroy = gd_gl_area_destroy_context, +- .dpy_gl_ctx_make_current = gd_gl_area_make_current, +- .dpy_gl_scanout_texture = gd_gl_area_scanout_texture, +- .dpy_gl_scanout_disable = gd_gl_area_scanout_disable, +- .dpy_gl_update = gd_gl_area_scanout_flush, +- .dpy_gl_scanout_dmabuf = gd_gl_area_scanout_dmabuf, +- .dpy_has_dmabuf = gd_has_dmabuf, ++ .dpy_gl_update = gd_gl_area_scanout_flush, + }; + + #ifdef CONFIG_X11 +@@ -607,6 +611,10 @@ static const DisplayChangeListenerOps dcl_egl_ops = { + .dpy_mouse_set = gd_mouse_set, + .dpy_cursor_define = gd_cursor_define, + ++ .dpy_gl_update = gd_egl_scanout_flush, ++}; ++ ++static const DisplayGLOps dg_egl_ops = { + .dpy_gl_ctx_create = gd_egl_create_context, + .dpy_gl_ctx_destroy = qemu_egl_destroy_context, + .dpy_gl_ctx_make_current = gd_egl_make_current, +@@ -616,13 +624,12 @@ 
static const DisplayChangeListenerOps dcl_egl_ops = { + .dpy_gl_cursor_dmabuf = gd_egl_cursor_dmabuf, + .dpy_gl_cursor_position = gd_egl_cursor_position, + .dpy_gl_release_dmabuf = gd_egl_release_dmabuf, +- .dpy_gl_update = gd_egl_scanout_flush, + .dpy_has_dmabuf = gd_has_dmabuf, + }; + + #endif + +-#endif /* CONFIG_OPENGL */ ++#endif /* defined(CONFIG_OPENGL) && defined(CONFIG_EGL) */ + + /** QEMU Events **/ + +@@ -669,17 +676,26 @@ static gboolean gd_window_close(GtkWidget *widget, GdkEvent *event, + return TRUE; + } + +-static void gd_set_ui_info(VirtualConsole *vc, gint width, gint height) ++static void gd_set_ui_refresh_rate(VirtualConsole *vc, int refresh_rate) ++{ ++ QemuUIInfo info; ++ ++ info = *dpy_get_ui_info(vc->gfx.dcl.con); ++ info.refresh_rate = refresh_rate; ++ dpy_set_ui_info(vc->gfx.dcl.con, &info); ++} ++ ++static void gd_set_ui_size(VirtualConsole *vc, gint width, gint height) + { + QemuUIInfo info; + +- memset(&info, 0, sizeof(info)); ++ info = *dpy_get_ui_info(vc->gfx.dcl.con); + info.width = width; + info.height = height; + dpy_set_ui_info(vc->gfx.dcl.con, &info); + } + +-#if defined(CONFIG_OPENGL) ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + + static gboolean gd_render_event(GtkGLArea *area, GdkGLContext *context, + void *opaque) +@@ -697,33 +713,32 @@ static void gd_resize_event(GtkGLArea *area, + { + VirtualConsole *vc = (void *)opaque; + +- gd_set_ui_info(vc, width, height); ++ gd_set_ui_size(vc, width, height); + } + + #endif + +-/* +- * If available, return the update interval of the monitor in ms, +- * else return 0 (the default update interval). +- */ +-int gd_monitor_update_interval(GtkWidget *widget) ++void gd_update_monitor_refresh_rate(VirtualConsole *vc, GtkWidget *widget) + { + #ifdef GDK_VERSION_3_22 + GdkWindow *win = gtk_widget_get_window(widget); ++ int refresh_rate; + + if (win) { + GdkDisplay *dpy = gtk_widget_get_display(widget); + GdkMonitor *monitor = gdk_display_get_monitor_at_window(dpy, win); +- int refresh_rate = gdk_monitor_get_refresh_rate(monitor); /* [mHz] */ +- +- if (refresh_rate) { +- /* T = 1 / f = 1 [s*Hz] / f = 1000*1000 [ms*mHz] / f */ +- return MIN(1000 * 1000 / refresh_rate, +- GUI_REFRESH_INTERVAL_DEFAULT); +- } ++ refresh_rate = gdk_monitor_get_refresh_rate(monitor); /* [mHz] */ ++ } else { ++ refresh_rate = 0; + } ++ ++ gd_set_ui_refresh_rate(vc, refresh_rate); ++ ++ /* T = 1 / f = 1 [s*Hz] / f = 1000*1000 [ms*mHz] / f */ ++ vc->gfx.dcl.update_interval = refresh_rate ? ++ MIN(1000 * 1000 / refresh_rate, GUI_REFRESH_INTERVAL_DEFAULT) : ++ GUI_REFRESH_INTERVAL_DEFAULT; + #endif +- return 0; + } + + static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque) +@@ -734,7 +749,7 @@ static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque) + int ww, wh; + int fbw, fbh; + +-#if defined(CONFIG_OPENGL) ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + if (vc->gfx.gls) { + if (gtk_use_gl_area) { + /* invoke render callback please */ +@@ -757,8 +772,7 @@ static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque) + return FALSE; + } + +- vc->gfx.dcl.update_interval = +- gd_monitor_update_interval(vc->window ? vc->window : s->window); ++ gd_update_monitor_refresh_rate(vc, vc->window ? 
vc->window : s->window); + + fbw = surface_width(vc->gfx.ds); + fbh = surface_height(vc->gfx.ds); +@@ -1602,7 +1616,7 @@ static gboolean gd_configure(GtkWidget *widget, + { + VirtualConsole *vc = opaque; + +- gd_set_ui_info(vc, cfg->width, cfg->height); ++ gd_set_ui_size(vc, cfg->width, cfg->height); + return FALSE; + } + +@@ -1833,7 +1847,7 @@ static void gd_connect_vc_gfx_signals(VirtualConsole *vc) + { + g_signal_connect(vc->gfx.drawing_area, "draw", + G_CALLBACK(gd_draw_event), vc); +-#if defined(CONFIG_OPENGL) ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + if (gtk_use_gl_area) { + /* wire up GtkGlArea events */ + g_signal_connect(vc->gfx.drawing_area, "render", +@@ -1947,7 +1961,7 @@ static GtkWidget *gd_create_menu_machine(GtkDisplayState *s) + return machine_menu; + } + +-#if defined(CONFIG_OPENGL) ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + static void gl_area_realize(GtkGLArea *area, VirtualConsole *vc) + { + gtk_gl_area_make_current(area); +@@ -1970,7 +1984,7 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc, + vc->gfx.scale_x = 1.0; + vc->gfx.scale_y = 1.0; + +-#if defined(CONFIG_OPENGL) ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + if (display_opengl) { + if (gtk_use_gl_area) { + vc->gfx.drawing_area = gtk_gl_area_new(); +@@ -2025,6 +2039,7 @@ static GSList *gd_vc_gfx_init(GtkDisplayState *s, VirtualConsole *vc, + vc->gfx.kbd = qkbd_state_init(con); + vc->gfx.dcl.con = con; + ++ console_set_displayglcontext(con, vc); + register_displaychangelistener(&vc->gfx.dcl); + + gd_connect_vc_gfx_signals(vc); +@@ -2114,6 +2129,18 @@ static GtkWidget *gd_create_menu_view(GtkDisplayState *s) + separator = gtk_separator_menu_item_new(); + gtk_menu_shell_append(GTK_MENU_SHELL(view_menu), separator); + ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) ++ if (display_opengl) { ++ if (gtk_use_gl_area) { ++ register_displayglops(&dg_gl_area_ops); ++ } else { ++#ifdef CONFIG_X11 ++ register_displayglops(&dg_egl_ops); ++#endif ++ } ++ } ++#endif ++ + /* gfx */ + for (vc = 0;; vc++) { + con = qemu_console_lookup_by_index(vc); +@@ -2298,7 +2325,7 @@ static void early_gtk_display_init(DisplayOptions *opts) + + assert(opts->type == DISPLAY_TYPE_GTK); + if (opts->has_gl && opts->gl != DISPLAYGL_MODE_OFF) { +-#if defined(CONFIG_OPENGL) ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + #if defined(GDK_WINDOWING_WAYLAND) + if (GDK_IS_WAYLAND_DISPLAY(gdk_display_get_default())) { + gtk_use_gl_area = true; +@@ -2334,6 +2361,6 @@ static void register_gtk(void) + + type_init(register_gtk); + +-#ifdef CONFIG_OPENGL ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + module_dep("ui-opengl"); + #endif +diff --git a/ui/meson.build b/ui/meson.build +index a3a187d633..0879481141 100644 +--- a/ui/meson.build ++++ b/ui/meson.build +@@ -18,7 +18,9 @@ softmmu_ss.add([spice_headers, files('spice-module.c')]) + softmmu_ss.add(when: spice_protocol, if_true: files('vdagent.c')) + + softmmu_ss.add(when: 'CONFIG_LINUX', if_true: files('input-linux.c')) +-softmmu_ss.add(when: cocoa, if_true: files('cocoa.m')) ++softmmu_ss.add(when: cocoa, ++ if_true: files('cocoa/main.m', 'cocoa/pasteboard_type_owner.m', ++ 'cocoa/view.m', 'cocoa/app_controller.m')) + + vnc_ss = ss.source_set() + vnc_ss.add(files( +@@ -51,13 +53,15 @@ if config_host.has_key('CONFIG_OPENGL') + opengl_ss = ss.source_set() + opengl_ss.add(gbm) + opengl_ss.add(when: [opengl, pixman, 'CONFIG_OPENGL'], +- if_true: files('shader.c', 'console-gl.c', 'egl-helpers.c', 'egl-context.c')) ++ if_true: files('shader.c', 
'console-gl.c')) ++ opengl_ss.add(when: [opengl, pixman, 'CONFIG_OPENGL', 'CONFIG_EGL'], ++ if_true: files('egl-helpers.c', 'egl-context.c')) + ui_modules += {'opengl' : opengl_ss} + endif + + if config_host.has_key('CONFIG_OPENGL') and gbm.found() + egl_headless_ss = ss.source_set() +- egl_headless_ss.add(when: [opengl, gbm, pixman, 'CONFIG_OPENGL'], ++ egl_headless_ss.add(when: [opengl, gbm, pixman, 'CONFIG_OPENGL', 'CONFIG_EGL'], + if_true: files('egl-headless.c')) + ui_modules += {'egl-headless' : egl_headless_ss} + endif +@@ -68,8 +72,10 @@ if gtk.found() + gtk_ss = ss.source_set() + gtk_ss.add(gtk, vte, pixman, files('gtk.c', 'gtk-clipboard.c')) + gtk_ss.add(when: x11, if_true: files('x_keymap.c')) +- gtk_ss.add(when: [opengl, 'CONFIG_OPENGL'], if_true: files('gtk-gl-area.c')) +- gtk_ss.add(when: [x11, opengl, 'CONFIG_OPENGL'], if_true: files('gtk-egl.c')) ++ gtk_ss.add(when: [opengl, 'CONFIG_OPENGL', 'CONFIG_EGL'], ++ if_true: files('gtk-gl-area.c')) ++ gtk_ss.add(when: [x11, opengl, 'CONFIG_OPENGL', 'CONFIG_EGL'], ++ if_true: files('gtk-egl.c')) + ui_modules += {'gtk' : gtk_ss} + endif + +@@ -82,7 +88,7 @@ if sdl.found() + 'sdl2-input.c', + 'sdl2.c', + )) +- sdl_ss.add(when: [opengl, 'CONFIG_OPENGL'], if_true: files('sdl2-gl.c')) ++ sdl_ss.add(when: [opengl, 'CONFIG_OPENGL', 'CONFIG_EGL'], if_true: files('sdl2-gl.c')) + sdl_ss.add(when: x11, if_true: files('x_keymap.c')) + ui_modules += {'sdl' : sdl_ss} + endif +diff --git a/ui/sdl2-gl.c b/ui/sdl2-gl.c +index a21d2deed9..3e32a1d184 100644 +--- a/ui/sdl2-gl.c ++++ b/ui/sdl2-gl.c +@@ -133,10 +133,9 @@ void sdl2_gl_redraw(struct sdl2_console *scon) + } + } + +-QEMUGLContext sdl2_gl_create_context(DisplayChangeListener *dcl, +- QEMUGLParams *params) ++QEMUGLContext sdl2_gl_create_context(void *dg, QEMUGLParams *params) + { +- struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl); ++ struct sdl2_console *scon = dg; + SDL_GLContext ctx; + + assert(scon->opengl); +@@ -168,17 +167,16 @@ QEMUGLContext sdl2_gl_create_context(DisplayChangeListener *dcl, + return (QEMUGLContext)ctx; + } + +-void sdl2_gl_destroy_context(DisplayChangeListener *dcl, QEMUGLContext ctx) ++void sdl2_gl_destroy_context(void *dg, QEMUGLContext ctx) + { + SDL_GLContext sdlctx = (SDL_GLContext)ctx; + + SDL_GL_DeleteContext(sdlctx); + } + +-int sdl2_gl_make_context_current(DisplayChangeListener *dcl, +- QEMUGLContext ctx) ++int sdl2_gl_make_context_current(void *dg, QEMUGLContext ctx) + { +- struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl); ++ struct sdl2_console *scon = dg; + SDL_GLContext sdlctx = (SDL_GLContext)ctx; + + assert(scon->opengl); +@@ -186,9 +184,9 @@ int sdl2_gl_make_context_current(DisplayChangeListener *dcl, + return SDL_GL_MakeCurrent(scon->real_window, sdlctx); + } + +-void sdl2_gl_scanout_disable(DisplayChangeListener *dcl) ++void sdl2_gl_scanout_disable(void *dg) + { +- struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl); ++ struct sdl2_console *scon = dg; + + assert(scon->opengl); + scon->w = 0; +@@ -196,17 +194,30 @@ void sdl2_gl_scanout_disable(DisplayChangeListener *dcl) + sdl2_set_scanout_mode(scon, false); + } + +-void sdl2_gl_scanout_texture(DisplayChangeListener *dcl, ++bool sdl2_gl_scanout_get_enabled(void *dg) ++{ ++ return ((struct sdl2_console *)dg)->scanout_mode; ++} ++ ++void sdl2_gl_scanout_texture(void *dg, + uint32_t backing_id, +- bool backing_y_0_top, +- uint32_t backing_width, +- uint32_t backing_height, ++ DisplayGLTextureBorrower backing_borrow, + uint32_t x, uint32_t 
y, + uint32_t w, uint32_t h) + { +- struct sdl2_console *scon = container_of(dcl, struct sdl2_console, dcl); ++ struct sdl2_console *scon = dg; ++ bool backing_y_0_top; ++ uint32_t backing_width; ++ uint32_t backing_height; + + assert(scon->opengl); ++ ++ GLuint backing_texture = backing_borrow(backing_id, &backing_y_0_top, ++ &backing_width, &backing_height); ++ if (!backing_texture) { ++ return; ++ } ++ + scon->x = x; + scon->y = y; + scon->w = w; +@@ -217,7 +228,7 @@ void sdl2_gl_scanout_texture(DisplayChangeListener *dcl, + + sdl2_set_scanout_mode(scon, true); + egl_fb_setup_for_tex(&scon->guest_fb, backing_width, backing_height, +- backing_id, false); ++ backing_texture, false); + } + + void sdl2_gl_scanout_flush(DisplayChangeListener *dcl, +diff --git a/ui/sdl2.c b/ui/sdl2.c +index 36d9010cb6..5a81e18170 100644 +--- a/ui/sdl2.c ++++ b/ui/sdl2.c +@@ -85,7 +85,7 @@ void sdl2_window_create(struct sdl2_console *scon) + if (scon->hidden) { + flags |= SDL_WINDOW_HIDDEN; + } +-#ifdef CONFIG_OPENGL ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + if (scon->opengl) { + flags |= SDL_WINDOW_OPENGL; + } +@@ -129,7 +129,7 @@ void sdl2_window_resize(struct sdl2_console *scon) + static void sdl2_redraw(struct sdl2_console *scon) + { + if (scon->opengl) { +-#ifdef CONFIG_OPENGL ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + sdl2_gl_redraw(scon); + #endif + } else { +@@ -768,7 +768,16 @@ static const DisplayChangeListenerOps dcl_2d_ops = { + .dpy_cursor_define = sdl_mouse_define, + }; + +-#ifdef CONFIG_OPENGL ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) ++static const DisplayGLOps dg_gl_ops = { ++ .dpy_gl_ctx_create = sdl2_gl_create_context, ++ .dpy_gl_ctx_destroy = sdl2_gl_destroy_context, ++ .dpy_gl_ctx_make_current = sdl2_gl_make_context_current, ++ .dpy_gl_scanout_get_enabled = sdl2_gl_scanout_get_enabled, ++ .dpy_gl_scanout_disable = sdl2_gl_scanout_disable, ++ .dpy_gl_scanout_texture = sdl2_gl_scanout_texture, ++}; ++ + static const DisplayChangeListenerOps dcl_gl_ops = { + .dpy_name = "sdl2-gl", + .dpy_gfx_update = sdl2_gl_update, +@@ -778,11 +787,6 @@ static const DisplayChangeListenerOps dcl_gl_ops = { + .dpy_mouse_set = sdl_mouse_warp, + .dpy_cursor_define = sdl_mouse_define, + +- .dpy_gl_ctx_create = sdl2_gl_create_context, +- .dpy_gl_ctx_destroy = sdl2_gl_destroy_context, +- .dpy_gl_ctx_make_current = sdl2_gl_make_context_current, +- .dpy_gl_scanout_disable = sdl2_gl_scanout_disable, +- .dpy_gl_scanout_texture = sdl2_gl_scanout_texture, + .dpy_gl_update = sdl2_gl_scanout_flush, + }; + #endif +@@ -791,7 +795,7 @@ static void sdl2_display_early_init(DisplayOptions *o) + { + assert(o->type == DISPLAY_TYPE_SDL); + if (o->has_gl && o->gl) { +-#ifdef CONFIG_OPENGL ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + display_opengl = 1; + #endif + } +@@ -834,6 +838,12 @@ static void sdl2_display_init(DisplayState *ds, DisplayOptions *o) + + gui_fullscreen = o->has_full_screen && o->full_screen; + ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) ++ if (display_opengl) { ++ register_displayglops(&dg_gl_ops); ++ } ++#endif ++ + for (i = 0;; i++) { + QemuConsole *con = qemu_console_lookup_by_index(i); + if (!con) { +@@ -854,7 +864,7 @@ static void sdl2_display_init(DisplayState *ds, DisplayOptions *o) + } + sdl2_console[i].idx = i; + sdl2_console[i].opts = o; +-#ifdef CONFIG_OPENGL ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + sdl2_console[i].opengl = display_opengl; + sdl2_console[i].dcl.ops = display_opengl ? 
&dcl_gl_ops : &dcl_2d_ops; + #else +@@ -863,6 +873,7 @@ static void sdl2_display_init(DisplayState *ds, DisplayOptions *o) + #endif + sdl2_console[i].dcl.con = con; + sdl2_console[i].kbd = qkbd_state_init(con); ++ console_set_displayglcontext(con, sdl2_console + i); + register_displaychangelistener(&sdl2_console[i].dcl); + + #if defined(SDL_VIDEO_DRIVER_WINDOWS) || defined(SDL_VIDEO_DRIVER_X11) +@@ -919,6 +930,6 @@ static void register_sdl1(void) + + type_init(register_sdl1); + +-#ifdef CONFIG_OPENGL ++#if defined(CONFIG_OPENGL) && defined(CONFIG_EGL) + module_dep("ui-opengl"); + #endif +diff --git a/ui/shader.c b/ui/shader.c +index e8b8d321b7..ae1784e7c4 100644 +--- a/ui/shader.c ++++ b/ui/shader.c +@@ -150,11 +150,19 @@ static GLuint qemu_gl_create_compile_link_program(const GLchar *vert_src, + QemuGLShader *qemu_gl_init_shader(void) + { + QemuGLShader *gls = g_new0(QemuGLShader, 1); +- ++ const char *header = epoxy_is_desktop_gl() ? "#version 140\n" : "#version 300 es\n"; ++ char vert_src[256]; ++ char frag_src[256]; ++ char *vert_src_body = stpcpy(vert_src, header); ++ char *frag_src_body = stpcpy(frag_src, header); ++ ++ strcpy(vert_src_body, texture_blit_vert_src); ++ strcpy(frag_src_body, texture_blit_frag_src); + gls->texture_blit_prog = qemu_gl_create_compile_link_program +- (texture_blit_vert_src, texture_blit_frag_src); ++ (vert_src, frag_src); ++ strcpy(vert_src_body, texture_blit_flip_vert_src); + gls->texture_blit_flip_prog = qemu_gl_create_compile_link_program +- (texture_blit_flip_vert_src, texture_blit_frag_src); ++ (vert_src, frag_src); + if (!gls->texture_blit_prog || !gls->texture_blit_flip_prog) { + exit(1); + } +diff --git a/ui/shader/texture-blit-flip.vert b/ui/shader/texture-blit-flip.vert +index ba081fa5a6..1e4ac4c947 100644 +--- a/ui/shader/texture-blit-flip.vert ++++ b/ui/shader/texture-blit-flip.vert +@@ -1,6 +1,3 @@ +- +-#version 300 es +- + in vec2 in_position; + out vec2 ex_tex_coord; + +diff --git a/ui/shader/texture-blit.frag b/ui/shader/texture-blit.frag +index bfa202c22b..bd296a2ffb 100644 +--- a/ui/shader/texture-blit.frag ++++ b/ui/shader/texture-blit.frag +@@ -1,6 +1,3 @@ +- +-#version 300 es +- + uniform sampler2D image; + in mediump vec2 ex_tex_coord; + out mediump vec4 out_frag_color; +diff --git a/ui/shader/texture-blit.vert b/ui/shader/texture-blit.vert +index 6fe2744d68..ae205f6377 100644 +--- a/ui/shader/texture-blit.vert ++++ b/ui/shader/texture-blit.vert +@@ -1,6 +1,3 @@ +- +-#version 300 es +- + in vec2 in_position; + out vec2 ex_tex_coord; + +diff --git a/ui/spice-app.c b/ui/spice-app.c +index 641f4a9d53..7e71e18da9 100644 +--- a/ui/spice-app.c ++++ b/ui/spice-app.c +@@ -27,6 +27,7 @@ + #include + + #include "ui/console.h" ++#include "ui/spice-display.h" + #include "qemu/config-file.h" + #include "qemu/option.h" + #include "qemu/cutils.h" +@@ -175,7 +176,7 @@ static void spice_app_display_early_init(DisplayOptions *opts) + qemu_opt_set(qopts, "addr", sock_path, &error_abort); + qemu_opt_set(qopts, "image-compression", "off", &error_abort); + qemu_opt_set(qopts, "streaming-video", "off", &error_abort); +-#ifdef CONFIG_OPENGL ++#ifdef HAVE_SPICE_GL + qemu_opt_set(qopts, "gl", opts->has_gl ? 
"on" : "off", &error_abort); + display_opengl = opts->has_gl; + #endif +diff --git a/ui/spice-core.c b/ui/spice-core.c +index 86d43783ac..0371055e6c 100644 +--- a/ui/spice-core.c ++++ b/ui/spice-core.c +@@ -1039,6 +1039,6 @@ static void spice_register_config(void) + opts_init(spice_register_config); + module_opts("spice"); + +-#ifdef CONFIG_OPENGL ++#ifdef HAVE_SPICE_GL + module_dep("ui-opengl"); + #endif +diff --git a/ui/spice-display.c b/ui/spice-display.c +index f59c69882d..94efe07c90 100644 +--- a/ui/spice-display.c ++++ b/ui/spice-display.c +@@ -909,17 +909,17 @@ static void spice_gl_switch(DisplayChangeListener *dcl, + } + } + +-static QEMUGLContext qemu_spice_gl_create_context(DisplayChangeListener *dcl, ++static QEMUGLContext qemu_spice_gl_create_context(void *dg, + QEMUGLParams *params) + { + eglMakeCurrent(qemu_egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE, + qemu_egl_rn_ctx); +- return qemu_egl_create_context(dcl, params); ++ return qemu_egl_create_context(dg, params); + } + +-static void qemu_spice_gl_scanout_disable(DisplayChangeListener *dcl) ++static void qemu_spice_gl_scanout_disable(void *dg) + { +- SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); ++ SimpleSpiceDisplay *ssd = dg; + + trace_qemu_spice_gl_scanout_disable(ssd->qxl.id); + spice_qxl_gl_scanout(&ssd->qxl, -1, 0, 0, 0, 0, false); +@@ -928,19 +928,25 @@ static void qemu_spice_gl_scanout_disable(DisplayChangeListener *dcl) + ssd->have_scanout = false; + } + +-static void qemu_spice_gl_scanout_texture(DisplayChangeListener *dcl, +- uint32_t tex_id, +- bool y_0_top, +- uint32_t backing_width, +- uint32_t backing_height, ++static void qemu_spice_gl_scanout_texture(void *dg, ++ uint32_t backing_id, ++ DisplayGLTextureBorrower backing_borrow, + uint32_t x, uint32_t y, + uint32_t w, uint32_t h) + { +- SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); ++ SimpleSpiceDisplay *ssd = dg; + EGLint stride = 0, fourcc = 0; + int fd = -1; ++ bool y_0_top; ++ uint32_t backing_width; ++ uint32_t backing_height; ++ ++ GLuint tex_id = backing_borrow(backing_id, &y_0_top, ++ &backing_width, &backing_height); ++ if (!tex_id) { ++ return; ++ } + +- assert(tex_id); + fd = egl_get_fd_for_texture(tex_id, &stride, &fourcc, NULL); + if (fd < 0) { + fprintf(stderr, "%s: failed to get fd for texture\n", __func__); +@@ -956,10 +962,9 @@ static void qemu_spice_gl_scanout_texture(DisplayChangeListener *dcl, + ssd->have_scanout = true; + } + +-static void qemu_spice_gl_scanout_dmabuf(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf) ++static void qemu_spice_gl_scanout_dmabuf(void *dg, QemuDmaBuf *dmabuf) + { +- SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); ++ SimpleSpiceDisplay *ssd = dg; + + ssd->guest_dmabuf = dmabuf; + ssd->guest_dmabuf_refresh = true; +@@ -968,11 +973,11 @@ static void qemu_spice_gl_scanout_dmabuf(DisplayChangeListener *dcl, + ssd->have_scanout = true; + } + +-static void qemu_spice_gl_cursor_dmabuf(DisplayChangeListener *dcl, ++static void qemu_spice_gl_cursor_dmabuf(void *dg, + QemuDmaBuf *dmabuf, bool have_hot, + uint32_t hot_x, uint32_t hot_y) + { +- SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); ++ SimpleSpiceDisplay *ssd = dg; + + ssd->have_hot = have_hot; + ssd->hot_x = hot_x; +@@ -991,10 +996,10 @@ static void qemu_spice_gl_cursor_dmabuf(DisplayChangeListener *dcl, + } + } + +-static void qemu_spice_gl_cursor_position(DisplayChangeListener *dcl, ++static void qemu_spice_gl_cursor_position(void *dg, + uint32_t pos_x, uint32_t pos_y) + 
{ +- SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); ++ SimpleSpiceDisplay *ssd = dg; + + qemu_mutex_lock(&ssd->lock); + ssd->ptr_x = pos_x; +@@ -1002,10 +1007,9 @@ static void qemu_spice_gl_cursor_position(DisplayChangeListener *dcl, + qemu_mutex_unlock(&ssd->lock); + } + +-static void qemu_spice_gl_release_dmabuf(DisplayChangeListener *dcl, +- QemuDmaBuf *dmabuf) ++static void qemu_spice_gl_release_dmabuf(void *dg, QemuDmaBuf *dmabuf) + { +- SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); ++ SimpleSpiceDisplay *ssd = dg; + + if (ssd->guest_dmabuf == dmabuf) { + ssd->guest_dmabuf = NULL; +@@ -1105,7 +1109,10 @@ static const DisplayChangeListenerOps display_listener_gl_ops = { + .dpy_refresh = spice_gl_refresh, + .dpy_mouse_set = display_mouse_set, + .dpy_cursor_define = display_mouse_define, ++ .dpy_gl_update = qemu_spice_gl_update, ++}; + ++static const DisplayGLOps display_gl_ops = { + .dpy_gl_ctx_create = qemu_spice_gl_create_context, + .dpy_gl_ctx_destroy = qemu_egl_destroy_context, + .dpy_gl_ctx_make_current = qemu_egl_make_context_current, +@@ -1116,7 +1123,6 @@ static const DisplayChangeListenerOps display_listener_gl_ops = { + .dpy_gl_cursor_dmabuf = qemu_spice_gl_cursor_dmabuf, + .dpy_gl_cursor_position = qemu_spice_gl_cursor_position, + .dpy_gl_release_dmabuf = qemu_spice_gl_release_dmabuf, +- .dpy_gl_update = qemu_spice_gl_update, + }; + + #endif /* HAVE_SPICE_GL */ +@@ -1137,6 +1143,8 @@ static void qemu_spice_display_init_one(QemuConsole *con) + ssd->gls = qemu_gl_init_shader(); + ssd->have_surface = false; + ssd->have_scanout = false; ++ console_set_displayglcontext(con, ssd); ++ register_displayglops(&display_gl_ops); + } + #endif + ssd->dcl.con = con;
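
The ui/ hunks above all apply the same refactoring pattern: the dpy_gl_* callbacks move out of DisplayChangeListenerOps into a standalone DisplayGLOps table that is registered once via register_displayglops(), takes an opaque void *dg instead of a DisplayChangeListener, and resolves that pointer through console_set_displayglcontext(). Scanout geometry is no longer passed by value; the consumer borrows the texture at scanout time through a DisplayGLTextureBorrower callback, which also lets it bail out early when no backing texture exists yet. The following is a minimal illustrative sketch, not part of the patch: "mybackend" and its console struct are hypothetical, and the DisplayGLOps, DisplayGLTextureBorrower, register_displayglops() and console_set_displayglcontext() symbols are assumed to exist exactly as introduced by the hunks above, so it would only compile against a tree with this patchset applied.

/* Hypothetical display backend adopting the DisplayGLOps interface
 * introduced by this patchset; illustration only. */
#include "qemu/osdep.h"
#include <epoxy/gl.h>
#include "ui/console.h"

typedef struct MyBackendConsole {
    DisplayChangeListener dcl;   /* still carries the 2D callbacks */
    bool scanout_mode;
} MyBackendConsole;

static void mybackend_gl_scanout_texture(void *dg, uint32_t backing_id,
                                         DisplayGLTextureBorrower borrow,
                                         uint32_t x, uint32_t y,
                                         uint32_t w, uint32_t h)
{
    MyBackendConsole *c = dg;   /* set by console_set_displayglcontext() */
    bool y_0_top;
    uint32_t backing_width, backing_height;

    /* Borrow the texture; geometry comes back through out-parameters. */
    GLuint tex = borrow(backing_id, &y_0_top,
                        &backing_width, &backing_height);
    if (!tex) {
        return;   /* no backing texture yet, same early-out as sdl2/spice */
    }

    /* ...bind tex to a framebuffer sized backing_width x backing_height,
     * then present the (x, y, w, h) sub-rectangle... */
    c->scanout_mode = true;
}

static const DisplayGLOps mybackend_gl_ops = {
    .dpy_gl_scanout_texture = mybackend_gl_scanout_texture,
    /* .dpy_gl_ctx_create, .dpy_gl_ctx_make_current, etc. as needed */
};

static void mybackend_display_init(QemuConsole *con)
{
    MyBackendConsole *c = g_new0(MyBackendConsole, 1);

    /* Route this console's GL callbacks to our private state... */
    console_set_displayglcontext(con, c);
    /* ...and publish the process-global GL ops table. */
    register_displayglops(&mybackend_gl_ops);

    c->dcl.con = con;
    register_displaychangelistener(&c->dcl);
}

Note the split in scope: DisplayGLOps is global while the dg pointer is per console, mirroring how the sdl2.c hunk calls register_displayglops() once but console_set_displayglcontext(con, sdl2_console + i) for each console. The borrower callback additionally defers texture lookup to scanout time, so a backend never caches a texture ID that the guest side may have already replaced.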