diff --git a/accel/accel-blocker.c b/accel/accel-blocker.c
index 1e7f423462df10ff7d7b09638d6006889e1e68c6..e083f24aa80747498192f19e1071949bc7b015ef 100644
--- a/accel/accel-blocker.c
+++ b/accel/accel-blocker.c
@@ -41,7 +41,7 @@ void accel_blocker_init(void)
 
 void accel_ioctl_begin(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }
 
@@ -51,7 +51,7 @@ void accel_ioctl_begin(void)
 
 void accel_ioctl_end(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }
 
@@ -62,7 +62,7 @@ void accel_ioctl_end(void)
 
 void accel_cpu_ioctl_begin(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }
 
@@ -72,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu)
 
 void accel_cpu_ioctl_end(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }
 
@@ -105,7 +105,7 @@ void accel_ioctl_inhibit_begin(void)
      * We allow to inhibit only when holding the BQL, so we can identify
      * when an inhibitor wants to issue an ioctl easily.
      */
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     /* Block further invocations of the ioctls outside the BQL. */
     CPU_FOREACH(cpu) {
diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c
index b75c919ac358677919598506ce55bacad35601f4..f4b0ec58900cf19264d2b548c1a0db8a6a55d945 100644
--- a/accel/dummy-cpus.c
+++ b/accel/dummy-cpus.c
@@ -24,7 +24,7 @@ static void *dummy_cpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
@@ -43,7 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg)
     qemu_guest_random_seed_thread_part2(cpu->random_seed);
 
     do {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
 #ifndef _WIN32
         do {
             int sig;
@@ -56,11 +56,11 @@ static void *dummy_cpu_thread_fn(void *arg)
 #else
         qemu_sem_wait(&cpu->sem);
 #endif
-        qemu_mutex_lock_iothread();
+        bql_lock();
         qemu_wait_io_event(cpu);
     } while (!cpu->unplug);
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index abe7adf7ee87c89532fdbb78d08748e1140340ae..8eabb696facb8993bc0f146c85ac377bf40f14ec 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -424,7 +424,7 @@ static void *hvf_cpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -449,7 +449,7 @@ static void *hvf_cpu_thread_fn(void *arg)
 
     hvf_vcpu_destroy(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index 4c155f028a3bd4279c78abb1f54e608346b3182d..cfdb86de019d8cdbeeb54dbe9e4e18eeaecee4fb 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -33,7 +33,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
 
     rcu_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
@@ -58,7 +58,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
 
     kvm_destroy_vcpu(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@@ -83,7 +83,7 @@ static bool kvm_vcpu_thread_is_idle(CPUState *cpu)
 
 static bool kvm_cpus_are_resettable(void)
 {
-    return !kvm_enabled() || kvm_cpu_check_are_resettable();
+    return !kvm_enabled() || !kvm_state->guest_state_protected;
 }
 
 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index b1217cf92e41e2a901c90cec46e64da5703f6617..ad1b3fc7f85a76bbe8db9ab1f20dd5915b73626e 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -855,7 +855,7 @@ static void kvm_dirty_ring_flush(void)
      * should always be with BQL held, serialization is guaranteed.
      * However, let's be sure of it.
      */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     /*
      * First make sure to flush the hardware buffers by kicking all
      * vcpus out in a synchronous way.
@@ -1450,9 +1450,9 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
         trace_kvm_dirty_ring_reaper("wakeup");
         r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
 
-        qemu_mutex_lock_iothread();
+        bql_lock();
         kvm_dirty_ring_reap(s, NULL);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
 
         r->reaper_iteration++;
     }
@@ -2749,14 +2749,9 @@ void kvm_flush_coalesced_mmio_buffer(void)
     s->coalesced_flush_in_progress = false;
 }
 
-bool kvm_cpu_check_are_resettable(void)
-{
-    return kvm_arch_cpu_check_are_resettable();
-}
-
 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
 {
-    if (!cpu->vcpu_dirty) {
+    if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
         Error *err = NULL;
         int ret = kvm_arch_get_registers(cpu, &err);
         if (ret) {
@@ -2776,7 +2771,7 @@ static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
 
 void kvm_cpu_synchronize_state(CPUState *cpu)
 {
-    if (!cpu->vcpu_dirty) {
+    if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) {
         run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
     }
 }
@@ -2823,7 +2818,13 @@ static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
 
 void kvm_cpu_synchronize_post_init(CPUState *cpu)
 {
-    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
+    if (!kvm_state->guest_state_protected) {
+        /*
+         * This runs before the machine_init_done notifiers, and is the last
+         * opportunity to synchronize the state of confidential guests.
+         */
+        run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
+    }
 }
 
 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
@@ -2913,7 +2914,7 @@ int kvm_cpu_exec(CPUState *cpu)
         return EXCP_HLT;
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     cpu_exec_start(cpu);
 
     do {
@@ -2958,11 +2959,11 @@ int kvm_cpu_exec(CPUState *cpu)
 
 #ifdef KVM_HAVE_MCE_INJECTION
         if (unlikely(have_sigbus_pending)) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
             kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                     pending_sigbus_addr);
             have_sigbus_pending = false;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
 #endif
 
@@ -3032,7 +3033,7 @@ int kvm_cpu_exec(CPUState *cpu)
                  * still full.  Got kicked by KVM_RESET_DIRTY_RINGS.
                  */
                 trace_kvm_dirty_ring_full(cpu->cpu_index);
-                qemu_mutex_lock_iothread();
+                bql_lock();
                 /*
                  * We throttle vCPU by making it sleep once it exit from kernel
                  * due to dirty ring full.  In the dirtylimit scenario, reaping
@@ -3044,7 +3045,7 @@ int kvm_cpu_exec(CPUState *cpu)
                 } else {
                     kvm_dirty_ring_reap(kvm_state, NULL);
                 }
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 dirtylimit_vcpu_execute(cpu);
                 ret = 0;
                 break;
@@ -3060,9 +3061,9 @@ int kvm_cpu_exec(CPUState *cpu)
                 break;
             case KVM_SYSTEM_EVENT_CRASH:
                 kvm_cpu_synchronize_state(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
                 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 ret = 0;
                 break;
             default:
@@ -3079,7 +3080,7 @@ int kvm_cpu_exec(CPUState *cpu)
     } while (ret == 0);
 
     cpu_exec_end(cpu);
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     if (ret < 0) {
         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
@@ -4189,3 +4190,8 @@ void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
         query_stats_schema_vcpu(first_cpu, &stats_args);
     }
 }
+
+void kvm_mark_guest_state_protected(void)
+{
+    kvm_state->guest_state_protected = true;
+}
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 6a4af14d325750dfd371a3ed3723d227058faef6..64f26d7df9b06a6008362f2d4ce9d0d8c95eff6e 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -558,8 +558,8 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
         tcg_ctx->gen_tb = NULL;
     }
 #endif
-    if (qemu_mutex_iothread_locked()) {
-        qemu_mutex_unlock_iothread();
+    if (bql_locked()) {
+        bql_unlock();
     }
     assert_no_pages_locked();
 }
@@ -680,10 +680,10 @@ static inline bool cpu_handle_halt(CPUState *cpu)
 #if defined(TARGET_I386)
         if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
             X86CPU *x86_cpu = X86_CPU(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             apic_poll_irq(x86_cpu->apic_state);
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
 #endif /* TARGET_I386 */
         if (!cpu_has_work(cpu)) {
@@ -749,9 +749,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
 #else
             if (replay_exception()) {
                 CPUClass *cc = CPU_GET_CLASS(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
                 cc->tcg_ops->do_interrupt(cpu);
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 cpu->exception_index = -1;
 
                 if (unlikely(cpu->singlestep_enabled)) {
@@ -812,7 +812,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
 
-        qemu_mutex_lock_iothread();
+        bql_lock();
         interrupt_request = cpu->interrupt_request;
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
@@ -821,7 +821,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         if (interrupt_request & CPU_INTERRUPT_DEBUG) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
             cpu->exception_index = EXCP_DEBUG;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if !defined(CONFIG_USER_ONLY)
@@ -832,7 +832,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
             cpu->halted = 1;
             cpu->exception_index = EXCP_HLT;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if defined(TARGET_I386)
@@ -843,14 +843,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
             do_cpu_init(x86_cpu);
             cpu->exception_index = EXCP_HALTED;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #else
         else if (interrupt_request & CPU_INTERRUPT_RESET) {
             replay_interrupt();
             cpu_reset(cpu);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #endif /* !TARGET_I386 */
@@ -873,7 +873,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
              */
             if (unlikely(cpu->singlestep_enabled)) {
                 cpu->exception_index = EXCP_DEBUG;
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 return true;
             }
             cpu->exception_index = -1;
@@ -892,7 +892,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         }
 
         /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 
     /* Finally, check if we need to exit to the main loop. */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index db3f93fda990c52984f438dad15f5b198b1a4fde..5698a9fd8e2c74363c978367f840dc3ea09eff7e 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2030,10 +2030,10 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
                           type, ra, mr, mr_offset);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
@@ -2054,12 +2054,12 @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
                         MMU_DATA_LOAD, ra, mr, mr_offset);
     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return int128_make128(b, a);
 }
@@ -2577,10 +2577,10 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
                           ra, mr, mr_offset);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
@@ -2601,12 +2601,12 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
     mr = section->mr;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
                     mmu_idx, ra, mr, mr_offset);
     ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
                           size - 8, mmu_idx, ra, mr, mr_offset + 8);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
 
     return ret;
 }
diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c
index b25685fb712ecdbd7379d026c7bf83c0818be50e..5824d92580fa79073139cdb824855c8c5a5f81ee 100644
--- a/accel/tcg/tcg-accel-ops-icount.c
+++ b/accel/tcg/tcg-accel-ops-icount.c
@@ -126,9 +126,9 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
          * We're called without the iothread lock, so must take it while
          * we're calling timer handlers.
          */
-        qemu_mutex_lock_iothread();
+        bql_lock();
         icount_notify_aio_contexts();
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index fac80095bbd09944870a29312de0a906c63ca3c0..af7307013a54583b04de820d64a661c2a42a7bbf 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -76,7 +76,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu.notifier);
     tcg_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -91,9 +91,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
     do {
         if (cpu_can_run(cpu)) {
             int r;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             r = tcg_cpus_exec(cpu);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             switch (r) {
             case EXCP_DEBUG:
                 cpu_handle_guest_debug(cpu);
@@ -105,9 +105,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
                  */
                 break;
             case EXCP_ATOMIC:
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 cpu_exec_step_atomic(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
             default:
                 /* Ignore everything else? */
                 break;
@@ -119,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
     } while (!cpu->unplug || cpu_can_run(cpu));
 
     tcg_cpus_destroy(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_remove_force_rcu_notifier(&force_rcu.notifier);
     rcu_unregister_thread();
     return NULL;
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 611932f3c3a8c953a3ff68612139696fed0e92e1..c4ea372a3f6422db58bc0e0aede24bcdbcf3757c 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -188,7 +188,7 @@ static void *rr_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu);
     tcg_register_thread();
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
 
     cpu->thread_id = qemu_get_thread_id();
@@ -218,9 +218,9 @@ static void *rr_cpu_thread_fn(void *arg)
         /* Only used for icount_enabled() */
         int64_t cpu_budget = 0;
 
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         replay_mutex_lock();
-        qemu_mutex_lock_iothread();
+        bql_lock();
 
         if (icount_enabled()) {
             int cpu_count = rr_cpu_count();
@@ -254,7 +254,7 @@ static void *rr_cpu_thread_fn(void *arg)
             if (cpu_can_run(cpu)) {
                 int r;
 
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 if (icount_enabled()) {
                     icount_prepare_for_run(cpu, cpu_budget);
                 }
@@ -262,15 +262,15 @@ static void *rr_cpu_thread_fn(void *arg)
                 if (icount_enabled()) {
                     icount_process_data(cpu);
                 }
-                qemu_mutex_lock_iothread();
+                bql_lock();
 
                 if (r == EXCP_DEBUG) {
                     cpu_handle_guest_debug(cpu);
                     break;
                 } else if (r == EXCP_ATOMIC) {
-                    qemu_mutex_unlock_iothread();
+                    bql_unlock();
                     cpu_exec_step_atomic(cpu);
-                    qemu_mutex_lock_iothread();
+                    bql_lock();
                     break;
                 }
             } else if (cpu->stop) {
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 1b5729068202e461dfd430828a267ecf77bed600..813065c0ecb006aea50d4bc38d1bd48f7a355d4e 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -88,7 +88,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu)
 /* mask must never be zero, except for A20 change call */
 void tcg_handle_interrupt(CPUState *cpu, int mask)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
 
     cpu->interrupt_request |= mask;
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index c1708afcb02fc768cde49b1fa41578384dea8fe1..55397efdd8cb587de5e5b186efeaecf5a58e551b 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -647,7 +647,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
 
 void cpu_interrupt(CPUState *cpu, int mask)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());
     cpu->interrupt_request |= mask;
     qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
 }
diff --git a/audio/coreaudio.m b/audio/coreaudio.m
index 8cd129a27d02fa645b2d5231ee97425062e0f1bd..9d2db9883cb3953f672eacf6ad1391dfe6d65de4 100644
--- a/audio/coreaudio.m
+++ b/audio/coreaudio.m
@@ -547,7 +547,7 @@ static OSStatus handle_voice_change(
 {
     coreaudioVoiceOut *core = in_client_data;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     if (core->outputDeviceID) {
         fini_out_device(core);
@@ -557,7 +557,7 @@ static OSStatus handle_voice_change(
         update_device_playback_state(core);
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     return 0;
 }
diff --git a/backends/hostmem-epc.c b/backends/hostmem-epc.c
index 4e162d6789e86dc0de4ff3086b6e383fea059be4..735e2e1cf84670c891ba4571f09ec44256e64f82 100644
--- a/backends/hostmem-epc.c
+++ b/backends/hostmem-epc.c
@@ -17,31 +17,29 @@
 #include "sysemu/hostmem.h"
 #include "hw/i386/hostmem-epc.h"
 
-static void
+static bool
 sgx_epc_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
 {
+    g_autofree char *name = NULL;
     uint32_t ram_flags;
-    char *name;
     int fd;
 
     if (!backend->size) {
         error_setg(errp, "can't create backend with size 0");
-        return;
+        return false;
     }
 
     fd = qemu_open_old("/dev/sgx_vepc", O_RDWR);
     if (fd < 0) {
         error_setg_errno(errp, errno,
                          "failed to open /dev/sgx_vepc to alloc SGX EPC");
-        return;
+        return false;
     }
 
     name = object_get_canonical_path(OBJECT(backend));
     ram_flags = (backend->share ? RAM_SHARED : 0) | RAM_PROTECTED;
-    memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend),
-                                   name, backend->size, ram_flags,
-                                   fd, 0, errp);
-    g_free(name);
+    return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name,
+                                          backend->size, ram_flags, fd, 0, errp);
 }
 
 static void sgx_epc_backend_instance_init(Object *obj)
diff --git a/backends/hostmem-file.c b/backends/hostmem-file.c
index 361d4a8103ef82cba0994b24bc989a65798741ef..ac3e433cbddbca395472f929fc6da8228316ef0e 100644
--- a/backends/hostmem-file.c
+++ b/backends/hostmem-file.c
@@ -36,24 +36,25 @@ struct HostMemoryBackendFile {
     OnOffAuto rom;
 };
 
-static void
+static bool
 file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
 {
 #ifndef CONFIG_POSIX
     error_setg(errp, "backend '%s' not supported on this host",
               object_get_typename(OBJECT(backend)));
+    return false;
 #else
     HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(backend);
+    g_autofree gchar *name = NULL;
     uint32_t ram_flags;
-    gchar *name;
 
     if (!backend->size) {
        error_setg(errp, "can't create backend with size 0");
-        return;
+        return false;
     }
     if (!fb->mem_path) {
         error_setg(errp, "mem-path property not set");
-        return;
+        return false;
     }
 
     switch (fb->rom) {
@@ -65,18 +66,18 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
         if (!fb->readonly) {
             error_setg(errp, "property 'rom' = 'on' is not supported with"
                        " 'readonly' = 'off'");
-            return;
+            return false;
         }
         break;
     case ON_OFF_AUTO_OFF:
         if (fb->readonly && backend->share) {
             error_setg(errp, "property 'rom' = 'off' is incompatible with"
                        " 'readonly' = 'on' and 'share' = 'on'");
-            return;
+            return false;
         }
         break;
     default:
-        assert(false);
+        g_assert_not_reached();
     }
 
     name = host_memory_backend_get_name(backend);
@@ -86,10 +87,9 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
     ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
     ram_flags |= fb->is_pmem ? RAM_PMEM : 0;
     ram_flags |= RAM_NAMED_FILE;
-    memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name,
-                                     backend->size, fb->align, ram_flags,
-                                     fb->mem_path, fb->offset, errp);
-    g_free(name);
+    return memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name,
+                                            backend->size, fb->align, ram_flags,
+                                            fb->mem_path, fb->offset, errp);
 #endif
 }
diff --git a/backends/hostmem-memfd.c b/backends/hostmem-memfd.c
index 3fc85c3db81bb71176cdfe65dbf0643e1504355b..3923ea9364d5ee993d758c96c6fc1ebf2fae89ec 100644
--- a/backends/hostmem-memfd.c
+++ b/backends/hostmem-memfd.c
@@ -31,17 +31,17 @@ struct HostMemoryBackendMemfd {
     bool seal;
 };
 
-static void
+static bool
 memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
 {
     HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(backend);
+    g_autofree char *name = NULL;
     uint32_t ram_flags;
-    char *name;
    int fd;
 
     if (!backend->size) {
         error_setg(errp, "can't create backend with size 0");
-        return;
+        return false;
     }
 
     fd = qemu_memfd_create(TYPE_MEMORY_BACKEND_MEMFD, backend->size,
@@ -49,15 +49,14 @@ memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
                            F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL : 0,
                            errp);
     if (fd == -1) {
-        return;
+        return false;
     }
 
     name = host_memory_backend_get_name(backend);
     ram_flags = backend->share ? RAM_SHARED : 0;
     ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
-    memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name,
-                                   backend->size, ram_flags, fd, 0, errp);
-    g_free(name);
+    return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name,
+                                          backend->size, ram_flags, fd, 0, errp);
 }
 
 static bool
diff --git a/backends/hostmem-ram.c b/backends/hostmem-ram.c
index b8e55cdbd0f89078b01bb1575416173ce909526b..d121249f0f457edccdb24d10b6a79da4e0c026ff 100644
--- a/backends/hostmem-ram.c
+++ b/backends/hostmem-ram.c
@@ -16,23 +16,23 @@
 #include "qemu/module.h"
 #include "qom/object_interfaces.h"
 
-static void
+static bool
 ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
 {
+    g_autofree char *name = NULL;
     uint32_t ram_flags;
-    char *name;
 
     if (!backend->size) {
         error_setg(errp, "can't create backend with size 0");
-        return;
+        return false;
     }
 
     name = host_memory_backend_get_name(backend);
     ram_flags = backend->share ? RAM_SHARED : 0;
     ram_flags |= backend->reserve ? 0 : RAM_NORESERVE;
-    memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend), name,
-                                           backend->size, ram_flags, errp);
-    g_free(name);
+    return memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend),
+                                                  name, backend->size,
+                                                  ram_flags, errp);
 }
 
 static void
diff --git a/backends/hostmem.c b/backends/hostmem.c
index 747e7838c031c42aaab91c159cbaa9b96f7e5013..3f8eb936d70cd31d3ebe8cad2b7d56bf550da175 100644
--- a/backends/hostmem.c
+++ b/backends/hostmem.c
@@ -328,83 +328,83 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
     void *ptr;
     uint64_t sz;
 
-    if (bc->alloc) {
-        bc->alloc(backend, &local_err);
-        if (local_err) {
-            goto out;
-        }
+    if (!bc->alloc) {
+        return;
+    }
+    if (!bc->alloc(backend, errp)) {
+        return;
+    }
 
-        ptr = memory_region_get_ram_ptr(&backend->mr);
-        sz = memory_region_size(&backend->mr);
+    ptr = memory_region_get_ram_ptr(&backend->mr);
+    sz = memory_region_size(&backend->mr);
 
-        if (backend->merge) {
-            qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
-        }
-        if (!backend->dump) {
-            qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
-        }
+    if (backend->merge) {
+        qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
+    }
+    if (!backend->dump) {
+        qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
+    }
 #ifdef CONFIG_NUMA
-        unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
-        /* lastbit == MAX_NODES means maxnode = 0 */
-        unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
-        /* ensure policy won't be ignored in case memory is preallocated
-         * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
-         * this doesn't catch hugepage case. */
-        unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;
-        int mode = backend->policy;
-
-        /* check for invalid host-nodes and policies and give more verbose
-         * error messages than mbind(). */
-        if (maxnode && backend->policy == MPOL_DEFAULT) {
-            error_setg(errp, "host-nodes must be empty for policy default,"
-                       " or you should explicitly specify a policy other"
-                       " than default");
-            return;
-        } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
-            error_setg(errp, "host-nodes must be set for policy %s",
-                       HostMemPolicy_str(backend->policy));
-            return;
-        }
+    unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
+    /* lastbit == MAX_NODES means maxnode = 0 */
+    unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
+    /* ensure policy won't be ignored in case memory is preallocated
+     * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
+     * this doesn't catch hugepage case. */
+    unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;
+    int mode = backend->policy;
+
+    /* check for invalid host-nodes and policies and give more verbose
+     * error messages than mbind(). */
+    if (maxnode && backend->policy == MPOL_DEFAULT) {
+        error_setg(errp, "host-nodes must be empty for policy default,"
+                   " or you should explicitly specify a policy other"
+                   " than default");
+        return;
+    } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
+        error_setg(errp, "host-nodes must be set for policy %s",
+                   HostMemPolicy_str(backend->policy));
+        return;
+    }
 
-        /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1
-         * as argument to mbind() due to an old Linux bug (feature?) which
-         * cuts off the last specified node. This means backend->host_nodes
-         * must have MAX_NODES+1 bits available.
-         */
-        assert(sizeof(backend->host_nodes) >=
-               BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
-        assert(maxnode <= MAX_NODES);
+    /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1
+     * as argument to mbind() due to an old Linux bug (feature?) which
+     * cuts off the last specified node. This means backend->host_nodes
+     * must have MAX_NODES+1 bits available.
+     */
+    assert(sizeof(backend->host_nodes) >=
+           BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
+    assert(maxnode <= MAX_NODES);
 
 #ifdef HAVE_NUMA_HAS_PREFERRED_MANY
-        if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) {
-            /*
-             * Replace with MPOL_PREFERRED_MANY otherwise the mbind() below
-             * silently picks the first node.
-             */
-            mode = MPOL_PREFERRED_MANY;
-        }
+    if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) {
+        /*
+         * Replace with MPOL_PREFERRED_MANY otherwise the mbind() below
+         * silently picks the first node.
+         */
+        mode = MPOL_PREFERRED_MANY;
+    }
 #endif
 
-        if (maxnode &&
-            mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) {
-            if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
-                error_setg_errno(errp, errno,
-                                 "cannot bind memory to host NUMA nodes");
-                return;
-            }
+    if (maxnode &&
+        mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) {
+        if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
+            error_setg_errno(errp, errno,
+                             "cannot bind memory to host NUMA nodes");
+            return;
         }
+    }
 #endif
-        /* Preallocate memory after the NUMA policy has been instantiated.
-         * This is necessary to guarantee memory is allocated with
-         * specified NUMA policy in place.
-         */
-        if (backend->prealloc) {
-            qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz,
-                              backend->prealloc_threads,
-                              backend->prealloc_context, &local_err);
-            if (local_err) {
-                goto out;
-            }
+    /* Preallocate memory after the NUMA policy has been instantiated.
+     * This is necessary to guarantee memory is allocated with
+     * specified NUMA policy in place.
+     */
+    if (backend->prealloc) {
+        qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz,
+                          backend->prealloc_threads,
+                          backend->prealloc_context, &local_err);
+        if (local_err) {
+            goto out;
         }
     }
 out:
diff --git a/cpu-common.c b/cpu-common.c
index c81fd72d16d5c9e5f6f8af2d10b6f8f20e9cd3a6..ce78273af5971dda7ce4e3b1bcb68b3e41e66f91 100644
--- a/cpu-common.c
+++ b/cpu-common.c
@@ -351,11 +351,11 @@ void process_queued_cpu_work(CPUState *cpu)
              * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
              * neither CPU can proceed.
              */
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             start_exclusive();
             wi->func(cpu, wi->data);
             end_exclusive();
-            qemu_mutex_lock_iothread();
+            bql_lock();
         } else {
             wi->func(cpu, wi->data);
         }
diff --git a/dump/dump.c b/dump/dump.c
index 787059ac2c8118311124ccbce3f1839219827df8..0f9b37718eaeeab8607aa1ecf3f9780a746ae5b6 100644
--- a/dump/dump.c
+++ b/dump/dump.c
@@ -109,11 +109,11 @@ static int dump_cleanup(DumpState *s)
     s->guest_note = NULL;
     if (s->resume) {
         if (s->detached) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
         }
         vm_start();
         if (s->detached) {
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
     }
     migrate_del_blocker(&dump_migration_blocker);
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 9b9fe821d5e870f16b49b72445ec11fbab278dff..85aa006cf6f8a481722437c1dc1fdb4e43aa2783 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -3238,10 +3238,24 @@ static void machvirt_machine_init(void)
 }
 type_init(machvirt_machine_init);
 
+static void virt_machine_9_1_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE_AS_LATEST(9, 1)
+
+static void virt_machine_9_0_options(MachineClass *mc)
+{
+    virt_machine_9_1_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_9_0, hw_compat_9_0_len);
+}
+DEFINE_VIRT_MACHINE(9, 0)
+
 static void virt_machine_8_2_options(MachineClass *mc)
 {
+    virt_machine_9_0_options(mc);
+    compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len);
 }
-DEFINE_VIRT_MACHINE_AS_LATEST(8, 2)
+DEFINE_VIRT_MACHINE(8, 2)
 
 static void virt_machine_8_1_options(MachineClass *mc)
 {
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index 91317cc7ac7aa1c719445a36d45d91b8999e0a3e..7fecea8c2f14bc3be21f05df332fb5732ef7d2cc 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -70,14 +70,14 @@ CPUState *cpu_create(const char *typename)
  * BQL here if we need to.  cpu_interrupt assumes it is held.*/
 void cpu_reset_interrupt(CPUState *cpu, int mask)
 {
-    bool need_lock = !qemu_mutex_iothread_locked();
+    bool need_lock = !bql_locked();
 
     if (need_lock) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }
     cpu->interrupt_request &= ~mask;
     if (need_lock) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 86887393ca6a4c74b92cadb30aab5b1078ef259f..2dcf8454b8c24d6238614ddabd8d408a58707e6c 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -32,6 +32,12 @@
 #include "hw/virtio/virtio-net.h"
 #include "audio/audio.h"
 
+GlobalProperty hw_compat_8_2[] = {};
+GlobalProperty hw_compat_9_0[] = {};
+const size_t hw_compat_9_0_len = G_N_ELEMENTS(hw_compat_9_0);
+
+const size_t hw_compat_8_2_len = G_N_ELEMENTS(hw_compat_8_2);
+
 GlobalProperty hw_compat_8_1[] = {
     { TYPE_PCI_BRIDGE, "x-pci-express-writeable-slt-bug", "true" },
     { "ramfb", "x-migrate", "off" },
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 5085a6fee3f1d4543b5947c1bc99cbc06982239d..7e310109cf80f03770897eb28148a5174ebc9acc 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -1665,7 +1665,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
 {
     bool use_iommu, pt;
     /* Whether we need to take the BQL on our own */
-    bool take_bql = !qemu_mutex_iothread_locked();
+    bool take_bql = !bql_locked();
 
     assert(as);
 
@@ -1683,7 +1683,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
      * it. We'd better make sure we have had it already, or, take it.
      */
     if (take_bql) {
-        qemu_mutex_lock_iothread();
+        bql_lock();
     }
 
     /* Turn off first then on the other */
@@ -1738,7 +1738,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as)
     }
 
     if (take_bql) {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 
     return use_iommu;
diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c
index 02b8cbf8dfffeb925668d476c3782316abbb1848..d7d15cfaf7877e9c730fcabda9efa2e0abba18e0 100644
--- a/hw/i386/kvm/xen_evtchn.c
+++ b/hw/i386/kvm/xen_evtchn.c
@@ -425,7 +425,7 @@ void xen_evtchn_set_callback_level(int level)
      * effect immediately. That just leaves interdomain loopback as the case
      * which uses the BH.
      */
-    if (!qemu_mutex_iothread_locked()) {
+    if (!bql_locked()) {
         qemu_bh_schedule(s->gsi_bh);
         return;
     }
@@ -459,7 +459,7 @@ int xen_evtchn_set_callback_param(uint64_t param)
      * We need the BQL because set_callback_pci_intx() may call into PCI code,
      * and because we may need to manipulate the old and new GSI levels.
      */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     qemu_mutex_lock(&s->port_lock);
 
     switch (type) {
@@ -1037,7 +1037,7 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port,
     XenEvtchnPort *p = &s->port_table[port];
 
     /* Because it *might* be a PIRQ port */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     switch (p->type) {
     case EVTCHNSTAT_closed:
@@ -1104,7 +1104,7 @@ int xen_evtchn_soft_reset(void)
         return -ENOTSUP;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     qemu_mutex_lock(&s->port_lock);
 
@@ -1601,7 +1601,7 @@ bool xen_evtchn_set_gsi(int gsi, int level)
     XenEvtchnState *s = xen_evtchn_singleton;
     int pirq;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
         return false;
@@ -1712,7 +1712,7 @@ void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
         return;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(addr, data);
 
@@ -1749,7 +1749,7 @@ int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
         return 1; /* Not a PIRQ */
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(address, data);
     if (!pirq || pirq >= s->nr_pirqs) {
@@ -1796,7 +1796,7 @@ bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
         return false;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     pirq = msi_pirq_target(address, data);
     if (!pirq || pirq >= s->nr_pirqs) {
diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c
index 39fda1b72c3f8ed8c807a65787795c494feb20e0..17222946389f93be9d4dac8a54d6049234ef632a 100644
--- a/hw/i386/kvm/xen_overlay.c
+++ b/hw/i386/kvm/xen_overlay.c
@@ -194,7 +194,7 @@ int xen_overlay_map_shinfo_page(uint64_t gpa)
         return -ENOENT;
     }
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     if (s->shinfo_gpa) {
         /* If removing shinfo page, turn the kernel magic off first */
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index 6e651960b3ab5eb0cd1fc59b91496f048d352f5f..ae27889a3f82b60b0e81a664c1c91b09bd83a764 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -1341,7 +1341,7 @@ static void fire_watch_cb(void *opaque, const char *path, const char *token)
 {
     XenXenstoreState *s = opaque;
 
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
 
     /*
      * If there's a response pending, we obviously can't scribble over
diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c
index ca55aecc3b4f2472ab6b6010f225121fabc56862..61a772dfe6e579e1151f0cba86c26dc1c73db48d 100644
--- a/hw/i386/microvm.c
+++ b/hw/i386/microvm.c
@@ -175,7 +175,7 @@ static void microvm_devices_init(MicrovmMachineState *mms)
                           &error_abort);
     isa_bus_register_input_irqs(isa_bus, x86ms->gsi);
 
-    ioapic_init_gsi(gsi_state, "machine");
+    ioapic_init_gsi(gsi_state, OBJECT(mms));
     if (ioapics > 1) {
         x86ms->ioapic2 = ioapic_init_secondary(gsi_state);
     }
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 36338cbf73545b15732c5239a199fb68847ee560..806009367c7becf0ed2227f0f8dc9433e674d1b0 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -78,6 +78,12 @@
     { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\
     { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },
 
+GlobalProperty pc_compat_9_0[] = {};
+const size_t pc_compat_9_0_len = G_N_ELEMENTS(pc_compat_9_0);
+
+GlobalProperty pc_compat_8_2[] = {};
+const size_t pc_compat_8_2_len = G_N_ELEMENTS(pc_compat_8_2);
+
 GlobalProperty pc_compat_8_1[] = {};
 const size_t pc_compat_8_1_len = G_N_ELEMENTS(pc_compat_8_1);
 
@@ -1886,11 +1892,6 @@ static void pc_machine_initfn(Object *obj)
     cxl_machine_init(obj, &pcms->cxl_devices_state);
 }
 
-int pc_machine_kvm_type(MachineState *machine, const char *kvm_type)
-{
-    return 0;
-}
-
 static void pc_machine_reset(MachineState *machine, ShutdownCause reason)
 {
     CPUState *cs;
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index eace8543358a85a0d1f1f75353bb0e601db3b0e0..45b1543de8eda9cb7c501daeaf3669616d79abf4 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -112,6 +112,7 @@ static void pc_init1(MachineState *machine,
     X86MachineState *x86ms = X86_MACHINE(machine);
     MemoryRegion *system_memory = get_system_memory();
     MemoryRegion *system_io = get_system_io();
+    Object *phb = NULL;
     PCIBus *pci_bus = NULL;
     ISABus *isa_bus;
     Object *piix4_pm = NULL;
@@ -195,8 +196,6 @@ static void pc_init1(MachineState *machine,
     }
 
     if (pcmc->pci_enabled) {
-        Object *phb;
-
         pci_memory = g_new(MemoryRegion, 1);
         memory_region_init(pci_memory, NULL, "pci", UINT64_MAX);
         rom_memory = pci_memory;
@@ -323,8 +322,8 @@ static void pc_init1(MachineState *machine,
         pc_i8259_create(isa_bus, gsi_state->i8259_irq);
     }
 
-    if (pcmc->pci_enabled) {
-        ioapic_init_gsi(gsi_state, "i440fx");
+    if (phb) {
+        ioapic_init_gsi(gsi_state, phb);
     }
 
     if (tcg_enabled()) {
@@ -545,13 +544,37 @@ static void pc_i440fx_machine_options(MachineClass *m)
                                      "Use a different south bridge than PIIX3");
 }
 
-static void pc_i440fx_8_2_machine_options(MachineClass *m)
+static void pc_i440fx_9_1_machine_options(MachineClass *m)
 {
     pc_i440fx_machine_options(m);
     m->alias = "pc";
     m->is_default = true;
 }
 
+DEFINE_I440FX_MACHINE(v9_1, "pc-i440fx-9.1", NULL,
+                      pc_i440fx_9_1_machine_options);
+
+static void pc_i440fx_9_0_machine_options(MachineClass *m)
+{
+    pc_i440fx_9_1_machine_options(m);
+    m->alias = NULL;
+    m->is_default = false;
+
+    compat_props_add(m->compat_props, hw_compat_9_0, hw_compat_9_0_len);
+    compat_props_add(m->compat_props, pc_compat_9_0, pc_compat_9_0_len);
+}
+
+DEFINE_I440FX_MACHINE(v9_0, "pc-i440fx-9.0", NULL,
+                      pc_i440fx_9_0_machine_options);
+
+static void pc_i440fx_8_2_machine_options(MachineClass *m)
+{
+    pc_i440fx_9_0_machine_options(m);
+
+    compat_props_add(m->compat_props, hw_compat_8_2, hw_compat_8_2_len);
+    compat_props_add(m->compat_props, pc_compat_8_2, pc_compat_8_2_len);
+}
+
 DEFINE_I440FX_MACHINE(v8_2, "pc-i440fx-8.2", NULL,
                       pc_i440fx_8_2_machine_options);
 
@@ -560,8 +583,6 @@ static void pc_i440fx_8_1_machine_options(MachineClass *m)
     PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
 
     pc_i440fx_8_2_machine_options(m);
-    m->alias = NULL;
-    m->is_default = false;
     pcmc->broken_32bit_mem_addr_check = true;
 
     compat_props_add(m->compat_props, hw_compat_8_1, hw_compat_8_1_len);
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 3392b0c110f2b507ea0d46d5035e63b29dc6244c..e1b7dce0f2109bbb2af0f9310621c0d4bd6d5686 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -130,8 +130,7 @@ static void pc_q35_init(MachineState *machine)
     ISADevice *rtc_state;
     MemoryRegion *system_memory = get_system_memory();
     MemoryRegion *system_io = get_system_io();
-    MemoryRegion *pci_memory;
-    MemoryRegion *rom_memory;
+    MemoryRegion *pci_memory = g_new(MemoryRegion, 1);
     GSIState *gsi_state;
     ISABus *isa_bus;
     int i;
@@ -143,6 +142,8 @@ static void pc_q35_init(MachineState *machine)
     bool keep_pci_slot_hpc;
     uint64_t pci_hole64_size = 0;
 
+    assert(pcmc->pci_enabled);
+
     /* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory
      * and 256 Mbytes for PCI Express Enhanced Configuration Access Mapping
      * also known as MMCFG).
@@ -189,16 +190,6 @@ static void pc_q35_init(MachineState *machine)
         kvmclock_create(pcmc->kvmclock_create_always);
     }
 
-    /* pci enabled */
-    if (pcmc->pci_enabled) {
-        pci_memory = g_new(MemoryRegion, 1);
-        memory_region_init(pci_memory, NULL, "pci", UINT64_MAX);
-        rom_memory = pci_memory;
-    } else {
-        pci_memory = NULL;
-        rom_memory = system_memory;
-    }
-
     pc_guest_info_init(pcms);
 
     if (pcmc->smbios_defaults) {
@@ -212,14 +203,13 @@ static void pc_q35_init(MachineState *machine)
 
     /* create pci host bus */
     phb = OBJECT(qdev_new(TYPE_Q35_HOST_DEVICE));
-    if (pcmc->pci_enabled) {
-        pci_hole64_size = object_property_get_uint(phb,
-                                                   PCI_HOST_PROP_PCI_HOLE64_SIZE,
-                                                   &error_abort);
-    }
+    pci_hole64_size = object_property_get_uint(phb,
+                                               PCI_HOST_PROP_PCI_HOLE64_SIZE,
+                                               &error_abort);
 
     /* allocate ram and load rom/bios */
-    pc_memory_init(pcms, system_memory, rom_memory, pci_hole64_size);
+    memory_region_init(pci_memory, NULL, "pci", UINT64_MAX);
+    pc_memory_init(pcms, system_memory, pci_memory, pci_hole64_size);
 
     object_property_add_child(OBJECT(machine), "q35", phb);
     object_property_set_link(phb, PCI_HOST_PROP_RAM_MEM,
@@ -245,7 +235,7 @@ static void pc_q35_init(MachineState *machine)
     pcms->bus = host_bus;
 
     /* irq lines */
-    gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled);
+    gsi_state = pc_gsi_create(&x86ms->gsi, true);
 
     /* create ISA bus */
     lpc = pci_new_multifunction(PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC),
@@ -288,9 +278,7 @@ static void pc_q35_init(MachineState *machine)
         pc_i8259_create(isa_bus, gsi_state->i8259_irq);
     }
 
-    if (pcmc->pci_enabled) {
-        ioapic_init_gsi(gsi_state, "q35");
-    }
+    ioapic_init_gsi(gsi_state, OBJECT(phb));
 
     if (tcg_enabled()) {
         x86_register_ferr_irq(x86ms->gsi[13]);
@@ -385,12 +373,33 @@ static void pc_q35_machine_options(MachineClass *m)
     machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
 }
 
-static void pc_q35_8_2_machine_options(MachineClass *m)
+static void pc_q35_9_1_machine_options(MachineClass *m)
 {
     pc_q35_machine_options(m);
     m->alias = "q35";
 }
 
+DEFINE_Q35_MACHINE(v9_1, "pc-q35-9.1", NULL,
+                   pc_q35_9_1_machine_options);
+
+static void pc_q35_9_0_machine_options(MachineClass *m)
+{
+    pc_q35_9_1_machine_options(m);
+    m->alias = NULL;
+    compat_props_add(m->compat_props, hw_compat_9_0, hw_compat_9_0_len);
+    compat_props_add(m->compat_props, pc_compat_9_0, pc_compat_9_0_len);
+}
+
+DEFINE_Q35_MACHINE(v9_0, "pc-q35-9.0", NULL,
+                   pc_q35_9_0_machine_options);
+
+static void pc_q35_8_2_machine_options(MachineClass *m)
+{
+    pc_q35_9_0_machine_options(m);
+    compat_props_add(m->compat_props, hw_compat_8_2, hw_compat_8_2_len);
+    compat_props_add(m->compat_props, pc_compat_8_2, pc_compat_8_2_len);
+}
+
 DEFINE_Q35_MACHINE(v8_2, "pc-q35-8.2", NULL,
                    pc_q35_8_2_machine_options);
diff --git a/hw/i386/x86-common.c b/hw/i386/x86-common.c
index f6f28f84f54bb930e79ba23bf821878483ce9b49..ba8f34f1f6fd7f685b707ba1e5039b5ac95abacc 100644
--- a/hw/i386/x86-common.c
+++ b/hw/i386/x86-common.c
@@ -95,7 +95,7 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
      * a literal `0` in configurations where kvm_* aren't defined)
      */
     if (kvm_enabled() && x86ms->apic_id_limit > 255 &&
-        (!kvm_irqchip_in_kernel() || !kvm_enable_x2apic())) {
+        kvm_irqchip_in_kernel() && !kvm_enable_x2apic()) {
         error_report("current -smp configuration requires kernel "
                      "irqchip and X2APIC API support.");
         exit(EXIT_FAILURE);
@@ -105,6 +105,10 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
         kvm_set_max_apic_id(x86ms->apic_id_limit);
     }
 
+    if (!kvm_irqchip_in_kernel()) {
+        apic_set_max_apic_id(x86ms->apic_id_limit);
+    }
+
     possible_cpus = mc->possible_cpu_arch_ids(ms);
     for (i = 0; i < ms->smp.cpus; i++) {
         x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal);
@@ -472,20 +476,19 @@ void gsi_handler(void *opaque, int n, int level)
     }
 }
 
-void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name)
+void ioapic_init_gsi(GSIState *gsi_state, Object *parent)
 {
     DeviceState *dev;
     SysBusDevice *d;
     unsigned int i;
 
-    assert(parent_name);
+    assert(parent);
     if (kvm_ioapic_in_kernel()) {
         dev = qdev_new(TYPE_KVM_IOAPIC);
     } else {
         dev = qdev_new(TYPE_IOAPIC);
     }
-    object_property_add_child(object_resolve_path(parent_name, NULL),
-                              "ioapic", OBJECT(dev));
+    object_property_add_child(parent, "ioapic", OBJECT(dev));
     d = SYS_BUS_DEVICE(dev);
     sysbus_realize_and_unref(d, &error_fatal);
     sysbus_mmio_map(d, 0, IO_APIC_DEFAULT_ADDRESS);
diff --git a/hw/i386/x86-cpu.c b/hw/i386/x86-cpu.c
index 9ef43f34c6e8fde25042a83c65446263c01223ef..ab2920522d18cfbaa06477e6aed3b35db9937576 100644
--- a/hw/i386/x86-cpu.c
+++ b/hw/i386/x86-cpu.c
@@ -44,7 +44,7 @@ static void pic_irq_request(void *opaque, int irq, int level)
     X86CPU *cpu = X86_CPU(cs);
 
     trace_x86_pic_interrupt(irq, level);
-    if (cpu->apic_state && !kvm_irqchip_in_kernel() &&
+    if (cpu_is_apic_enabled(cpu->apic_state) && !kvm_irqchip_in_kernel() &&
         !whpx_apic_in_platform()) {
         CPU_FOREACH(cs) {
             cpu = X86_CPU(cs);
diff --git a/hw/i386/x86.c b/hw/i386/x86.c
index 19f1e1b1d6e0265dab980fe196a5abb9840cafba..300b831d1d0a1cffac09cced652a1e1001e2549b 100644
--- a/hw/i386/x86.c
+++ b/hw/i386/x86.c
@@ -155,10 +155,10 @@ static void x86_nmi(NMIState *n, int cpu_index, Error **errp)
     CPU_FOREACH(cs) {
         X86CPU *cpu = X86_CPU(cs);
 
-        if (!cpu->apic_state) {
-            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
-        } else {
+        if (cpu_is_apic_enabled(cpu->apic_state)) {
             apic_deliver_nmi(cpu->apic_state);
+        } else {
+            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
         }
     }
 }
@@ -347,6 +347,16 @@ static void machine_set_sgx_epc(Object *obj, Visitor *v, const char *name,
     qapi_free_SgxEPCList(list);
 }
 
+static int x86_kvm_type(MachineState *ms, const char *vm_type)
+{
+    /*
+     * No x86 machine has a kvm-type property.  If one is added that has
+     * it, it should call kvm_get_vm_type() directly or not use it at all.
+     */
+    assert(vm_type == NULL);
+    return kvm_enabled() ? kvm_get_vm_type(ms) : 0;
+}
+
 static void x86_machine_initfn(Object *obj)
 {
     X86MachineState *x86ms = X86_MACHINE(obj);
@@ -371,6 +381,7 @@ static void x86_machine_class_init(ObjectClass *oc, void *data)
     mc->cpu_index_to_instance_props = x86_cpu_index_to_props;
     mc->get_default_cpu_node_id = x86_get_default_cpu_node_id;
     mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids;
+    mc->kvm_type = x86_kvm_type;
     x86mc->save_tsc_khz = true;
    x86mc->fwcfg_dma_enabled = true;
     nc->nmi_monitor_handler = x86_nmi;
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index ac3d47d2318f3db374e214d2a3085f3e7d89650c..178fb26b4773b7df2e6c0519488ed7b119cad0c5 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -32,14 +32,13 @@
 #include "qapi/error.h"
 #include "qom/object.h"
 
-#define MAX_APICS 255
-#define MAX_APIC_WORDS 8
-
 #define SYNC_FROM_VAPIC                 0x1
 #define SYNC_TO_VAPIC                   0x2
 #define SYNC_ISR_IRR_TO_VAPIC           0x4
 
-static APICCommonState *local_apics[MAX_APICS + 1];
+static APICCommonState **local_apics;
+static uint32_t max_apics;
+static uint32_t max_apic_words;
 
 #define TYPE_APIC "apic"
 /*This is reusing the APICCommonState typedef from APIC_COMMON */
@@ -49,7 +48,19 @@ DECLARE_INSTANCE_CHECKER(APICCommonState, APIC,
 static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
 static void apic_update_irq(APICCommonState *s);
 static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
-                                      uint8_t dest, uint8_t dest_mode);
+                                      uint32_t dest, uint8_t dest_mode);
+
+void apic_set_max_apic_id(uint32_t max_apic_id)
+{
+    int word_size = 32;
+
+    /* round up the max apic id to next multiple of words */
+    max_apics = (max_apic_id + word_size - 1) & ~(word_size - 1);
+
+    local_apics = g_malloc0(sizeof(*local_apics) * max_apics);
+    max_apic_words = max_apics >> 5;
+}
 
 /* Find first bit starting from msb */
 static int apic_fls_bit(uint32_t value)
@@ -199,10 +210,10 @@ static void apic_external_nmi(APICCommonState *s)
 #define foreach_apic(apic, deliver_bitmask, code) \
 {\
     int __i, __j;\
-    for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
+    for (__i = 0; __i < max_apic_words; __i++) {\
         uint32_t __mask = deliver_bitmask[__i];\
         if (__mask) {\
-            for(__j = 0; __j < 32; __j++) {\
+            for (__j = 0; __j < 32; __j++) {\
                 if (__mask & (1U << __j)) {\
                     apic = local_apics[__i * 32 + __j];\
                     if (apic) {\
@@ -226,7 +237,7 @@ static void apic_bus_deliver(const uint32_t *deliver_bitmask,
 {
     int i, d;
     d = -1;
-    for(i = 0; i < MAX_APIC_WORDS; i++) {
+    for (i = 0; i < max_apic_words; i++) {
         if (deliver_bitmask[i]) {
             d = i * 32 + apic_ffs_bit(deliver_bitmask[i]);
             break;
@@ -276,16 +287,25 @@ static void apic_bus_deliver(const uint32_t *deliver_bitmask,
                  apic_set_irq(apic_iter, vector_num, trigger_mode) );
 }
 
-void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode,
-                      uint8_t vector_num, uint8_t trigger_mode)
+static void apic_deliver_irq(uint32_t dest, uint8_t dest_mode,
+                             uint8_t delivery_mode, uint8_t vector_num,
+                             uint8_t trigger_mode)
 {
-    uint32_t deliver_bitmask[MAX_APIC_WORDS];
+    uint32_t *deliver_bitmask = g_malloc(max_apic_words * sizeof(uint32_t));
 
     trace_apic_deliver_irq(dest, dest_mode, delivery_mode, vector_num,
                            trigger_mode);
 
     apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode);
     apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
+    g_free(deliver_bitmask);
+}
+
+bool is_x2apic_mode(DeviceState *dev)
+{
+    APICCommonState *s = APIC(dev);
+
+    return s->apicbase & MSR_IA32_APICBASE_EXTD;
 }
 
 static void apic_set_base(APICCommonState *s, uint64_t val)
@@ -435,57 +455,123 @@ static void apic_eoi(APICCommonState *s)
     apic_update_irq(s);
 }
 
-static int apic_find_dest(uint8_t dest)
+static bool apic_match_dest(APICCommonState *apic, uint32_t dest)
 {
-    APICCommonState *apic = local_apics[dest];
-    int i;
+    if (is_x2apic_mode(&apic->parent_obj)) {
+        return apic->initial_apic_id == dest;
+    } else {
+        return apic->id == (uint8_t)dest;
+    }
+}
 
-    if (apic && apic->id == dest)
-        return dest;  /* shortcut in case apic->id == local_apics[dest]->id */
+static void apic_find_dest(uint32_t *deliver_bitmask, uint32_t dest)
+{
+    APICCommonState *apic = NULL;
+    int i;
 
-    for (i = 0; i < MAX_APICS; i++) {
+    for (i = 0; i < max_apics; i++) {
         apic = local_apics[i];
-        if (apic && apic->id == dest)
-            return i;
-        if (!apic)
-            break;
+        if (apic && apic_match_dest(apic, dest)) {
+            apic_set_bit(deliver_bitmask, i);
+        }
     }
+}
 
-    return -1;
+/*
+ * Deliver interrupt to x2APIC CPUs if it is x2APIC broadcast.
+ * Otherwise, deliver interrupt to xAPIC CPUs if it is xAPIC
+ * broadcast.
+ */
+static void apic_get_broadcast_bitmask(uint32_t *deliver_bitmask,
+                                       bool is_x2apic_broadcast)
+{
+    int i;
+    APICCommonState *apic_iter;
+
+    for (i = 0; i < max_apics; i++) {
+        apic_iter = local_apics[i];
+        if (apic_iter) {
+            bool apic_in_x2apic = is_x2apic_mode(&apic_iter->parent_obj);
+
+            if (is_x2apic_broadcast && apic_in_x2apic) {
+                apic_set_bit(deliver_bitmask, i);
+            } else if (!is_x2apic_broadcast && !apic_in_x2apic) {
+                apic_set_bit(deliver_bitmask, i);
+            }
+        }
+    }
 }
 
 static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
-                                      uint8_t dest, uint8_t dest_mode)
+                                      uint32_t dest, uint8_t dest_mode)
 {
-    APICCommonState *apic_iter;
+    APICCommonState *apic;
     int i;
 
-    if (dest_mode == 0) {
-        if (dest == 0xff) {
-            memset(deliver_bitmask, 0xff, MAX_APIC_WORDS * sizeof(uint32_t));
+    memset(deliver_bitmask, 0x00, max_apic_words * sizeof(uint32_t));
+
+    /*
+     * x2APIC broadcast is delivered to all x2APIC CPUs regardless of
+     * destination mode.  In case the destination mode is physical, it is
+     * broadcast to all xAPIC CPUs too.  Otherwise, if the destination mode
+     * is logical, we need to continue checking whether the xAPIC CPUs
+     * accept the interrupt.
+     */
+    if (dest == 0xffffffff) {
+        if (dest_mode == APIC_DESTMODE_PHYSICAL) {
+            memset(deliver_bitmask, 0xff, max_apic_words * sizeof(uint32_t));
+            return;
         } else {
-            int idx = apic_find_dest(dest);
-            memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
-            if (idx >= 0)
-                apic_set_bit(deliver_bitmask, idx);
+            apic_get_broadcast_bitmask(deliver_bitmask, true);
+        }
+    }
+
+    if (dest_mode == APIC_DESTMODE_PHYSICAL) {
+        apic_find_dest(deliver_bitmask, dest);
+        /* Any APIC in xAPIC mode will interpret 0xFF as broadcast */
+        if (dest == 0xff) {
+            apic_get_broadcast_bitmask(deliver_bitmask, false);
         }
     } else {
-        /* XXX: cluster mode */
-        memset(deliver_bitmask, 0x00, MAX_APIC_WORDS * sizeof(uint32_t));
-        for(i = 0; i < MAX_APICS; i++) {
-            apic_iter = local_apics[i];
-            if (apic_iter) {
-                if (apic_iter->dest_mode == 0xf) {
-                    if (dest & apic_iter->log_dest)
-                        apic_set_bit(deliver_bitmask, i);
-                } else if (apic_iter->dest_mode == 0x0) {
-                    if ((dest & 0xf0) == (apic_iter->log_dest & 0xf0) &&
-                        (dest & apic_iter->log_dest & 0x0f)) {
+        /* XXX: logical mode */
+        for (i = 0; i < max_apics; i++) {
+            apic = local_apics[i];
+            if (apic) {
+                /* x2APIC logical mode */
+                if (apic->apicbase & MSR_IA32_APICBASE_EXTD) {
+                    if ((dest >> 16) == (apic->extended_log_dest >> 16) &&
+                        (dest & apic->extended_log_dest & 0xffff)) {
                         apic_set_bit(deliver_bitmask, i);
                     }
+                    continue;
                 }
-            } else {
-                break;
+
+                /* xAPIC logical mode */
+                dest = (uint8_t)dest;
+                if (apic->dest_mode == APIC_DESTMODE_LOGICAL_FLAT) {
+                    if (dest & apic->log_dest) {
+                        apic_set_bit(deliver_bitmask, i);
+                    }
+                } else if (apic->dest_mode == APIC_DESTMODE_LOGICAL_CLUSTER) {
+                    /*
+                     * In the cluster model of xAPIC logical mode IPI, the 4
+                     * higher bits are used as the cluster address and the 4
+                     * lower bits are the bitmask for local APICs in the
+                     * cluster.  The IPI is delivered to an APIC if the
+                     * cluster address matches and the APIC's address bit in
+                     * the cluster is set in the bitmask of the destination
+                     * ID in the IPI.
+                     *
+                     * The cluster address ranges from 0 to 14; the cluster
+                     * address 15 (0xf) is the broadcast address to all
+                     * clusters.
+ */ + if ((dest & 0xf0) == 0xf0 || + (dest & 0xf0) == (apic->log_dest & 0xf0)) { + if (dest & apic->log_dest & 0x0f) { + apic_set_bit(deliver_bitmask, i); + } + } + } } } } @@ -509,29 +595,36 @@ void apic_sipi(DeviceState *dev) s->wait_for_sipi = 0; } -static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode, +static void apic_deliver(DeviceState *dev, uint32_t dest, uint8_t dest_mode, uint8_t delivery_mode, uint8_t vector_num, - uint8_t trigger_mode) + uint8_t trigger_mode, uint8_t dest_shorthand) { APICCommonState *s = APIC(dev); - uint32_t deliver_bitmask[MAX_APIC_WORDS]; - int dest_shorthand = (s->icr[0] >> 18) & 3; APICCommonState *apic_iter; + uint32_t deliver_bitmask_size = max_apic_words * sizeof(uint32_t); + uint32_t *deliver_bitmask = g_malloc(deliver_bitmask_size); + uint32_t current_apic_id; + + if (is_x2apic_mode(dev)) { + current_apic_id = s->initial_apic_id; + } else { + current_apic_id = s->id; + } switch (dest_shorthand) { case 0: apic_get_delivery_bitmask(deliver_bitmask, dest, dest_mode); break; case 1: - memset(deliver_bitmask, 0x00, sizeof(deliver_bitmask)); - apic_set_bit(deliver_bitmask, s->id); + memset(deliver_bitmask, 0x00, deliver_bitmask_size); + apic_set_bit(deliver_bitmask, current_apic_id); break; case 2: - memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); + memset(deliver_bitmask, 0xff, deliver_bitmask_size); break; case 3: - memset(deliver_bitmask, 0xff, sizeof(deliver_bitmask)); - apic_reset_bit(deliver_bitmask, s->id); + memset(deliver_bitmask, 0xff, deliver_bitmask_size); + apic_reset_bit(deliver_bitmask, current_apic_id); break; } @@ -555,6 +648,7 @@ static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode, } apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode); + g_free(deliver_bitmask); } static bool apic_check_pic(APICCommonState *s) @@ -636,27 +730,26 @@ static void apic_timer(void *opaque) apic_timer_update(s, s->next_time); } -static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size) +static int apic_register_read(int index, uint64_t *value) { DeviceState *dev; APICCommonState *s; uint32_t val; - int index; - - if (size < 4) { - return 0; - } + int ret = 0; dev = cpu_get_current_apic(); if (!dev) { - return 0; + return -1; } s = APIC(dev); - index = (addr >> 4) & 0xff; switch(index) { case 0x02: /* id */ - val = s->id << 24; + if (is_x2apic_mode(dev)) { + val = s->initial_apic_id; + } else { + val = s->id << 24; + } break; case 0x03: /* version */ val = s->version | ((APIC_LVT_NB - 1) << 16); @@ -679,10 +772,19 @@ static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size) val = 0; break; case 0x0d: - val = s->log_dest << 24; + if (is_x2apic_mode(dev)) { + val = s->extended_log_dest; + } else { + val = s->log_dest << 24; + } break; case 0x0e: - val = (s->dest_mode << 28) | 0xfffffff; + if (is_x2apic_mode(dev)) { + val = 0; + ret = -1; + } else { + val = (s->dest_mode << 28) | 0xfffffff; + } break; case 0x0f: val = s->spurious_vec; @@ -718,17 +820,56 @@ static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size) default: s->esr |= APIC_ESR_ILLEGAL_ADDRESS; val = 0; + ret = -1; break; } - trace_apic_mem_readl(addr, val); + + trace_apic_register_read(index, val); + *value = val; + return ret; +} + +static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t val; + int index; + + if (size < 4) { + return 0; + } + + index = (addr >> 4) & 0xff; + apic_register_read(index, &val); + return val; } +int apic_msr_read(int index, 
+{
+    DeviceState *dev;
+
+    dev = cpu_get_current_apic();
+    if (!dev) {
+        return -1;
+    }
+
+    if (!is_x2apic_mode(dev)) {
+        return -1;
+    }
+
+    return apic_register_read(index, val);
+}
+
 static void apic_send_msi(MSIMessage *msi)
 {
     uint64_t addr = msi->address;
     uint32_t data = msi->data;
-    uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+    uint32_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+    /*
+     * The higher 3 bytes of the destination ID are stored in the higher
+     * word of the MSI address. See x86_iommu_irq_to_msi_message().
+     */
+    dest = dest | (addr >> 32);
     uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
     uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
     uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
@@ -737,38 +878,25 @@ static void apic_send_msi(MSIMessage *msi)
     apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode);
 }
 
-static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
-                           unsigned size)
+static int apic_register_write(int index, uint64_t val)
 {
     DeviceState *dev;
     APICCommonState *s;
-    int index = (addr >> 4) & 0xff;
-
-    if (size < 4) {
-        return;
-    }
-
-    if (addr > 0xfff || !index) {
-        /* MSI and MMIO APIC are at the same memory location,
-         * but actually not on the global bus: MSI is on PCI bus
-         * APIC is connected directly to the CPU.
-         * Mapping them on the global bus happens to work because
-         * MSI registers are reserved in APIC MMIO and vice versa. */
-        MSIMessage msi = { .address = addr, .data = val };
-        apic_send_msi(&msi);
-        return;
-    }
 
     dev = cpu_get_current_apic();
     if (!dev) {
-        return;
+        return -1;
     }
     s = APIC(dev);
 
-    trace_apic_mem_writel(addr, val);
+    trace_apic_register_write(index, val);
 
     switch(index) {
     case 0x02:
+        if (is_x2apic_mode(dev)) {
+            return -1;
+        }
+
         s->id = (val >> 24);
         break;
     case 0x03:
@@ -788,9 +916,17 @@ static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
         apic_eoi(s);
         break;
     case 0x0d:
+        if (is_x2apic_mode(dev)) {
+            return -1;
+        }
+
         s->log_dest = val >> 24;
         break;
     case 0x0e:
+        if (is_x2apic_mode(dev)) {
+            return -1;
+        }
+
         s->dest_mode = val >> 28;
         break;
     case 0x0f:
@@ -802,13 +938,27 @@ static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
     case 0x20 ... 0x27:
     case 0x28:
         break;
-    case 0x30:
+    case 0x30: {
+        uint32_t dest;
+
         s->icr[0] = val;
-        apic_deliver(dev, (s->icr[1] >> 24) & 0xff, (s->icr[0] >> 11) & 1,
+        if (is_x2apic_mode(dev)) {
+            s->icr[1] = val >> 32;
+            dest = s->icr[1];
+        } else {
+            dest = (s->icr[1] >> 24) & 0xff;
+        }
+
+        apic_deliver(dev, dest, (s->icr[0] >> 11) & 1,
                      (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff),
-                     (s->icr[0] >> 15) & 1);
+                     (s->icr[0] >> 15) & 1, (s->icr[0] >> 18) & 3);
         break;
+    }
     case 0x31:
+        if (is_x2apic_mode(dev)) {
+            return -1;
+        }
+
         s->icr[1] = val;
         break;
     case 0x32 ... 0x37:
@@ -837,10 +987,70 @@ static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
             s->count_shift = (v + 1) & 7;
         }
         break;
+    case 0x3f: {
+        int vector = val & 0xff;
+
+        if (!is_x2apic_mode(dev)) {
+            return -1;
+        }
+
+        /*
+         * Self IPI is identical to IPI with
+         * - Destination shorthand: 1 (Self)
+         * - Trigger mode: 0 (Edge)
+         * - Delivery mode: 0 (Fixed)
+         */
+        apic_deliver(dev, 0, 0, APIC_DM_FIXED, vector, 0, 1);
+
+        break;
+    }
     default:
         s->esr |= APIC_ESR_ILLEGAL_ADDRESS;
-        break;
+        return -1;
     }
+
+    return 0;
+}
+
+static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val,
+                           unsigned size)
+{
+    int index = (addr >> 4) & 0xff;
+
+    if (size < 4) {
+        return;
+    }
+
+    if (addr > 0xfff || !index) {
+        /*
+         * MSI and MMIO APIC are at the same memory location,
+         * but actually not on the global bus: MSI is on PCI bus
+         * APIC is connected directly to the CPU.
+         * Mapping them on the global bus happens to work because
+         * MSI registers are reserved in APIC MMIO and vice versa.
+         */
+        MSIMessage msi = { .address = addr, .data = val };
+        apic_send_msi(&msi);
+        return;
+    }
+
+    apic_register_write(index, val);
+}
+
+int apic_msr_write(int index, uint64_t val)
+{
+    DeviceState *dev;
+
+    dev = cpu_get_current_apic();
+    if (!dev) {
+        return -1;
+    }
+
+    if (!is_x2apic_mode(dev)) {
+        return -1;
+    }
+
+    return apic_register_write(index, val);
 }
 
 static void apic_pre_save(APICCommonState *s)
@@ -871,12 +1081,6 @@ static void apic_realize(DeviceState *dev, Error **errp)
 {
     APICCommonState *s = APIC(dev);
 
-    if (s->id >= MAX_APICS) {
-        error_setg(errp, "%s initialization failed. APIC ID %d is invalid",
-                   object_get_typename(OBJECT(dev)), s->id);
-        return;
-    }
-
     if (kvm_enabled()) {
         warn_report("Userspace local APIC is deprecated for KVM.");
         warn_report("Do not use kernel-irqchip except for the -M isapc machine type.");
@@ -893,7 +1097,16 @@ static void apic_realize(DeviceState *dev, Error **errp)
     s->io_memory.disable_reentrancy_guard = true;
 
     s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, apic_timer, s);
-    local_apics[s->id] = s;
+
+    /*
+     * The '-machine none' board does not call apic_set_max_apic_id()
+     * before creating the APIC, so call it here with a limit of 1, the
+     * maximum number of CPUs that machine supports.
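+     *
+     * Boards with a real CPU topology are expected to have called
+     * apic_set_max_apic_id() already, before any APIC is realized.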
+ */ + if (!local_apics) { + apic_set_max_apic_id(1); + } + local_apics[s->initial_apic_id] = s; msi_nonbroken = true; } @@ -903,7 +1116,7 @@ static void apic_unrealize(DeviceState *dev) APICCommonState *s = APIC(dev); timer_free(s->timer); - local_apics[s->id] = NULL; + local_apics[s->initial_apic_id] = NULL; } static void apic_class_init(ObjectClass *klass, void *data) diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c index bccb4241c2d49ecf7f771a8ce20f007a24c72b28..62e757dba2594e9d9b5cc515ba48d4397da47d29 100644 --- a/hw/intc/apic_common.c +++ b/hw/intc/apic_common.c @@ -63,6 +63,19 @@ uint64_t cpu_get_apic_base(DeviceState *dev) } } +bool cpu_is_apic_enabled(DeviceState *dev) +{ + APICCommonState *s; + + if (!dev) { + return false; + } + + s = APIC_COMMON(dev); + + return s->apicbase & MSR_IA32_APICBASE_ENABLE; +} + void cpu_set_apic_tpr(DeviceState *dev, uint8_t val) { APICCommonState *s; @@ -287,6 +300,10 @@ static void apic_common_realize(DeviceState *dev, Error **errp) } vmstate_register_with_alias_id(NULL, instance_id, &vmstate_apic_common, s, -1, 0, NULL); + + /* APIC LDR in x2APIC mode */ + s->extended_log_dest = ((s->initial_apic_id >> 4) << 16) | + (1 << (s->initial_apic_id & 0xf)); } static void apic_common_unrealize(DeviceState *dev) @@ -427,6 +444,11 @@ static void apic_common_set_id(Object *obj, Visitor *v, const char *name, return; } + if (value >= 255 && !cpu_has_x2apic_feature(&s->cpu->env)) { + error_setg(errp, "APIC ID %d requires x2APIC feature in CPU", value); + return; + } + s->initial_apic_id = value; s->id = (uint8_t)value; } diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index 258dee1b808f962c447abbfdbd4bd535fcfaf784..6ac905364021b336a9020d46a23c69bea1fb1a0e 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -934,7 +934,7 @@ void gicv3_cpuif_update(GICv3CPUState *cs) ARMCPU *cpu = ARM_CPU(cs->cpu); CPUARMState *env = &cpu->env; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq, cs->hppi.grp, cs->hppi.prio); diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c index 74e02858d43e3596d669e0d1a26b2bc8e4c90f3c..93b8531ad0305f0b35474a7fa37cab6d06bdfd2b 100644 --- a/hw/intc/s390_flic.c +++ b/hw/intc/s390_flic.c @@ -106,7 +106,7 @@ static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id, QEMUS390FlicIO *cur, *next; uint8_t isc; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (!(flic->pending & FLIC_PENDING_IO)) { return 0; } @@ -223,7 +223,7 @@ uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic) { uint32_t tmp; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); g_assert(flic->pending & FLIC_PENDING_SERVICE); tmp = flic->service_param; flic->service_param = 0; @@ -238,7 +238,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6) QEMUS390FlicIO *io; uint8_t isc; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) { return NULL; } @@ -262,7 +262,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6) void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic) { - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); g_assert(flic->pending & FLIC_PENDING_MCHK_CR); flic->pending &= ~FLIC_PENDING_MCHK_CR; } @@ -271,7 +271,7 @@ static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm) { QEMUS390FLICState *flic = 
s390_get_qemu_flic(fs); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); /* multiplexing is good enough for sclp - kvm does it internally as well */ flic->service_param |= parm; flic->pending |= FLIC_PENDING_SERVICE; @@ -287,7 +287,7 @@ static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id, QEMUS390FLICState *flic = s390_get_qemu_flic(fs); QEMUS390FlicIO *io; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); io = g_new0(QEMUS390FlicIO, 1); io->id = subchannel_id; io->nr = subchannel_nr; @@ -304,7 +304,7 @@ static void qemu_s390_inject_crw_mchk(S390FLICState *fs) { QEMUS390FLICState *flic = s390_get_qemu_flic(fs); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); flic->pending |= FLIC_PENDING_MCHK_CR; qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR); @@ -330,7 +330,7 @@ bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic) bool qemu_s390_flic_has_any(QEMUS390FLICState *flic) { - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); return !!flic->pending; } @@ -340,7 +340,7 @@ static void qemu_s390_flic_reset(DeviceState *dev) QEMUS390FlicIO *cur, *next; int isc; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); flic->simm = 0; flic->nimm = 0; flic->pending = 0; diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 36ff71f947579bc893339758301aad4eae91c75c..1ef29d0256ad54d2351945d7c90f573c4dcd498b 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -14,8 +14,8 @@ cpu_get_apic_base(uint64_t val) "0x%016"PRIx64 # apic.c apic_local_deliver(int vector, uint32_t lvt) "vector %d delivery mode %d" apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, uint8_t vector_num, uint8_t trigger_mode) "dest %d dest_mode %d delivery_mode %d vector %d trigger_mode %d" -apic_mem_readl(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x" -apic_mem_writel(uint64_t addr, uint32_t val) "0x%"PRIx64" = 0x%08x" +apic_register_read(uint8_t reg, uint64_t val) "register 0x%02x = 0x%"PRIx64 +apic_register_write(uint8_t reg, uint64_t val) "register 0x%02x = 0x%"PRIx64 # ioapic.c ioapic_set_remote_irr(int n) "set remote irr for pin %d" diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c index 2e49e262ee0e90e1d2c40a623be087153accae71..33c00083d47c976abd6ccf3f3af882689e497b21 100644 --- a/hw/m68k/virt.c +++ b/hw/m68k/virt.c @@ -346,10 +346,24 @@ type_init(virt_machine_register_types) } \ type_init(machvirt_machine_##major##_##minor##_init); +static void virt_machine_9_1_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE(9, 1, true) + +static void virt_machine_9_0_options(MachineClass *mc) +{ + virt_machine_9_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_9_0, hw_compat_9_0_len); +} +DEFINE_VIRT_MACHINE(9, 0, false) + static void virt_machine_8_2_options(MachineClass *mc) { + virt_machine_9_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); } -DEFINE_VIRT_MACHINE(8, 2, true) +DEFINE_VIRT_MACHINE(8, 2, false) static void virt_machine_8_1_options(MachineClass *mc) { diff --git a/hw/misc/edu.c b/hw/misc/edu.c index e64a246d3febf9c096c27cb299b83d6fa1a0477d..2a976ca2b151839248126c5c6eb3833832c5bf22 100644 --- a/hw/misc/edu.c +++ b/hw/misc/edu.c @@ -355,9 +355,9 @@ static void *edu_fact_thread(void *opaque) smp_mb__after_rmw(); if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) { - qemu_mutex_lock_iothread(); + bql_lock(); edu_raise_irq(edu, FACT_IRQ); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } diff --git a/hw/misc/imx6_src.c 
b/hw/misc/imx6_src.c index a9c64d06ebcfe10d5b8dfb3a1bdd85a493f391f2..2b9bb075400710aa5c999c0bb5b1392680917f65 100644 --- a/hw/misc/imx6_src.c +++ b/hw/misc/imx6_src.c @@ -131,7 +131,7 @@ static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data) struct SRCSCRResetInfo *ri = data.host_ptr; IMX6SRCState *s = ri->s; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0); DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", diff --git a/hw/misc/imx7_src.c b/hw/misc/imx7_src.c index 983251e86f7268683388dd0eb104a5321771e92c..77ad7a7eeffdc5b12e5430429af1d0f43260e56b 100644 --- a/hw/misc/imx7_src.c +++ b/hw/misc/imx7_src.c @@ -136,7 +136,7 @@ static void imx7_clear_reset_bit(CPUState *cpu, run_on_cpu_data data) struct SRCSCRResetInfo *ri = data.host_ptr; IMX7SRCState *s = ri->s; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); s->regs[SRC_A7RCR0] = deposit32(s->regs[SRC_A7RCR0], ri->reset_bit, 1, 0); diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c index 1e2b3baeb1a5143b21885d35d0fbf33a83a2cdf2..453fdb98198302cccd52f11f2a590666b6e9f48c 100644 --- a/hw/net/xen_nic.c +++ b/hw/net/xen_nic.c @@ -133,7 +133,7 @@ static bool net_tx_packets(struct XenNetDev *netdev) void *page; void *tmpbuf = NULL; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); for (;;) { rc = netdev->tx_ring.req_cons; @@ -260,7 +260,7 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size RING_IDX rc, rp; void *page; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (xen_device_backend_get_state(&netdev->xendev) != XenbusStateConnected) { return -1; @@ -354,7 +354,7 @@ static bool xen_netdev_connect(XenDevice *xendev, Error **errp) XenNetDev *netdev = XEN_NET_DEVICE(xendev); unsigned int port, rx_copy; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (xen_device_frontend_scanf(xendev, "tx-ring-ref", "%u", &netdev->tx_ring_ref) != 1) { @@ -425,7 +425,7 @@ static void xen_netdev_disconnect(XenDevice *xendev, Error **errp) trace_xen_netdev_disconnect(netdev->dev); - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); netdev->tx_ring.sring = NULL; netdev->rx_ring.sring = NULL; diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index 3203a4a72898c9eec0f801c5e7e1cd3fff4922c5..d84f3f977d99336fb8d952f863bd0fb45d481fb8 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -515,7 +515,7 @@ static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) CPUPPCState *env = &cpu->env; /* The TCG path should also be holding the BQL at this point */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (FIELD_EX64(env->msr, MSR, PR)) { qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n"); diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index be167710a3561fba9742676b330fe98471032000..b6581c16fc9f8a17b8f70290e44c4242a9b1008a 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -314,7 +314,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val) { PowerPCCPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); switch ((val >> 28) & 0x3) { case 0x0: @@ -334,7 +334,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val) break; } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* PowerPC 40x internal IRQ controller */ diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index df1a93a58b9e7c21c2fd6a3a6e5eeb096faff28d..38f7e293b64a43e21940c24b3888e4a67ba404bf 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1304,7 +1304,7 @@ static 
void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, CPUPPCState *env = &cpu->env; /* The TCG path should also be holding the BQL at this point */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); g_assert(!vhyp_cpu_in_nested(cpu)); @@ -4789,15 +4789,37 @@ static void spapr_machine_latest_class_options(MachineClass *mc) } \ type_init(spapr_machine_register_##suffix) +/* + * pseries-9.1 + */ +static void spapr_machine_9_1_class_options(MachineClass *mc) +{ + /* Defaults for the latest behaviour inherited from the base class */ +} + +DEFINE_SPAPR_MACHINE(9_1, "9.1", true); + +/* + * pseries-9.0 + */ +static void spapr_machine_9_0_class_options(MachineClass *mc) +{ + spapr_machine_9_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_9_0, hw_compat_9_0_len); +} + +DEFINE_SPAPR_MACHINE(9_0, "9.0", false); + /* * pseries-8.2 */ static void spapr_machine_8_2_class_options(MachineClass *mc) { - /* Defaults for the latest behaviour inherited from the base class */ + spapr_machine_9_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); } -DEFINE_SPAPR_MACHINE(8_2, "8.2", true); +DEFINE_SPAPR_MACHINE(8_2, "8.2", false); /* * pseries-8.1 diff --git a/hw/ppc/spapr_rng.c b/hw/ppc/spapr_rng.c index df5c4b9687350e11ef98d31229befebb623ed599..c2fda7ad2094afe9ff7c93da661104a13e918289 100644 --- a/hw/ppc/spapr_rng.c +++ b/hw/ppc/spapr_rng.c @@ -82,9 +82,9 @@ static target_ulong h_random(PowerPCCPU *cpu, SpaprMachineState *spapr, while (hrdata.received < 8) { rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received, random_recv, &hrdata); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_sem_wait(&hrdata.sem); - qemu_mutex_lock_iothread(); + bql_lock(); } qemu_sem_destroy(&hrdata.sem); diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_softmmu.c index 278666317ef22cb8b8ed8d6832fbef81009cb3b3..fc1bbc0b61c8e92dc51702d036788c54e801b5fb 100644 --- a/hw/ppc/spapr_softmmu.c +++ b/hw/ppc/spapr_softmmu.c @@ -334,7 +334,7 @@ static void *hpt_prepare_thread(void *opaque) pending->ret = H_NO_MEM; } - qemu_mutex_lock_iothread(); + bql_lock(); if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) { /* Ready to go */ @@ -344,7 +344,7 @@ static void *hpt_prepare_thread(void *opaque) free_pending_hpt(pending); } - qemu_mutex_unlock_iothread(); + bql_unlock(); return NULL; } diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c index 9bd98e82197e4dbdb0ad37bb519dec6f31e28d92..d04ac936212f2c0211af9bba9a88e166d4506c88 100644 --- a/hw/remote/mpqemu-link.c +++ b/hw/remote/mpqemu-link.c @@ -33,7 +33,7 @@ */ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) { - bool iolock = qemu_mutex_iothread_locked(); + bool drop_bql = bql_locked(); bool iothread = qemu_in_iothread(); struct iovec send[2] = {}; int *fds = NULL; @@ -63,8 +63,8 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) * for IOThread case. * Also skip lock handling while in a co-routine in the main context. */ - if (iolock && !iothread && !qemu_in_coroutine()) { - qemu_mutex_unlock_iothread(); + if (drop_bql && !iothread && !qemu_in_coroutine()) { + bql_unlock(); } if (!qio_channel_writev_full_all(ioc, send, G_N_ELEMENTS(send), @@ -74,9 +74,9 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) trace_mpqemu_send_io_error(msg->cmd, msg->size, nfds); } - if (iolock && !iothread && !qemu_in_coroutine()) { + if (drop_bql && !iothread && !qemu_in_coroutine()) { /* See above comment why skip locking here. 
*/ - qemu_mutex_lock_iothread(); + bql_lock(); } return ret; @@ -96,7 +96,7 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds, size_t *nfds, Error **errp) { struct iovec iov = { .iov_base = buf, .iov_len = len }; - bool iolock = qemu_mutex_iothread_locked(); + bool drop_bql = bql_locked(); bool iothread = qemu_in_iothread(); int ret = -1; @@ -106,14 +106,14 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds, */ assert(qemu_in_coroutine() || !iothread); - if (iolock && !iothread && !qemu_in_coroutine()) { - qemu_mutex_unlock_iothread(); + if (drop_bql && !iothread && !qemu_in_coroutine()) { + bql_unlock(); } ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, errp); - if (iolock && !iothread && !qemu_in_coroutine()) { - qemu_mutex_lock_iothread(); + if (drop_bql && !iothread && !qemu_in_coroutine()) { + bql_lock(); } return (ret <= 0) ? ret : iov.iov_len; diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c index 8b10c32a3c6eee5446ed3a0ee54433b8e577355e..d9b879e056ba04791ccee282ba1f2cd29201f88a 100644 --- a/hw/remote/vfio-user-obj.c +++ b/hw/remote/vfio-user-obj.c @@ -400,7 +400,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset, } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); release_lock = false; } diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c index 8f5159d85dc6a491fe30a654f8dc54f4f09ab2ca..5c535d483e9f7bb0e665330c6b694cc19cc8904c 100644 --- a/hw/s390x/s390-skeys.c +++ b/hw/s390x/s390-skeys.c @@ -153,7 +153,7 @@ void qmp_dump_skeys(const char *filename, Error **errp) goto out; } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); guest_phys_blocks_init(&guest_phys_blocks); guest_phys_blocks_append(&guest_phys_blocks); diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 57ee3be6b1f0f98634e08254a0d6f49186e3228a..a2df3297e4b98445b2767cbba544be5b679418e6 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -866,14 +866,38 @@ bool css_migration_enabled(void) } \ type_init(ccw_machine_register_##suffix) +static void ccw_machine_9_1_instance_options(MachineState *machine) +{ +} + +static void ccw_machine_9_1_class_options(MachineClass *mc) +{ +} +DEFINE_CCW_MACHINE(9_1, "9.1", true); + +static void ccw_machine_9_0_instance_options(MachineState *machine) +{ + ccw_machine_9_1_instance_options(machine); +} + +static void ccw_machine_9_0_class_options(MachineClass *mc) +{ + ccw_machine_9_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_9_0, hw_compat_9_0_len); +} +DEFINE_CCW_MACHINE(9_0, "9.0", false); + static void ccw_machine_8_2_instance_options(MachineState *machine) { + ccw_machine_9_0_instance_options(machine); } static void ccw_machine_8_2_class_options(MachineClass *mc) { + ccw_machine_9_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); } -DEFINE_CCW_MACHINE(8_2, "8.2", true); +DEFINE_CCW_MACHINE(8_2, "8.2", false); static void ccw_machine_8_1_instance_options(MachineState *machine) { diff --git a/hw/vfio/hct.c b/hw/vfio/hct.c index 8d0b47d264866985c426634e3c34897cdad6befe..4424a048ac35351f8eb048e886af25d3d115a92a 100644 --- a/hw/vfio/hct.c +++ b/hw/vfio/hct.c @@ -999,7 +999,7 @@ static int hct_migrate_precopy_notifier(NotifierWithReturn *notifier, void *data if (pnd->reason != PRECOPY_NOTIFY_SETUP) return 0; - qemu_mutex_unlock_iothread(); + bql_unlock(); /* [0]:magic [1]:version [2]:op [3]:sync_state */ msg[0] = 
HCT_MIG_MSG_MAGIC; @@ -1076,7 +1076,7 @@ static int hct_migrate_precopy_notifier(NotifierWithReturn *notifier, void *data ret = 0; exit: - qemu_mutex_lock_iothread(); + bql_lock(); return ret; } diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h index 5449b6d7428df3e9604c880b630c6f0565bb1dc2..d22ca243293ef05ea4e6ebcabb18cd0c1948baeb 100644 --- a/include/block/aio-wait.h +++ b/include/block/aio-wait.h @@ -151,7 +151,7 @@ static inline bool in_aio_context_home_thread(AioContext *ctx) } if (ctx == qemu_get_aio_context()) { - return qemu_mutex_iothread_locked(); + return bql_locked(); } else { return false; } diff --git a/include/exec/memory.h b/include/exec/memory.h index 3e65d8d9f54662424d88ad7b31945196554e2cbd..d371932d7d9e130b3bf76bd62d5a74cb7d61154c 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -1321,8 +1321,10 @@ void memory_region_init_ram_nomigrate(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, +bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1381,8 +1383,10 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram_from_file(MemoryRegion *mr, +bool memory_region_init_ram_from_file(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1409,8 +1413,10 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram_from_fd(MemoryRegion *mr, +bool memory_region_init_ram_from_fd(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1505,8 +1511,10 @@ void memory_region_init_alias(MemoryRegion *mr, * must be unique within any device * @size: size of the region. * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_rom_nomigrate(MemoryRegion *mr, +bool memory_region_init_rom_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1614,8 +1622,10 @@ void memory_region_init_ram(MemoryRegion *mr, * must be unique within any device * @size: size of the region. * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. 
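+ *
+ * For example, a caller with @mr, @owner, @size and @errp in scope can
+ * now fail fast on the boolean result instead of inspecting @errp:
+ *
+ *   if (!memory_region_init_rom(mr, owner, "rom", size, errp)) {
+ *       return;
+ *   }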
*/ -void memory_region_init_rom(MemoryRegion *mr, +bool memory_region_init_rom(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, diff --git a/include/hw/boards.h b/include/hw/boards.h index 9e7aa736379b7090bb10723aefef22dd6ee4185f..fd775b8747e058fea8f67d8f3ee4e6f2be999322 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -444,6 +444,12 @@ struct MachineState { } \ type_init(machine_initfn##_register_types) +extern GlobalProperty hw_compat_9_0[]; +extern const size_t hw_compat_9_0_len; + +extern GlobalProperty hw_compat_8_2[]; +extern const size_t hw_compat_8_2_len; + extern GlobalProperty hw_compat_8_1[]; extern const size_t hw_compat_8_1_len; diff --git a/include/hw/i386/apic.h b/include/hw/i386/apic.h index bdc15a7a73174f71c26f63510c6396bc4d3961a8..98a87b2ded428a4065dad7a9904f19350ea8338f 100644 --- a/include/hw/i386/apic.h +++ b/include/hw/i386/apic.h @@ -3,14 +3,14 @@ /* apic.c */ -void apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, - uint8_t vector_num, uint8_t trigger_mode); +void apic_set_max_apic_id(uint32_t max_apic_id); int apic_accept_pic_intr(DeviceState *s); void apic_deliver_pic_intr(DeviceState *s, int level); void apic_deliver_nmi(DeviceState *d); int apic_get_interrupt(DeviceState *s); void cpu_set_apic_base(DeviceState *s, uint64_t val); uint64_t cpu_get_apic_base(DeviceState *s); +bool cpu_is_apic_enabled(DeviceState *s); void cpu_set_apic_tpr(DeviceState *s, uint8_t val); uint8_t cpu_get_apic_tpr(DeviceState *s); void apic_init_reset(DeviceState *s); @@ -18,6 +18,9 @@ void apic_sipi(DeviceState *s); void apic_poll_irq(DeviceState *d); void apic_designate_bsp(DeviceState *d, bool bsp); int apic_get_highest_priority_irr(DeviceState *dev); +int apic_msr_read(int index, uint64_t *val); +int apic_msr_write(int index, uint64_t val); +bool is_x2apic_mode(DeviceState *d); /* pc.c */ DeviceState *cpu_get_current_apic(void); diff --git a/include/hw/i386/apic_internal.h b/include/hw/i386/apic_internal.h index 5f2ba24bfcd2ad8c3f83fa5ca7b1acd91e08b272..e796e6cae3b487fbbc6883f28781b8ec07b1e8ab 100644 --- a/include/hw/i386/apic_internal.h +++ b/include/hw/i386/apic_internal.h @@ -46,8 +46,10 @@ #define APIC_DM_EXTINT 7 /* APIC destination mode */ -#define APIC_DESTMODE_FLAT 0xf -#define APIC_DESTMODE_CLUSTER 1 +#define APIC_DESTMODE_PHYSICAL 0 +#define APIC_DESTMODE_LOGICAL 1 +#define APIC_DESTMODE_LOGICAL_FLAT 0xf +#define APIC_DESTMODE_LOGICAL_CLUSTER 0 #define APIC_TRIGGER_EDGE 0 #define APIC_TRIGGER_LEVEL 1 @@ -187,6 +189,7 @@ struct APICCommonState { DeviceState *vapic; hwaddr vapic_paddr; /* note: persistence via kvmvapic */ bool legacy_instance_id; + uint32_t extended_log_dest; }; typedef struct VAPICState { diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index c49e2970f1045aee7d8ec3a722bbefdfd6569430..ee5138cb9726f054b181720b64faf89bce60c928 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -211,6 +211,12 @@ void pc_madt_cpu_entry(int uid, const CPUArchIdList *apic_ids, /* sgx.c */ void pc_machine_init_sgx_epc(PCMachineState *pcms); +extern GlobalProperty pc_compat_9_0[]; +extern const size_t pc_compat_9_0_len; + +extern GlobalProperty pc_compat_8_2[]; +extern const size_t pc_compat_8_2_len; + extern GlobalProperty pc_compat_8_1[]; extern const size_t pc_compat_8_1_len; @@ -310,15 +316,12 @@ extern const size_t pc_compat_1_5_len; extern GlobalProperty pc_compat_1_4[]; extern const size_t pc_compat_1_4_len; -int pc_machine_kvm_type(MachineState *machine, const char *vm_type); - #define 
DEFINE_PC_MACHINE(suffix, namestr, initfn, optsfn) \ static void pc_machine_##suffix##_class_init(ObjectClass *oc, void *data) \ { \ MachineClass *mc = MACHINE_CLASS(oc); \ optsfn(mc); \ mc->init = initfn; \ - mc->kvm_type = pc_machine_kvm_type; \ } \ static const TypeInfo pc_machine_type_##suffix = { \ .name = namestr TYPE_MACHINE_SUFFIX, \ diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h index cb0baf24d9265d08159035037eed1b4d6db6b375..46fb9f195d0e077410701fcdc890f5d9701179d0 100644 --- a/include/hw/i386/x86.h +++ b/include/hw/i386/x86.h @@ -140,7 +140,7 @@ typedef struct GSIState { qemu_irq x86_allocate_cpu_irq(void); void gsi_handler(void *opaque, int n, int level); -void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name); +void ioapic_init_gsi(GSIState *gsi_state, Object *parent); DeviceState *ioapic_init_secondary(GSIState *gsi_state); /* pc_sysfw.c */ diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h index 68e70e61aa59c81256a46b7fae793a45ab99e8a2..72ebc0cb3a88fd10edb01036de11a877ee2f1709 100644 --- a/include/qemu/main-loop.h +++ b/include/qemu/main-loop.h @@ -248,19 +248,19 @@ GSource *iohandler_get_g_source(void); AioContext *iohandler_get_aio_context(void); /** - * qemu_mutex_iothread_locked: Return lock status of the main loop mutex. + * bql_locked: Return lock status of the Big QEMU Lock (BQL) * - * The main loop mutex is the coarsest lock in QEMU, and as such it + * The Big QEMU Lock (BQL) is the coarsest lock in QEMU, and as such it * must always be taken outside other locks. This function helps * functions take different paths depending on whether the current - * thread is running within the main loop mutex. + * thread is running within the BQL. * * This function should never be used in the block layer, because * unit tests, block layer tools and qemu-storage-daemon do not * have a BQL. * Please instead refer to qemu_in_main_thread(). */ -bool qemu_mutex_iothread_locked(void); +bool bql_locked(void); /** * qemu_in_main_thread: return whether it's possible to safely access @@ -312,58 +312,57 @@ bool qemu_in_main_thread(void); } while (0) /** - * qemu_mutex_lock_iothread: Lock the main loop mutex. + * bql_lock: Lock the Big QEMU Lock (BQL). * - * This function locks the main loop mutex. The mutex is taken by + * This function locks the Big QEMU Lock (BQL). The lock is taken by * main() in vl.c and always taken except while waiting on - * external events (such as with select). The mutex should be taken + * external events (such as with select). The lock should be taken * by threads other than the main loop thread when calling * qemu_bh_new(), qemu_set_fd_handler() and basically all other * functions documented in this file. * - * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread + * NOTE: tools currently are single-threaded and bql_lock * is a no-op there. */ -#define qemu_mutex_lock_iothread() \ - qemu_mutex_lock_iothread_impl(__FILE__, __LINE__) -void qemu_mutex_lock_iothread_impl(const char *file, int line); +#define bql_lock() bql_lock_impl(__FILE__, __LINE__) +void bql_lock_impl(const char *file, int line); /** - * qemu_mutex_unlock_iothread: Unlock the main loop mutex. + * bql_unlock: Unlock the Big QEMU Lock (BQL). * - * This function unlocks the main loop mutex. The mutex is taken by + * This function unlocks the Big QEMU Lock. The lock is taken by * main() in vl.c and always taken except while waiting on - * external events (such as with select). 
The mutex should be unlocked + * external events (such as with select). The lock should be unlocked * as soon as possible by threads other than the main loop thread, * because it prevents the main loop from processing callbacks, * including timers and bottom halves. * - * NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread + * NOTE: tools currently are single-threaded and bql_unlock * is a no-op there. */ -void qemu_mutex_unlock_iothread(void); +void bql_unlock(void); /** * QEMU_IOTHREAD_LOCK_GUARD * - * Wrap a block of code in a conditional qemu_mutex_{lock,unlock}_iothread. + * Wrap a block of code in a conditional bql_{lock,unlock}. */ typedef struct IOThreadLockAuto IOThreadLockAuto; static inline IOThreadLockAuto *qemu_iothread_auto_lock(const char *file, int line) { - if (qemu_mutex_iothread_locked()) { + if (bql_locked()) { return NULL; } - qemu_mutex_lock_iothread_impl(file, line); + bql_lock_impl(file, line); /* Anything non-NULL causes the cleanup function to be called */ return (IOThreadLockAuto *)(uintptr_t)1; } static inline void qemu_iothread_auto_unlock(IOThreadLockAuto *l) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } G_DEFINE_AUTOPTR_CLEANUP_FUNC(IOThreadLockAuto, qemu_iothread_auto_unlock) diff --git a/include/qemu/thread.h b/include/qemu/thread.h index dd3822d7cee9010fb4e07158ee86c429c30315f2..fb74e21c08a7662d81b36cd0f543b4f6a8d934e7 100644 --- a/include/qemu/thread.h +++ b/include/qemu/thread.h @@ -47,7 +47,7 @@ typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f, typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms, const char *f, int l); -extern QemuMutexLockFunc qemu_bql_mutex_lock_func; +extern QemuMutexLockFunc bql_mutex_lock_func; extern QemuMutexLockFunc qemu_mutex_lock_func; extern QemuMutexTrylockFunc qemu_mutex_trylock_func; extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func; diff --git a/include/sysemu/hostmem.h b/include/sysemu/hostmem.h index 39326f1d4f9c0713fdcf0613623a941157cbeed8..0e411aaa29e1a8734c39cd9fb90046b012c4d5de 100644 --- a/include/sysemu/hostmem.h +++ b/include/sysemu/hostmem.h @@ -47,7 +47,15 @@ OBJECT_DECLARE_TYPE(HostMemoryBackend, HostMemoryBackendClass, struct HostMemoryBackendClass { ObjectClass parent_class; - void (*alloc)(HostMemoryBackend *backend, Error **errp); + /** + * alloc: Allocate memory from backend. + * + * @backend: the #HostMemoryBackend. + * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. + */ + bool (*alloc)(HostMemoryBackend *backend, Error **errp); }; /** diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index 8747a121fdf22d52fbb08abf4401fdd457fb9447..8673c27749a340fcfcbca9a81554c0a5cb0b91fd 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -573,22 +573,13 @@ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target); /* Notify resamplefd for EOI of specific interrupts. 
*/ void kvm_resample_fd_notify(int gsi); -/** - * kvm_cpu_check_are_resettable - return whether CPUs can be reset - * - * Returns: true: CPUs are resettable - * false: CPUs are not resettable - */ -bool kvm_cpu_check_are_resettable(void); - -bool kvm_arch_cpu_check_are_resettable(void); - bool kvm_dirty_ring_enabled(void); uint32_t kvm_dirty_ring_size(void); int kvm_load_user_data(hwaddr loader_start, hwaddr image_end, hwaddr initrd_start, hwaddr dtb_end, hwaddr ram_size, struct kvm_numa_info *numa_info); +void kvm_mark_guest_state_protected(void); /** * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h index 35aefb8d9a84fd86fef91e3c9d6b0f24af841cb4..31ac7eb00b5883f31e59e348662e9942e2e61939 100644 --- a/include/sysemu/kvm_int.h +++ b/include/sysemu/kvm_int.h @@ -87,6 +87,7 @@ struct KVMState bool kernel_irqchip_required; OnOffAuto kernel_irqchip_split; bool sync_mmu; + bool guest_state_protected; uint64_t manual_dirty_log_protect; /* The man page (and posix) say ioctl numbers are signed int, but * they're not. Linux, glibc and *BSD all treat ioctl numbers as diff --git a/memory_ldst.c.inc b/memory_ldst.c.inc index 84b868f29464853b60cd070534d1d3a346692a6d..0e6f3940a9a155114f5967500a276059f0f67ca6 100644 --- a/memory_ldst.c.inc +++ b/memory_ldst.c.inc @@ -61,7 +61,7 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -130,7 +130,7 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -186,7 +186,7 @@ uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -234,7 +234,7 @@ static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -295,7 +295,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -339,7 +339,7 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -391,7 +391,7 @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -435,7 +435,7 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -499,7 +499,7 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c index 24347ab0f756a61509954e1d9da103e15fc99242..92e031b6fa74d15623b5c1fffe8e25f4b44296bd 100644 --- a/migration/block-dirty-bitmap.c +++ b/migration/block-dirty-bitmap.c @@ -774,7 +774,7 @@ static void dirty_bitmap_state_pending(void *opaque, SaveBitmapState *dbms; uint64_t pending = 0; - qemu_mutex_lock_iothread(); + bql_lock(); QSIMPLEQ_FOREACH(dbms, &s->dbms_list, 
entry) { uint64_t gran = bdrv_dirty_bitmap_granularity(dbms->bitmap); @@ -784,7 +784,7 @@ static void dirty_bitmap_state_pending(void *opaque, pending += DIV_ROUND_UP(sectors * BDRV_SECTOR_SIZE, gran); } - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_dirty_bitmap_state_pending(pending); diff --git a/migration/block.c b/migration/block.c index a15f9bddcb9d8abaa8e9a31dd9f72ff732395b59..4a675b92bc9e4b4fb27cb89a467a52c6b5a5b6c1 100644 --- a/migration/block.c +++ b/migration/block.c @@ -269,7 +269,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds) int64_t count; if (bmds->shared_base) { - qemu_mutex_lock_iothread(); + bql_lock(); aio_context_acquire(blk_get_aio_context(bb)); /* Skip unallocated sectors; intentionally treats failure or * partial sector as an allocated sector */ @@ -282,7 +282,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds) cur_sector += count >> BDRV_SECTOR_BITS; } aio_context_release(blk_get_aio_context(bb)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } if (cur_sector >= total_sectors) { @@ -321,14 +321,14 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds) * This is ugly and will disappear when we make bdrv_* thread-safe, * without the need to acquire the AioContext. */ - qemu_mutex_lock_iothread(); + bql_lock(); aio_context_acquire(blk_get_aio_context(bmds->blk)); bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE, nr_sectors * BDRV_SECTOR_SIZE); blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov, 0, blk_mig_read_cb, blk); aio_context_release(blk_get_aio_context(bmds->blk)); - qemu_mutex_unlock_iothread(); + bql_unlock(); bmds->cur_sector = cur_sector + nr_sectors; return (bmds->cur_sector >= total_sectors); @@ -786,9 +786,9 @@ static int block_save_iterate(QEMUFile *f, void *opaque) /* Always called with iothread lock taken for * simplicity, block_save_complete also calls it. 
*/ - qemu_mutex_lock_iothread(); + bql_lock(); ret = blk_mig_save_dirty_block(f, 1); - qemu_mutex_unlock_iothread(); + bql_unlock(); } if (ret < 0) { return ret; @@ -860,9 +860,9 @@ static void block_state_pending(void *opaque, uint64_t *must_precopy, /* Estimate pending number of bytes to send */ uint64_t pending; - qemu_mutex_lock_iothread(); + bql_lock(); pending = get_remaining_dirty(); - qemu_mutex_unlock_iothread(); + bql_unlock(); blk_mig_lock(); pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE + diff --git a/migration/colo.c b/migration/colo.c index 4447e349149a19bf1ed9969e923b1ea8b2296989..2a74efdd772cb23a0ff6fe861767fce6caf268d0 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -420,13 +420,13 @@ static int colo_do_checkpoint_transaction(MigrationState *s, qio_channel_io_seek(QIO_CHANNEL(bioc), 0, 0, NULL); bioc->usage = 0; - qemu_mutex_lock_iothread(); + bql_lock(); if (failover_get_state() != FAILOVER_STATUS_NONE) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } vm_stop_force_state(RUN_STATE_COLO); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("run", "stop"); /* * Failover request bh could be called after vm_stop_force_state(), @@ -435,23 +435,23 @@ static int colo_do_checkpoint_transaction(MigrationState *s, if (failover_get_state() != FAILOVER_STATUS_NONE) { goto out; } - qemu_mutex_lock_iothread(); + bql_lock(); replication_do_checkpoint_all(&local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } /* Note: device state is saved into buffer */ ret = qemu_save_device_state(fb); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (ret < 0) { goto out; } @@ -504,9 +504,9 @@ static int colo_do_checkpoint_transaction(MigrationState *s, ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); out: @@ -557,15 +557,15 @@ static void colo_process_checkpoint(MigrationState *s) fb = qemu_file_new_output(QIO_CHANNEL(bioc)); object_unref(OBJECT(bioc)); - qemu_mutex_lock_iothread(); + bql_lock(); replication_start_all(REPLICATION_MODE_PRIMARY, &local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); timer_mod(s->colo_delay_timer, qemu_clock_get_ms(QEMU_CLOCK_HOST) + @@ -639,14 +639,14 @@ out: void migrate_start_colo_process(MigrationState *s) { - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_event_init(&s->colo_checkpoint_event, false); s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST, colo_checkpoint_notify, s); qemu_sem_init(&s->colo_exit_sem, 0); colo_process_checkpoint(s); - qemu_mutex_lock_iothread(); + bql_lock(); } static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, @@ -657,9 +657,9 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, Error *local_err = NULL; int ret; - qemu_mutex_lock_iothread(); + bql_lock(); vm_stop_force_state(RUN_STATE_COLO); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("run", "stop"); /* FIXME: This is unnecessary for periodic checkpoint mode */ @@ -677,10 +677,10 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, return; } - qemu_mutex_lock_iothread(); + bql_lock(); 
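+    /*
+     * With the BQL held, sync register state out of the accelerator
+     * before the checkpointed state from the primary is loaded on top.
+     */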
cpu_synchronize_all_states(); ret = qemu_loadvm_state_main(mis->from_src_file, mis); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (ret < 0) { error_setg(errp, "Load VM's live state (ram) error"); @@ -719,14 +719,14 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, return; } - qemu_mutex_lock_iothread(); + bql_lock(); vmstate_loading = true; colo_flush_ram_cache(); ret = qemu_load_device_state(fb); if (ret < 0) { error_setg(errp, "COLO: load device state failed"); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } @@ -734,7 +734,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, if (local_err) { error_propagate(errp, local_err); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } @@ -743,7 +743,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, if (local_err) { error_propagate(errp, local_err); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } /* Notify all filters of all NIC to do checkpoint */ @@ -752,13 +752,13 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, if (local_err) { error_propagate(errp, local_err); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } vmstate_loading = false; vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) { @@ -851,14 +851,14 @@ static void *colo_process_incoming_thread(void *opaque) fb = qemu_file_new_input(QIO_CHANNEL(bioc)); object_unref(OBJECT(bioc)); - qemu_mutex_lock_iothread(); + bql_lock(); replication_start_all(REPLICATION_MODE_SECONDARY, &local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY, @@ -920,7 +920,7 @@ int coroutine_fn colo_incoming_co(void) Error *local_err = NULL; QemuThread th; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (!migration_incoming_colo_enabled()) { return 0; @@ -940,10 +940,10 @@ int coroutine_fn colo_incoming_co(void) qemu_coroutine_yield(); mis->colo_incoming_co = NULL; - qemu_mutex_unlock_iothread(); + bql_unlock(); /* Wait checkpoint incoming thread exit before free resource */ qemu_thread_join(&th); - qemu_mutex_lock_iothread(); + bql_lock(); /* We hold the global iothread lock, so it is safe here */ colo_release_ram_cache(); diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c index 036ac017fc91f111dcfca1b2dc34bf8be8cb0648..429d10c4d9ce91042f70fab745d624b2173f91c6 100644 --- a/migration/dirtyrate.c +++ b/migration/dirtyrate.c @@ -90,13 +90,13 @@ static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages, void global_dirty_log_change(unsigned int flag, bool start) { - qemu_mutex_lock_iothread(); + bql_lock(); if (start) { memory_global_dirty_log_start(flag); } else { memory_global_dirty_log_stop(flag); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* @@ -106,12 +106,12 @@ void global_dirty_log_change(unsigned int flag, bool start) */ static void global_dirty_log_sync(unsigned int flag, bool one_shot) { - qemu_mutex_lock_iothread(); + bql_lock(); memory_global_dirty_log_sync(false); if (one_shot) { memory_global_dirty_log_stop(flag); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } static DirtyPageRecord 
*vcpu_dirty_stat_alloc(VcpuStat *stat) @@ -610,7 +610,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config) int64_t start_time; DirtyPageRecord dirty_pages; - qemu_mutex_lock_iothread(); + bql_lock(); memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE); /* @@ -627,7 +627,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config) * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled. */ dirtyrate_manual_reset_protect(); - qemu_mutex_unlock_iothread(); + bql_unlock(); record_dirtypages_bitmap(&dirty_pages, true); diff --git a/migration/migration.c b/migration/migration.c index d1c8ec3be6f4444ed511113b5cd0831fcadc5d77..4a1cf892c65f18c81556f2eff6ed2c52414498fe 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -1313,12 +1313,12 @@ static void migrate_fd_cleanup(MigrationState *s) QEMUFile *tmp; trace_migrate_fd_cleanup(); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (s->migration_thread_running) { qemu_thread_join(&s->thread); s->migration_thread_running = false; } - qemu_mutex_lock_iothread(); + bql_lock(); multifd_send_shutdown(); qemu_mutex_lock(&s->qemu_file_lock); @@ -2436,7 +2436,7 @@ static int postcopy_start(MigrationState *ms, Error **errp) } trace_postcopy_start(); - qemu_mutex_lock_iothread(); + bql_lock(); trace_postcopy_start_set_run(); migration_downtime_start(ms); @@ -2545,7 +2545,7 @@ static int postcopy_start(MigrationState *ms, Error **errp) migration_downtime_end(ms); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (migrate_postcopy_ram()) { /* @@ -2586,7 +2586,7 @@ fail: error_report_err(local_err); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); return -1; } @@ -2620,14 +2620,14 @@ static int migration_maybe_pause(MigrationState *s, * wait for the 'pause_sem' semaphore. */ if (s->state != MIGRATION_STATUS_CANCELLING) { - qemu_mutex_unlock_iothread(); + bql_unlock(); migrate_set_state(&s->state, *current_active_state, MIGRATION_STATUS_PRE_SWITCHOVER); qemu_sem_wait(&s->pause_sem); migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER, new_state); *current_active_state = new_state; - qemu_mutex_lock_iothread(); + bql_lock(); } return s->state == new_state ? 0 : -EINVAL; @@ -2638,7 +2638,7 @@ static int migration_completion_precopy(MigrationState *s, { int ret; - qemu_mutex_lock_iothread(); + bql_lock(); migration_downtime_start(s); qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); @@ -2666,7 +2666,7 @@ static int migration_completion_precopy(MigrationState *s, ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, s->block_inactive); out_unlock: - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -2674,9 +2674,9 @@ static void migration_completion_postcopy(MigrationState *s) { trace_migration_completion_postcopy_end(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_savevm_state_complete_postcopy(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); /* * Shutdown the postcopy fast path thread. This is only needed when dest @@ -2700,14 +2700,14 @@ static void migration_completion_failed(MigrationState *s, */ Error *local_err = NULL; - qemu_mutex_lock_iothread(); + bql_lock(); bdrv_activate_all(&local_err); if (local_err) { error_report_err(local_err); } else { s->block_inactive = false; } - qemu_mutex_unlock_iothread(); + bql_unlock(); } migrate_set_state(&s->state, current_active_state, @@ -3147,7 +3147,7 @@ static void migration_iteration_finish(MigrationState *s) /* If we enabled cpu throttling for auto-converge, turn it off. 
*/ cpu_throttle_stop(); - qemu_mutex_lock_iothread(); + bql_lock(); switch (s->state) { case MIGRATION_STATUS_COMPLETED: migration_calculate_complete(s); @@ -3178,7 +3178,7 @@ static void migration_iteration_finish(MigrationState *s) break; } migrate_fd_cleanup_schedule(s); - qemu_mutex_unlock_iothread(); + bql_unlock(); } static void bg_migration_iteration_finish(MigrationState *s) @@ -3190,7 +3190,7 @@ static void bg_migration_iteration_finish(MigrationState *s) */ ram_write_tracking_stop(); - qemu_mutex_lock_iothread(); + bql_lock(); switch (s->state) { case MIGRATION_STATUS_COMPLETED: migration_calculate_complete(s); @@ -3209,7 +3209,7 @@ static void bg_migration_iteration_finish(MigrationState *s) } migrate_fd_cleanup_schedule(s); - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* @@ -3335,9 +3335,9 @@ static void *migration_thread(void *opaque) goto out; } - qemu_mutex_lock_iothread(); + bql_lock(); qemu_savevm_state_header(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); /* * If we opened the return path, we need to make sure dst has it @@ -3365,9 +3365,9 @@ static void *migration_thread(void *opaque) qemu_savevm_send_colo_enable(s->to_dst_file); } - qemu_mutex_lock_iothread(); + bql_lock(); qemu_savevm_state_setup(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE); @@ -3479,10 +3479,10 @@ static void *bg_migration_thread(void *opaque) ram_write_tracking_prepare(); #endif - qemu_mutex_lock_iothread(); + bql_lock(); qemu_savevm_state_header(s->to_dst_file); qemu_savevm_state_setup(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE); @@ -3492,7 +3492,7 @@ static void *bg_migration_thread(void *opaque) trace_migration_thread_setup_complete(); migration_downtime_start(s); - qemu_mutex_lock_iothread(); + bql_lock(); /* * If VM is currently in suspended state, then, to make a valid runstate @@ -3535,7 +3535,7 @@ static void *bg_migration_thread(void *opaque) s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s); qemu_bh_schedule(s->vm_start_bh); - qemu_mutex_unlock_iothread(); + bql_unlock(); while (migration_is_active(s)) { MigIterateState iter_state = bg_migration_iteration_run(s); @@ -3564,7 +3564,7 @@ fail: if (early_fail) { migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_FAILED); - qemu_mutex_unlock_iothread(); + bql_unlock(); } bg_migration_iteration_finish(s); diff --git a/migration/ram.c b/migration/ram.c index 9d49765bcf89fa053273a68da0a2c7978e3b1c84..bd2070e3232686c258c648a1425a6c36177fcc14 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -3429,9 +3429,9 @@ static int ram_save_setup(QEMUFile *f, void *opaque) migration_ops->ram_save_target_page = ram_save_target_page_legacy; } - qemu_mutex_unlock_iothread(); + bql_unlock(); ret = multifd_send_sync_main(); - qemu_mutex_lock_iothread(); + bql_lock(); if (ret < 0) { return ret; } @@ -3688,11 +3688,11 @@ static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy, uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; if (!migration_in_postcopy() && remaining_size < s->threshold_size) { - qemu_mutex_lock_iothread(); + bql_lock(); WITH_RCU_READ_LOCK_GUARD() { migration_bitmap_sync_precopy(rs, false); } - qemu_mutex_unlock_iothread(); + bql_unlock(); remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; } @@ -3920,7 +3920,7 @@ void colo_incoming_start_dirty_log(void) { 
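+    /* The memory_global_dirty_log_*() calls below require the BQL. */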
RAMBlock *block = NULL; /* For memory_global_dirty_log_start below. */ - qemu_mutex_lock_iothread(); + bql_lock(); qemu_mutex_lock_ramlist(); memory_global_dirty_log_sync(false); @@ -3934,7 +3934,7 @@ void colo_incoming_start_dirty_log(void) } ram_state->migration_dirty_pages = 0; qemu_mutex_unlock_ramlist(); - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* It is need to hold the global lock to call this helper */ diff --git a/replay/replay-internal.c b/replay/replay-internal.c index 77d0c82327ed5aeeee63f446bddc147d592af7eb..3e08e381cbbf23b667e6f2a0d696e438c74c75c6 100644 --- a/replay/replay-internal.c +++ b/replay/replay-internal.c @@ -216,7 +216,7 @@ void replay_mutex_lock(void) { if (replay_mode != REPLAY_MODE_NONE) { unsigned long id; - g_assert(!qemu_mutex_iothread_locked()); + g_assert(!bql_locked()); g_assert(!replay_mutex_locked()); qemu_mutex_lock(&lock); id = mutex_tail++; diff --git a/semihosting/console.c b/semihosting/console.c index 5d61e8207e26a7703dac45968bfbc0fb548b4b1c..60102bbab6657035d41334d27e0f4742e7af5332 100644 --- a/semihosting/console.c +++ b/semihosting/console.c @@ -43,7 +43,7 @@ static SemihostingConsole console; static int console_can_read(void *opaque) { SemihostingConsole *c = opaque; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); return (int)fifo8_num_free(&c->fifo); } @@ -58,7 +58,7 @@ static void console_wake_up(gpointer data, gpointer user_data) static void console_read(void *opaque, const uint8_t *buf, int size) { SemihostingConsole *c = opaque; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); while (size-- && !fifo8_is_full(&c->fifo)) { fifo8_push(&c->fifo, *buf++); } @@ -70,7 +70,7 @@ bool qemu_semihosting_console_ready(void) { SemihostingConsole *c = &console; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); return !fifo8_is_empty(&c->fifo); } @@ -78,7 +78,7 @@ void qemu_semihosting_console_block_until_ready(CPUState *cs) { SemihostingConsole *c = &console; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); /* Block if the fifo is completely empty. 
*/ if (fifo8_is_empty(&c->fifo)) { diff --git a/stubs/iothread-lock.c b/stubs/iothread-lock.c index 5b45b7fc8b905701f7b30fd769032dcc36f2b7e0..d7890e5581c5c29b69d18e9e27f509e453c739e5 100644 --- a/stubs/iothread-lock.c +++ b/stubs/iothread-lock.c @@ -1,15 +1,15 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" -bool qemu_mutex_iothread_locked(void) +bool bql_locked(void) { return false; } -void qemu_mutex_lock_iothread_impl(const char *file, int line) +void bql_lock_impl(const char *file, int line) { } -void qemu_mutex_unlock_iothread(void) +void bql_unlock(void) { } diff --git a/system/cpu-throttle.c b/system/cpu-throttle.c index d9bb30a223d84e4b460dbdfaeec5da010659cd4e..786a9a5639c2fa502f693bec239bf0d0f16c1030 100644 --- a/system/cpu-throttle.c +++ b/system/cpu-throttle.c @@ -57,9 +57,9 @@ static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque) qemu_cond_timedwait_iothread(cpu->halt_cond, sleeptime_ns / SCALE_MS); } else { - qemu_mutex_unlock_iothread(); + bql_unlock(); g_usleep(sleeptime_ns / SCALE_US); - qemu_mutex_lock_iothread(); + bql_lock(); } sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME); } diff --git a/system/cpus.c b/system/cpus.c index 43b66f9fb17d69f76b35b4e96764c6d268044dd2..228761a15dd0d4dfd378eb0a7a83f79b78699dc6 100644 --- a/system/cpus.c +++ b/system/cpus.c @@ -65,7 +65,8 @@ #endif /* CONFIG_LINUX */ -static QemuMutex qemu_global_mutex; +/* The Big QEMU Lock (BQL) */ +static QemuMutex bql; /* * The chosen accelerator is supposed to register this. @@ -422,14 +423,14 @@ void qemu_init_cpu_loop(void) qemu_init_sigbus(); qemu_cond_init(&qemu_cpu_cond); qemu_cond_init(&qemu_pause_cond); - qemu_mutex_init(&qemu_global_mutex); + qemu_mutex_init(&bql); qemu_thread_get_self(&io_thread); } void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data) { - do_run_on_cpu(cpu, func, data, &qemu_global_mutex); + do_run_on_cpu(cpu, func, data, &bql); } static void qemu_cpu_stop(CPUState *cpu, bool exit) @@ -461,7 +462,7 @@ void qemu_wait_io_event(CPUState *cpu) slept = true; qemu_plugin_vcpu_idle_cb(cpu); } - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex); + qemu_cond_wait(cpu->halt_cond, &bql); } if (slept) { qemu_plugin_vcpu_resume_cb(cpu); @@ -514,46 +515,46 @@ bool qemu_in_vcpu_thread(void) return current_cpu && qemu_cpu_is_self(current_cpu); } -QEMU_DEFINE_STATIC_CO_TLS(bool, iothread_locked) +QEMU_DEFINE_STATIC_CO_TLS(bool, bql_locked) -bool qemu_mutex_iothread_locked(void) +bool bql_locked(void) { - return get_iothread_locked(); + return get_bql_locked(); } bool qemu_in_main_thread(void) { - return qemu_mutex_iothread_locked(); + return bql_locked(); } /* * The BQL is taken from so many places that it is worth profiling the * callers directly, instead of funneling them all through a single function. 
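 *
 * (A sketch of the mechanics, assuming the usual QEMU convention for
 * _impl functions that take file/line arguments:
 *
 *     #define bql_lock() bql_lock_impl(__FILE__, __LINE__)
 *
 * Each call site then reports its own location to the pluggable lock
 * function read from bql_mutex_lock_func below, which is what makes
 * per-caller profiling possible.)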
*/ -void qemu_mutex_lock_iothread_impl(const char *file, int line) +void bql_lock_impl(const char *file, int line) { - QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func); + QemuMutexLockFunc bql_lock_fn = qatomic_read(&bql_mutex_lock_func); - g_assert(!qemu_mutex_iothread_locked()); - bql_lock(&qemu_global_mutex, file, line); - set_iothread_locked(true); + g_assert(!bql_locked()); + bql_lock_fn(&bql, file, line); + set_bql_locked(true); } -void qemu_mutex_unlock_iothread(void) +void bql_unlock(void) { - g_assert(qemu_mutex_iothread_locked()); - set_iothread_locked(false); - qemu_mutex_unlock(&qemu_global_mutex); + g_assert(bql_locked()); + set_bql_locked(false); + qemu_mutex_unlock(&bql); } void qemu_cond_wait_iothread(QemuCond *cond) { - qemu_cond_wait(cond, &qemu_global_mutex); + qemu_cond_wait(cond, &bql); } void qemu_cond_timedwait_iothread(QemuCond *cond, int ms) { - qemu_cond_timedwait(cond, &qemu_global_mutex, ms); + qemu_cond_timedwait(cond, &bql, ms); } /* signal CPU creation */ @@ -604,15 +605,15 @@ void pause_all_vcpus(void) replay_mutex_unlock(); while (!all_vcpus_paused()) { - qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex); + qemu_cond_wait(&qemu_pause_cond, &bql); CPU_FOREACH(cpu) { qemu_cpu_kick(cpu); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); replay_mutex_lock(); - qemu_mutex_lock_iothread(); + bql_lock(); } void cpu_resume(CPUState *cpu) @@ -641,9 +642,9 @@ void cpu_remove_sync(CPUState *cpu) cpu->stop = true; cpu->unplug = true; qemu_cpu_kick(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_thread_join(cpu->thread); - qemu_mutex_lock_iothread(); + bql_lock(); } void cpus_register_accel(const AccelOpsClass *ops) @@ -681,7 +682,7 @@ void qemu_init_vcpu(CPUState *cpu) cpus_accel->create_vcpu_thread(cpu); while (!cpu->created) { - qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); + qemu_cond_wait(&qemu_cpu_cond, &bql); } } diff --git a/system/dirtylimit.c b/system/dirtylimit.c index 495c7a7082ff190ec12a0d13611bac260a1d0d83..b5607eb8c272a4b5456f3f903c6396dd307381f6 100644 --- a/system/dirtylimit.c +++ b/system/dirtylimit.c @@ -148,9 +148,9 @@ void vcpu_dirty_rate_stat_stop(void) { qatomic_set(&vcpu_dirty_rate_stat->running, 0); dirtylimit_state_unlock(); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_thread_join(&vcpu_dirty_rate_stat->thread); - qemu_mutex_lock_iothread(); + bql_lock(); dirtylimit_state_lock(); } diff --git a/system/memory.c b/system/memory.c index 2ffb878eb8cc8b5d464922003620d65f524b0dad..39c772f3e10e44f16128a53e7f03e9839ee5c88c 100644 --- a/system/memory.c +++ b/system/memory.c @@ -1122,7 +1122,7 @@ void memory_region_transaction_commit(void) AddressSpace *as; assert(memory_region_transaction_depth); - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); --memory_region_transaction_depth; if (!memory_region_transaction_depth) { @@ -1558,7 +1558,7 @@ void memory_region_init_ram_nomigrate(MemoryRegion *mr, memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp); } -void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, +bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1575,7 +1575,9 @@ void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } void memory_region_init_resizeable_ram(MemoryRegion *mr, @@ -1603,7 +1605,7 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr, } #ifdef 
CONFIG_POSIX -void memory_region_init_ram_from_file(MemoryRegion *mr, +bool memory_region_init_ram_from_file(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1626,10 +1628,12 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } -void memory_region_init_ram_from_fd(MemoryRegion *mr, +bool memory_region_init_ram_from_fd(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1650,7 +1654,9 @@ void memory_region_init_ram_from_fd(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } #endif @@ -1701,14 +1707,19 @@ void memory_region_init_alias(MemoryRegion *mr, mr->alias_offset = offset; } -void memory_region_init_rom_nomigrate(MemoryRegion *mr, +bool memory_region_init_rom_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, Error **errp) { - memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp); + if (!memory_region_init_ram_flags_nomigrate(mr, owner, name, + size, 0, errp)) { + return false; + } mr->readonly = true; + + return true; } void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, @@ -3604,19 +3615,16 @@ void memory_region_init_ram(MemoryRegion *mr, vmstate_register_ram(mr, owner_dev); } -void memory_region_init_rom(MemoryRegion *mr, +bool memory_region_init_rom(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, Error **errp) { DeviceState *owner_dev; - Error *err = NULL; - memory_region_init_rom_nomigrate(mr, owner, name, size, &err); - if (err) { - error_propagate(errp, err); - return; + if (!memory_region_init_rom_nomigrate(mr, owner, name, size, errp)) { + return false; } /* This will assert if owner is neither NULL nor a DeviceState. 
* We only want the owner here for the purposes of defining a @@ -3626,6 +3634,8 @@ void memory_region_init_rom(MemoryRegion *mr, */ owner_dev = DEVICE(owner); vmstate_register_ram(mr, owner_dev); + + return true; } void memory_region_init_rom_device(MemoryRegion *mr, diff --git a/system/physmem.c b/system/physmem.c index 9a33a2cc424cf95b1d04d082db108f253cea0770..2188b69e275363e4a077484e3220949c5c2a7719 100644 --- a/system/physmem.c +++ b/system/physmem.c @@ -2671,8 +2671,8 @@ bool prepare_mmio_access(MemoryRegion *mr) { bool release_lock = false; - if (!qemu_mutex_iothread_locked()) { - qemu_mutex_lock_iothread(); + if (!bql_locked()) { + bql_lock(); release_lock = true; } if (mr->flush_coalesced_mmio) { @@ -2753,7 +2753,7 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); release_lock = false; } @@ -2831,7 +2831,7 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); release_lock = false; } diff --git a/system/runstate.c b/system/runstate.c index 1e758909b65f30f588dd7e7525036bc4e7aecf4f..f02183c53add61cc05314fd0de1861555e66b0f3 100644 --- a/system/runstate.c +++ b/system/runstate.c @@ -830,7 +830,7 @@ void qemu_init_subsystems(void) qemu_init_cpu_list(); qemu_init_cpu_loop(); - qemu_mutex_lock_iothread(); + bql_lock(); atexit(qemu_run_exit_notifiers); diff --git a/system/watchpoint.c b/system/watchpoint.c index ba5ad13352c355f5c8de5d25d36efb7fb09b95c2..b76007ebf6b62855f420b72c8ae36c972d74ec04 100644 --- a/system/watchpoint.c +++ b/system/watchpoint.c @@ -155,9 +155,9 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, * Now raise the debug interrupt so that it will * trigger after the current instruction. */ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c index c078849403c7751e303a620d15f63da549f75919..8850381565971212dbd7dbdfc8fcdc72168ff0d1 100644 --- a/target/arm/arm-powerctl.c +++ b/target/arm/arm-powerctl.c @@ -88,7 +88,7 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state, g_free(info); /* Finally set the power status */ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); target_cpu->power_state = PSCI_ON; } @@ -99,7 +99,7 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, ARMCPU *target_cpu; struct CpuOnInfo *info; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64 "\n", cpuid, target_el, target_aa64 ? 
"aarch64" : "aarch32", entry, @@ -196,7 +196,7 @@ static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state, target_cpu_state->halted = 0; /* Finally set the power status */ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); target_cpu->power_state = PSCI_ON; } @@ -205,7 +205,7 @@ int arm_set_cpu_on_and_reset(uint64_t cpuid) CPUState *target_cpu_state; ARMCPU *target_cpu; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); /* Retrieve the cpu we are powering up */ target_cpu_state = arm_get_cpu_by_id(cpuid); @@ -247,7 +247,7 @@ static void arm_set_cpu_off_async_work(CPUState *target_cpu_state, { ARMCPU *target_cpu = ARM_CPU(target_cpu_state); - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); target_cpu->power_state = PSCI_OFF; target_cpu_state->halted = 1; target_cpu_state->exception_index = EXCP_HLT; @@ -258,7 +258,7 @@ int arm_set_cpu_off(uint64_t cpuid) CPUState *target_cpu_state; ARMCPU *target_cpu; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); DPRINTF("cpu %" PRId64 "\n", cpuid); @@ -294,7 +294,7 @@ int arm_reset_cpu(uint64_t cpuid) CPUState *target_cpu_state; ARMCPU *target_cpu; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); DPRINTF("cpu %" PRId64 "\n", cpuid); diff --git a/target/arm/helper.c b/target/arm/helper.c index df1646de3ae8b6d46604fe069a1fddb3b3b85112..fd0103750a3eb4ae5b1749a78d53df5fb3f7df62 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -5839,7 +5839,7 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) * VFIQ are masked unless running at EL0 or EL1, and HCR * can only be written at EL2. */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); arm_cpu_update_virq(cpu); arm_cpu_update_vfiq(cpu); arm_cpu_update_vserr(cpu); @@ -11370,7 +11370,7 @@ void arm_cpu_do_interrupt(CPUState *cs) * BQL needs to be held for any modification of * cs->interrupt_request. */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); arm_call_pre_el_change_hook(cpu); diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c index 757e13b0f904e7390d431158342d9350457e583e..f8dffb815fa37978ffdfbca67a19a33cb9ec86e3 100644 --- a/target/arm/hvf/hvf.c +++ b/target/arm/hvf/hvf.c @@ -1718,9 +1718,9 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts) * sleeping. */ qatomic_set_mb(&cpu->thread_kicked, false); - qemu_mutex_unlock_iothread(); + bql_unlock(); pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask); - qemu_mutex_lock_iothread(); + bql_lock(); } static void hvf_wfi(CPUState *cpu) @@ -1821,7 +1821,7 @@ int hvf_vcpu_exec(CPUState *cpu) flush_cpu_state(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); assert_hvf_ok(hv_vcpu_run(cpu->accel->fd)); /* handle VMEXIT */ @@ -1830,7 +1830,7 @@ int hvf_vcpu_exec(CPUState *cpu) uint32_t ec = syn_get_ec(syndrome); ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); switch (exit_reason) { case HV_EXIT_REASON_EXCEPTION: /* This is the main one, handle below. 
*/ diff --git a/target/arm/kvm.c b/target/arm/kvm.c index fb0ced115f501da90a15ad318a5c62f2b6b0c923..7060532adf97c0b85f546491125832ffaa3450b5 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -888,7 +888,7 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) if (run->s.regs.device_irq_level != cpu->device_irq_level) { switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level; - qemu_mutex_lock_iothread(); + bql_lock(); if (switched_level & KVM_ARM_DEV_EL1_VTIMER) { qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT], @@ -917,7 +917,7 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) /* We also mark unknown levels as processed to not waste cycles */ cpu->device_irq_level = run->s.regs.device_irq_level; - qemu_mutex_unlock_iothread(); + bql_unlock(); } return MEMTXATTRS_UNSPECIFIED; @@ -980,6 +980,83 @@ static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, return -1; } +/** + * kvm_arm_handle_debug: + * @cs: CPUState + * @debug_exit: debug part of the KVM exit structure + * + * Returns: TRUE if the debug exception was handled. + * + * See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register + * + * To minimise translating between kernel and user-space the kernel + * ABI just provides user-space with the full exception syndrome + * register value to be decoded in QEMU. + */ +static bool kvm_arm_handle_debug(CPUState *cs, + struct kvm_debug_exit_arch *debug_exit) +{ + int hsr_ec = syn_get_ec(debug_exit->hsr); + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + /* Ensure PC is synchronised */ + kvm_cpu_synchronize_state(cs); + + switch (hsr_ec) { + case EC_SOFTWARESTEP: + if (cs->singlestep_enabled) { + return true; + } else { + /* + * The kernel should have suppressed the guest's ability to + * single step at this point so something has gone wrong. + */ + error_report("%s: guest single-step while debugging unsupported" + " (%"PRIx64", %"PRIx32")", + __func__, env->pc, debug_exit->hsr); + return false; + } + break; + case EC_AA64_BKPT: + if (kvm_find_sw_breakpoint(cs, env->pc)) { + return true; + } + break; + case EC_BREAKPOINT: + if (find_hw_breakpoint(cs, env->pc)) { + return true; + } + break; + case EC_WATCHPOINT: + { + CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far); + if (wp) { + cs->watchpoint_hit = wp; + return true; + } + break; + } + default: + error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")", + __func__, debug_exit->hsr, env->pc); + } + + /* If we are not handling the debug exception it must belong to + * the guest. Let's re-use the existing TCG interrupt code to set + * everything up properly. 
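+ *
+ * Re-injection goes through arm_cpu_do_interrupt(), which asserts
+ * bql_locked() (see the target/arm/helper.c hunk above); since KVM's
+ * vCPU thread reaches this exit handler without holding the BQL, the
+ * call below is bracketed by bql_lock()/bql_unlock().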
+ */ + cs->exception_index = EXCP_BKPT; + env->exception.syndrome = debug_exit->hsr; + env->exception.vaddress = debug_exit->far; + env->exception.target_el = 1; + bql_lock(); + arm_cpu_do_interrupt(cs); + bql_unlock(); + + return false; +} + int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) { int ret = 0; @@ -1124,11 +1201,6 @@ int kvm_arch_msi_data_to_gsi(uint32_t data) return (data - 32) & 0xffff; } -bool kvm_arch_cpu_check_are_resettable(void) -{ - return !virtcca_cvm_enabled(); -} - static void kvm_arch_get_eager_split_size(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index 583cf89d4e731623216bb85ada677ababfa29314..ef02037567ef6a4f00e3a1821ac2b6c89628abc5 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -1276,76 +1276,6 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) } } -/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register - * - * To minimise translating between kernel and user-space the kernel - * ABI just provides user-space with the full exception syndrome - * register value to be decoded in QEMU. - */ - -bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit) -{ - int hsr_ec = syn_get_ec(debug_exit->hsr); - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - - /* Ensure PC is synchronised */ - kvm_cpu_synchronize_state(cs); - - switch (hsr_ec) { - case EC_SOFTWARESTEP: - if (cs->singlestep_enabled) { - return true; - } else { - /* - * The kernel should have suppressed the guest's ability to - * single step at this point so something has gone wrong. - */ - error_report("%s: guest single-step while debugging unsupported" - " (%"PRIx64", %"PRIx32")", - __func__, env->pc, debug_exit->hsr); - return false; - } - break; - case EC_AA64_BKPT: - if (kvm_find_sw_breakpoint(cs, env->pc)) { - return true; - } - break; - case EC_BREAKPOINT: - if (find_hw_breakpoint(cs, env->pc)) { - return true; - } - break; - case EC_WATCHPOINT: - { - CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far); - if (wp) { - cs->watchpoint_hit = wp; - return true; - } - break; - } - default: - error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")", - __func__, debug_exit->hsr, env->pc); - } - - /* If we are not handling the debug exception it must belong to - * the guest. Let's re-use the existing TCG interrupt code to set - * everything up properly. - */ - cs->exception_index = EXCP_BKPT; - env->exception.syndrome = debug_exit->hsr; - env->exception.vaddress = debug_exit->far; - env->exception.target_el = 1; - qemu_mutex_lock_iothread(); - arm_cpu_do_interrupt(cs); - qemu_mutex_unlock_iothread(); - - return false; -} - #define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0) #define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2) diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 9fd0a520fe8e1aa7652562903a727af655dfc437..fb6b8ba3215100cbb51d04cec63c4b46cc449273 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -468,15 +468,6 @@ static inline void tmm_add_ram_region(hwaddr base1, hwaddr len1, hwaddr base2, } #endif -/** - * kvm_arm_handle_debug: - * @cs: CPUState - * @debug_exit: debug part of the KVM exit structure - * - * Returns: TRUE if the debug exception was handled. 
- */ -bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit); - /** * kvm_arm_hw_debug_active: * @cs: CPU State diff --git a/target/arm/ptw.c b/target/arm/ptw.c index 1762b058aecfc0e61e58a50160d0e2bef92787f4..0ecd3a36dad47d12a90fb9bcfd287347c3812182 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -772,9 +772,9 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, #if !TCG_OVERSIZED_GUEST # error "Unexpected configuration" #endif - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); if (!locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } if (ptw->out_be) { cur_val = ldq_be_p(host); @@ -788,7 +788,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, } } if (!locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index 8ad84623d37d93a3d56c3f42e770be342b705ff0..198b975f207c008d427a9c4ee7c82ea46aa8b72f 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -809,9 +809,9 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) goto illegal_return; } - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_pre_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (!return_to_aa64) { env->aarch64 = false; @@ -876,9 +876,9 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) */ aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64); - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); return; diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c index a26adb75aa247359ccf8aae40c5aff1aa7cdecba..d1f1e02acc11a8f1964c47e218173c649c51e75a 100644 --- a/target/arm/tcg/m_helper.c +++ b/target/arm/tcg/m_helper.c @@ -373,8 +373,8 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env) bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK); bool take_exception; - /* Take the iothread lock as we are going to touch the NVIC */ - qemu_mutex_lock_iothread(); + /* Take the BQL as we are going to touch the NVIC */ + bql_lock(); /* Check the background context had access to the FPU */ if (!v7m_cpacr_pass(env, is_secure, is_priv)) { @@ -428,7 +428,7 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env) take_exception = !stacked_ok && armv7m_nvic_can_take_pending_exception(env->nvic); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (take_exception) { raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC()); diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c index ea08936a852b17d363ff58f4262edb57cbe3b376..34e706e0fd36ad20d098d06d6ec9cd7d84eeadc8 100644 --- a/target/arm/tcg/op_helper.c +++ b/target/arm/tcg/op_helper.c @@ -427,9 +427,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) { uint32_t mask; - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_pre_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar); cpsr_write(env, val, mask, CPSRWriteExceptionReturn); @@ -442,9 +442,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) env->regs[15] &= (env->thumb ? ~1 : ~3); arm_rebuild_hflags(env); - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* Access to user mode registers from privileged modes. 
*/ @@ -803,9 +803,9 @@ void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value) const ARMCPRegInfo *ri = rip; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); ri->writefn(env, ri, value); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { ri->writefn(env, ri, value); } @@ -817,9 +817,9 @@ uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip) uint32_t res; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); res = ri->readfn(env, ri); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { res = ri->readfn(env, ri); } @@ -832,9 +832,9 @@ void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value) const ARMCPRegInfo *ri = rip; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); ri->writefn(env, ri, value); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { ri->writefn(env, ri, value); } @@ -846,9 +846,9 @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip) uint64_t res; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); res = ri->readfn(env, ri); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { res = ri->readfn(env, ri); } diff --git a/target/arm/tcg/psci.c b/target/arm/tcg/psci.c index 6c1239bb9685a00ef7d9ad883ba4c1a0e87ada8b..9080a91d9c696847373bf87b13297e1e057cf9e3 100644 --- a/target/arm/tcg/psci.c +++ b/target/arm/tcg/psci.c @@ -107,7 +107,7 @@ void arm_handle_psci_call(ARMCPU *cpu) } target_cpu = ARM_CPU(target_cpu_state); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); ret = target_cpu->power_state; break; default: diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c index 98e9d688f64070e7b93974c649a1a62df5ed8d23..efe638b36ed9022a386acca15f215e4c516e445b 100644 --- a/target/hppa/int_helper.c +++ b/target/hppa/int_helper.c @@ -84,17 +84,17 @@ void hppa_cpu_alarm_timer(void *opaque) void HELPER(write_eirr)(CPUHPPAState *env, target_ulong val) { env->cr[CR_EIRR] &= ~val; - qemu_mutex_lock_iothread(); + bql_lock(); eval_interrupt(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(write_eiem)(CPUHPPAState *env, target_ulong val) { env->cr[CR_EIEM] = val; - qemu_mutex_lock_iothread(); + bql_lock(); eval_interrupt(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void hppa_cpu_do_interrupt(CPUState *cs) diff --git a/target/i386/confidential-guest.c b/target/i386/confidential-guest.c new file mode 100644 index 0000000000000000000000000000000000000000..b3727845adc3719e19b5a704a8ea33a53aebda7b --- /dev/null +++ b/target/i386/confidential-guest.c @@ -0,0 +1,33 @@ +/* + * QEMU Confidential Guest support + * + * Copyright (C) 2024 Red Hat, Inc. + * + * Authors: + * Paolo Bonzini + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" + +#include "confidential-guest.h" + +OBJECT_DEFINE_ABSTRACT_TYPE(X86ConfidentialGuest, + x86_confidential_guest, + X86_CONFIDENTIAL_GUEST, + CONFIDENTIAL_GUEST_SUPPORT) + +static void x86_confidential_guest_class_init(ObjectClass *oc, void *data) +{ +} + +static void x86_confidential_guest_init(Object *obj) +{ +} + +static void x86_confidential_guest_finalize(Object *obj) +{ +} diff --git a/target/i386/confidential-guest.h b/target/i386/confidential-guest.h new file mode 100644 index 0000000000000000000000000000000000000000..532e172a60b6a04719504b22f056cb0417533a62 --- /dev/null +++ b/target/i386/confidential-guest.h @@ -0,0 +1,59 @@ +/* + * x86-specific confidential guest methods. + * + * Copyright (c) 2024 Red Hat Inc. + * + * Authors: + * Paolo Bonzini + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef TARGET_I386_CG_H +#define TARGET_I386_CG_H + +#include "qom/object.h" + +#include "exec/confidential-guest-support.h" + +#define TYPE_X86_CONFIDENTIAL_GUEST "x86-confidential-guest" + +OBJECT_DECLARE_TYPE(X86ConfidentialGuest, + X86ConfidentialGuestClass, + X86_CONFIDENTIAL_GUEST) + +struct X86ConfidentialGuest { + /* <private> */ + ConfidentialGuestSupport parent_obj; +}; + +/** + * X86ConfidentialGuestClass: + * + * Class to be implemented by confidential-guest-support concrete objects + * for the x86 target. + */ +struct X86ConfidentialGuestClass { + /* <private> */ + ConfidentialGuestSupportClass parent; + + /* <public> */ + int (*kvm_type)(X86ConfidentialGuest *cg); +}; + +/** + * x86_confidential_guest_kvm_type: + * + * Calls the #X86ConfidentialGuestClass.kvm_type callback of @cg. + */ +static inline int x86_confidential_guest_kvm_type(X86ConfidentialGuest *cg) +{ + X86ConfidentialGuestClass *klass = X86_CONFIDENTIAL_GUEST_GET_CLASS(cg); + + if (klass->kvm_type) { + return klass->kvm_type(cg); + } else { + return 0; + } +} +#endif diff --git a/target/i386/cpu-sysemu.c b/target/i386/cpu-sysemu.c index 35d1cefcb39af4da5ee2a6b26a7fcf868803f760..18ed67c558aaaab5f8701d2ddcc9fa712ef08e20 100644 --- a/target/i386/cpu-sysemu.c +++ b/target/i386/cpu-sysemu.c @@ -235,6 +235,16 @@ void cpu_clear_apic_feature(CPUX86State *env) env->features[FEAT_1_EDX] &= ~CPUID_APIC; } +void cpu_set_apic_feature(CPUX86State *env) +{ + env->features[FEAT_1_EDX] |= CPUID_APIC; +} + +bool cpu_has_x2apic_feature(CPUX86State *env) +{ + return env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC; +} + bool cpu_is_bsp(X86CPU *cpu) { return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; } @@ -281,11 +291,17 @@ void x86_cpu_apic_create(X86CPU *cpu, Error **errp) OBJECT(cpu->apic_state)); object_unref(OBJECT(cpu->apic_state)); - qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); /* TODO: convert to link<> */ apic = APIC_COMMON(cpu->apic_state); apic->cpu = cpu; apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; + + /* + * apic_common_set_id needs to check if the CPU has the x2APIC + * feature in case APIC ID >= 255, so we need to set apic->cpu + * before setting the APIC ID + */ + qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); } void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index a5eaeadb5f88d9d9f1439dd2ec6a6c573f932af3..ba22d59e6f413928556fff217e3c0f852659d2f3 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -567,6 +567,9 @@ typedef enum X86Seg { #define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 #define
MSR_IA32_VMX_VMFUNC 0x00000491 +#define MSR_APIC_START 0x00000800 +#define MSR_APIC_END 0x000008ff + #define XSTATE_FP_BIT 0 #define XSTATE_SSE_BIT 1 #define XSTATE_YMM_BIT 2 @@ -2389,8 +2392,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); void cpu_clear_apic_feature(CPUX86State *env); +void cpu_set_apic_feature(CPUX86State *env); void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); +bool cpu_has_x2apic_feature(CPUX86State *env); static inline bool x86_has_cpuid_0x1f(X86CPU *cpu) { diff --git a/target/i386/hvf/README.md b/target/i386/hvf/README.md index 2d33477aca505e08eb899698c8d036273035d2c9..64a8935237c8c209eab6a2f2905f342bc30c67a2 100644 --- a/target/i386/hvf/README.md +++ b/target/i386/hvf/README.md @@ -4,4 +4,4 @@ These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desk 1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, xsave_buf) due to historical differences + QEMU needing to handle more emulation targets. 2. Removal of `apic_page` and hyperv-related functionality. -3. More relaxed use of `qemu_mutex_lock_iothread`. +3. More relaxed use of `bql_lock`. diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index 20b9ca3ef5135afd3d5b0e959568a503d1c72603..11ffdd4c69fdb61ab072e9364d6007c19ce88c39 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -429,9 +429,9 @@ int hvf_vcpu_exec(CPUState *cpu) } vmx_update_tpr(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) { - qemu_mutex_lock_iothread(); + bql_lock(); return EXCP_HLT; } @@ -450,7 +450,7 @@ int hvf_vcpu_exec(CPUState *cpu) rip = rreg(cpu->accel->fd, HV_X86_RIP); env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS); - qemu_mutex_lock_iothread(); + bql_lock(); update_apic_tpr(cpu); current_cpu = cpu; diff --git a/target/i386/kvm/hyperv.c b/target/i386/kvm/hyperv.c index e3ac978648b81cc31fa9f2bd1ba22c3d1ad9a764..6825c89af374ab7319a1bc10724068fffe16e306 100644 --- a/target/i386/kvm/hyperv.c +++ b/target/i386/kvm/hyperv.c @@ -45,9 +45,9 @@ void hyperv_x86_synic_update(X86CPU *cpu) static void async_synic_update(CPUState *cs, run_on_cpu_data data) { - qemu_mutex_lock_iothread(); + bql_lock(); hyperv_x86_synic_update(X86_CPU(cs)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index d1d068ce074d172ce5ce74c4f25a3ec8a23250dd..4e046c7777b1cfd3a8e702d35b1a2db908f433d6 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -31,6 +31,7 @@ #include "sysemu/kvm_int.h" #include "sysemu/runstate.h" #include "kvm_i386.h" +#include "../confidential-guest.h" #include "sev.h" #include "csv.h" #include "xen-emu.h" @@ -164,6 +165,51 @@ static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES]; static RateLimit bus_lock_ratelimit_ctrl; static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value); +static const char *vm_type_name[] = { + [KVM_X86_DEFAULT_VM] = "default", + [KVM_X86_SEV_VM] = "SEV", + [KVM_X86_SEV_ES_VM] = "SEV-ES", +}; + +bool kvm_is_vm_type_supported(int type) +{ + uint32_t machine_types; + + /* + * old KVM doesn't support KVM_CAP_VM_TYPES but KVM_X86_DEFAULT_VM + * is always supported + */ + if (type == KVM_X86_DEFAULT_VM) { + return true; + } + + machine_types = 
kvm_check_extension(KVM_STATE(current_machine->accelerator), + KVM_CAP_VM_TYPES); + return !!(machine_types & BIT(type)); +} + +int kvm_get_vm_type(MachineState *ms) +{ + int kvm_type = KVM_X86_DEFAULT_VM; + + if (ms->cgs) { + if (!object_dynamic_cast(OBJECT(ms->cgs), TYPE_X86_CONFIDENTIAL_GUEST)) { + error_report("configuration type %s not supported for x86 guests", + object_get_typename(OBJECT(ms->cgs))); + exit(1); + } + kvm_type = x86_confidential_guest_kvm_type( + X86_CONFIDENTIAL_GUEST(ms->cgs)); + } + + if (!kvm_is_vm_type_supported(kvm_type)) { + error_report("vm-type %s not supported by KVM", vm_type_name[kvm_type]); + exit(1); + } + + return kvm_type; +} + bool kvm_has_smm(void) { return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM); @@ -4884,9 +4930,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) /* Inject NMI */ if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; - qemu_mutex_unlock_iothread(); + bql_unlock(); DPRINTF("injected NMI\n"); ret = kvm_vcpu_ioctl(cpu, KVM_NMI); if (ret < 0) { @@ -4895,9 +4941,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) } } if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; - qemu_mutex_unlock_iothread(); + bql_unlock(); DPRINTF("injected SMI\n"); ret = kvm_vcpu_ioctl(cpu, KVM_SMI); if (ret < 0) { @@ -4908,7 +4954,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) } if (!kvm_pic_in_kernel()) { - qemu_mutex_lock_iothread(); + bql_lock(); } /* Force the VCPU out of its inner loop to process any INIT requests @@ -4961,7 +5007,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) DPRINTF("setting tpr\n"); run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -5009,12 +5055,12 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) /* We need to protect the apic state against concurrent accesses from * different threads in case the userspace irqchip is used. 
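 * (Conversely, with the in-kernel irqchip the APIC state is owned by
 * KVM and is not touched concurrently by QEMU's device emulation,
 * which is presumably why the lock is skipped in that case.)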
*/ if (!kvm_irqchip_in_kernel()) { - qemu_mutex_lock_iothread(); + bql_lock(); } cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8); cpu_set_apic_base(x86_cpu->apic_state, run->apic_base); if (!kvm_irqchip_in_kernel()) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } return cpu_get_mem_attrs(env); } @@ -5474,17 +5520,17 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) switch (run->exit_reason) { case KVM_EXIT_HLT: DPRINTF("handle_hlt\n"); - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_handle_halt(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case KVM_EXIT_SET_TPR: ret = 0; break; case KVM_EXIT_TPR_ACCESS: - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_handle_tpr_access(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case KVM_EXIT_FAIL_ENTRY: code = run->fail_entry.hardware_entry_failure_reason; @@ -5510,9 +5556,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) break; case KVM_EXIT_DEBUG: DPRINTF("kvm_exit_debug\n"); - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_handle_debug(cpu, &run->debug.arch); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case KVM_EXIT_HYPERV: ret = kvm_hv_handle_exit(cpu, &run->hyperv); @@ -5808,14 +5854,6 @@ bool kvm_has_waitpkg(void) return has_msr_umwait; } -bool kvm_arch_cpu_check_are_resettable(void) -{ - if (is_hygon_cpu()) - return !csv_kvm_cpu_reset_inhibit; - - return !sev_es_enabled(); -} - #define ARCH_REQ_XCOMP_GUEST_PERM 0x1025 void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask) diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h index 30fedcffea3e59161eeb9ff1750738845c1a4637..6b44844d95d0640809c45cd2759ffa2eb5f19f83 100644 --- a/target/i386/kvm/kvm_i386.h +++ b/target/i386/kvm/kvm_i386.h @@ -37,6 +37,7 @@ bool kvm_hv_vpindex_settable(void); bool kvm_enable_sgx_provisioning(KVMState *s); bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp); +int kvm_get_vm_type(MachineState *ms); void kvm_arch_reset_vcpu(X86CPU *cs); void kvm_arch_after_reset_vcpu(X86CPU *cpu); void kvm_arch_do_init_vcpu(X86CPU *cs); @@ -49,6 +50,7 @@ void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask); #ifdef CONFIG_KVM +bool kvm_is_vm_type_supported(int type); bool kvm_has_adjust_clock_stable(void); bool kvm_has_exception_payload(void); void kvm_synchronize_all_tsc(void); diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c index c0631f9cf439fd580756f716501a31305b778ce0..b0ed2e6aeb2a2464d28725c23a8a4de357b2cc48 100644 --- a/target/i386/kvm/xen-emu.c +++ b/target/i386/kvm/xen-emu.c @@ -403,7 +403,7 @@ void kvm_xen_maybe_deassert_callback(CPUState *cs) /* If the evtchn_upcall_pending flag is cleared, turn the GSI off. */ if (!vi->evtchn_upcall_pending) { - qemu_mutex_lock_iothread(); + bql_lock(); /* * Check again now we have the lock, because it may have been * asserted in the interim. 
And we don't want to take the lock @@ -413,7 +413,7 @@ void kvm_xen_maybe_deassert_callback(CPUState *cs) X86_CPU(cs)->env.xen_callback_asserted = false; xen_evtchn_set_callback_level(0); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -773,9 +773,9 @@ static bool handle_set_param(struct kvm_xen_exit *exit, X86CPU *cpu, switch (hp.index) { case HVM_PARAM_CALLBACK_IRQ: - qemu_mutex_lock_iothread(); + bql_lock(); err = xen_evtchn_set_callback_param(hp.value); - qemu_mutex_unlock_iothread(); + bql_unlock(); xen_set_long_mode(exit->u.hcall.longmode); break; default: @@ -1408,7 +1408,7 @@ int kvm_xen_soft_reset(void) CPUState *cpu; int err; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); trace_kvm_xen_soft_reset(); @@ -1481,9 +1481,9 @@ static int schedop_shutdown(CPUState *cs, uint64_t arg) break; case SHUTDOWN_soft_reset: - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_xen_soft_reset(); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; default: diff --git a/target/i386/meson.build b/target/i386/meson.build index 594a0a6abf7bdc028fa7e94390890c2547ead7db..9a1d070d130ca097f50294cf86ca168ea746c5fb 100644 --- a/target/i386/meson.build +++ b/target/i386/meson.build @@ -6,7 +6,7 @@ i386_ss.add(files( 'xsave_helper.c', 'cpu-dump.c', )) -i386_ss.add(when: 'CONFIG_SEV', if_true: files('host-cpu.c')) +i386_ss.add(when: 'CONFIG_SEV', if_true: files('host-cpu.c', 'confidential-guest.c')) # x86 cpu type i386_ss.add(when: 'CONFIG_KVM', if_true: files('host-cpu.c')) diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c index 6c46101ac1aecafb6e92c64f3c909e6392b64b10..f9d5e9a37ae4077ffa94d5b96273136737d6c345 100644 --- a/target/i386/nvmm/nvmm-accel-ops.c +++ b/target/i386/nvmm/nvmm-accel-ops.c @@ -25,7 +25,7 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); current_cpu = cpu; @@ -55,7 +55,7 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg) nvmm_destroy_vcpu(cpu); cpu_thread_signal_destroyed(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index 7d752bc5e000d10465f0e5af3d1c05063275e510..cfdca91123c6615722b0ce5a529dc51aa2e71aee 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -399,7 +399,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) uint8_t tpr; int ret; - qemu_mutex_lock_iothread(); + bql_lock(); tpr = cpu_get_apic_tpr(x86_cpu->apic_state); if (tpr != qcpu->tpr) { @@ -462,7 +462,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* @@ -485,9 +485,9 @@ nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit) tpr = exit->exitstate.cr8; if (qcpu->tpr != tpr) { qcpu->tpr = tpr; - qemu_mutex_lock_iothread(); + bql_lock(); cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -648,7 +648,7 @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu, CPUX86State *env = cpu_env(cpu); int ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) && @@ -658,7 +658,7 @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu, ret = 1; } - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -721,7 +721,7 @@ nvmm_vcpu_loop(CPUState *cpu) return 0; } - qemu_mutex_unlock_iothread(); + bql_unlock(); 
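/*
 * Every accelerator converted in this series follows the same shape
 * around guest execution: drop the BQL, run the vCPU, then re-take
 * the BQL to handle the exit. A minimal sketch, with run_vcpu()
 * standing in for the accelerator-specific run primitive (KVM_RUN,
 * hv_vcpu_run(), nvmm_vcpu_run(), ...):
 *
 *     bql_unlock();
 *     cpu_exec_start(cpu);
 *     do {
 *         ret = run_vcpu(cpu);
 *     } while (ret == 0);
 *     cpu_exec_end(cpu);
 *     bql_lock();
 */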
cpu_exec_start(cpu); /* @@ -806,16 +806,16 @@ nvmm_vcpu_loop(CPUState *cpu) error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]", exit->reason, exit->u.inv.hwcode); nvmm_get_registers(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_system_guest_panicked(cpu_get_crash_info(cpu)); - qemu_mutex_unlock_iothread(); + bql_unlock(); ret = -1; break; } } while (ret == 0); cpu_exec_end(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); qatomic_set(&cpu->exit_request, false); diff --git a/target/i386/sev.c b/target/i386/sev.c index b734d9dc561d4085bedee3d2b391e864a987a0bb..273d82c70e0dfa787b6cb2a11271c30cad7921a6 100644 --- a/target/i386/sev.c +++ b/target/i386/sev.c @@ -26,6 +26,7 @@ #include "qemu/error-report.h" #include "crypto/hash.h" #include "sysemu/kvm.h" +#include "kvm/kvm_i386.h" #include "sev.h" #include "csv.h" #include "sysemu/sysemu.h" @@ -38,7 +39,7 @@ #include "monitor/monitor.h" #include "monitor/hmp-target.h" #include "qapi/qapi-commands-misc-target.h" -#include "exec/confidential-guest-support.h" +#include "confidential-guest.h" #include "hw/i386/pc.h" #include "exec/address-spaces.h" @@ -62,7 +63,9 @@ struct shared_region { * -machine ...,memory-encryption=sev0 */ struct SevGuestState { - ConfidentialGuestSupport parent_obj; + X86ConfidentialGuest parent_obj; + + int kvm_type; /* configuration parameters */ char *sev_device; @@ -896,6 +899,7 @@ sev_launch_get_measure(Notifier *notifier, void *unused) if (ret) { exit(1); } + kvm_mark_guest_state_protected(); } /* query the measurement blob length */ @@ -1131,6 +1135,26 @@ sev_migration_state_notifier(Notifier *notifier, void *data) static Notifier sev_migration_state; +static int sev_kvm_type(X86ConfidentialGuest *cg) +{ + SevGuestState *sev = SEV_GUEST(cg); + int kvm_type; + + if (sev->kvm_type != -1) { + goto out; + } + + kvm_type = (sev->policy & SEV_POLICY_ES) ? KVM_X86_SEV_ES_VM : KVM_X86_SEV_VM; + if (kvm_is_vm_type_supported(kvm_type)) { + sev->kvm_type = kvm_type; + } else { + sev->kvm_type = KVM_X86_DEFAULT_VM; + } + +out: + return sev->kvm_type; +} + static int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) { SevGuestState *sev = SEV_GUEST(cgs); @@ -1213,9 +1237,9 @@ static int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) __func__); goto err; } - cmd = KVM_SEV_ES_INIT; + cmd = KVM_SEV_ES_INIT; } else { - cmd = KVM_SEV_INIT; + cmd = KVM_SEV_INIT; } trace_kvm_sev_init(); @@ -1240,7 +1264,15 @@ static int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) g_free(init_cmd_buf); } } else { + if (sev_kvm_type(X86_CONFIDENTIAL_GUEST(sev)) == KVM_X86_DEFAULT_VM) { + cmd = sev_es_enabled() ? 
KVM_SEV_ES_INIT : KVM_SEV_INIT; + ret = sev_ioctl(sev->sev_fd, cmd, NULL, &fw_error); + } else { + struct kvm_sev_init args = { 0 }; + + ret = sev_ioctl(sev->sev_fd, KVM_SEV_INIT2, &args, &fw_error); + } } if (ret) { @@ -2766,8 +2798,10 @@ static void sev_guest_class_init(ObjectClass *oc, void *data) { ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc); + X86ConfidentialGuestClass *x86_klass = X86_CONFIDENTIAL_GUEST_CLASS(oc); klass->kvm_init = sev_kvm_init; + x86_klass->kvm_type = sev_kvm_type; object_class_property_add_str(oc, "sev-device", sev_guest_get_sev_device, @@ -2816,6 +2850,8 @@ sev_guest_instance_init(Object *obj) { SevGuestState *sev = SEV_GUEST(obj); + sev->kvm_type = -1; + sev->sev_device = g_strdup(DEFAULT_SEV_DEVICE); sev->policy = DEFAULT_GUEST_POLICY; object_property_add_uint32_ptr(obj, "policy", &sev->policy, @@ -2831,7 +2867,7 @@ sev_guest_instance_init(Object *obj) /* sev guest info */ static const TypeInfo sev_guest_info = { - .parent = TYPE_CONFIDENTIAL_GUEST_SUPPORT, + .parent = TYPE_X86_CONFIDENTIAL_GUEST, .name = TYPE_SEV_GUEST, .instance_size = sizeof(SevGuestState), .instance_finalize = sev_guest_finalize, diff --git a/target/i386/tcg/sysemu/fpu_helper.c b/target/i386/tcg/sysemu/fpu_helper.c index 93506cdd94e0bc17bd1a1bbc6beaf75acc337149..e0305ba23450e977ebea05bfd305aa655fb60e24 100644 --- a/target/i386/tcg/sysemu/fpu_helper.c +++ b/target/i386/tcg/sysemu/fpu_helper.c @@ -32,9 +32,9 @@ void x86_register_ferr_irq(qemu_irq irq) void fpu_check_raise_ferr_irq(CPUX86State *env) { if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) { - qemu_mutex_lock_iothread(); + bql_lock(); qemu_irq_raise(ferr_irq); - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } } @@ -49,7 +49,7 @@ void cpu_set_ignne(void) { CPUX86State *env = &X86_CPU(first_cpu)->env; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); env->hflags2 |= HF2_IGNNE_MASK; /* diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c index f380b587892a3547843c2da33725f33e62b1e876..ea4d2c7e0ddbf3c958f0d5d298cb57c64abd7502 100644 --- a/target/i386/tcg/sysemu/misc_helper.c +++ b/target/i386/tcg/sysemu/misc_helper.c @@ -25,6 +25,7 @@ #include "exec/address-spaces.h" #include "exec/exec-all.h" #include "tcg/helper-tcg.h" +#include "hw/i386/apic.h" void helper_outb(CPUX86State *env, uint32_t port, uint32_t data) { @@ -118,9 +119,9 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) break; case 8: if (!(env->hflags2 & HF2_VINTR_MASK)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0); - qemu_mutex_unlock_iothread(); + bql_unlock(); } env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK); @@ -292,6 +293,19 @@ void helper_wrmsr(CPUX86State *env) env->msr_bndcfgs = val; cpu_sync_bndcs_hflags(env); break; + case MSR_APIC_START ... MSR_APIC_END: { + int ret; + int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START; + + bql_lock(); + ret = apic_msr_write(index, val); + bql_unlock(); + if (ret < 0) { + goto error; + } + + break; + } default: if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + @@ -457,6 +471,19 @@ void helper_rdmsr(CPUX86State *env) val = cpu_x86_get_msr_core_thread_count(x86_cpu); break; } + case MSR_APIC_START ... 
MSR_APIC_END: { + int ret; + int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START; + + bql_lock(); + ret = apic_msr_read(index, &val); + bql_unlock(); + if (ret < 0) { + raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); + } + + break; + } default: if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c index 67cad867207233dbd22ae38a6075f2895bae532a..e783a760a718824975802a1d2e7216228d176845 100644 --- a/target/i386/whpx/whpx-accel-ops.c +++ b/target/i386/whpx/whpx-accel-ops.c @@ -25,7 +25,7 @@ static void *whpx_cpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); current_cpu = cpu; @@ -55,7 +55,7 @@ static void *whpx_cpu_thread_fn(void *arg) whpx_destroy_vcpu(cpu); cpu_thread_signal_destroyed(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index d29ba916a0cc8b8d649bf24ae91f83be399a1f5b..a7262654acdee1f656393465327caa61d0f0de8c 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -1324,7 +1324,7 @@ static int whpx_first_vcpu_starting(CPUState *cpu) struct whpx_state *whpx = &whpx_global; HRESULT hr; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (!QTAILQ_EMPTY(&cpu->breakpoints) || (whpx->breakpoints.breakpoints && @@ -1442,7 +1442,7 @@ static int whpx_handle_halt(CPUState *cpu) CPUX86State *env = cpu_env(cpu); int ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) && !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) { @@ -1450,7 +1450,7 @@ static int whpx_handle_halt(CPUState *cpu) cpu->halted = true; ret = 1; } - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -1472,7 +1472,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) memset(&new_int, 0, sizeof(new_int)); memset(reg_values, 0, sizeof(reg_values)); - qemu_mutex_lock_iothread(); + bql_lock(); /* Inject NMI */ if (!vcpu->interruption_pending && @@ -1563,7 +1563,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) reg_count += 1; } - qemu_mutex_unlock_iothread(); + bql_unlock(); vcpu->ready_for_pic_interrupt = false; if (reg_count) { @@ -1590,9 +1590,9 @@ static void whpx_vcpu_post_run(CPUState *cpu) uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8; if (vcpu->tpr != tpr) { vcpu->tpr = tpr; - qemu_mutex_lock_iothread(); + bql_lock(); cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(vcpu->tpr)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } vcpu->interruption_pending = @@ -1652,7 +1652,7 @@ static int whpx_vcpu_run(CPUState *cpu) WhpxStepMode exclusive_step_mode = WHPX_STEP_NONE; int ret; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (whpx->running_cpus++ == 0) { /* Insert breakpoints into memory, update exception exit bitmap. 
*/ @@ -1690,7 +1690,7 @@ static int whpx_vcpu_run(CPUState *cpu) } } - qemu_mutex_unlock_iothread(); + bql_unlock(); if (exclusive_step_mode != WHPX_STEP_NONE) { start_exclusive(); @@ -2028,9 +2028,9 @@ static int whpx_vcpu_run(CPUState *cpu) error_report("WHPX: Unexpected VP exit code %d", vcpu->exit_ctx.ExitReason); whpx_get_registers(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_system_guest_panicked(cpu_get_crash_info(cpu)); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; } @@ -2055,7 +2055,7 @@ static int whpx_vcpu_run(CPUState *cpu) cpu_exec_end(cpu); } - qemu_mutex_lock_iothread(); + bql_lock(); current_cpu = cpu; if (--whpx->running_cpus == 0) { diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c index 6dfc09c9d1d44594e0870648baa1575e7bb0e6d9..bb6a98bfcb5268f8e4e025bfc3ceda5a576ed6a0 100644 --- a/target/loongarch/kvm/kvm.c +++ b/target/loongarch/kvm/kvm.c @@ -1000,11 +1000,6 @@ bool kvm_arch_stop_on_emulation_error(CPUState *cs) return true; } -bool kvm_arch_cpu_check_are_resettable(void) -{ - return true; -} - void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) { diff --git a/target/loongarch/tcg/csr_helper.c b/target/loongarch/tcg/csr_helper.c index 55341551a5c7751224f4789e50c3abb31b509c27..15f94caefabc7722263fa46e948e21de37b4203c 100644 --- a/target/loongarch/tcg/csr_helper.c +++ b/target/loongarch/tcg/csr_helper.c @@ -89,9 +89,9 @@ target_ulong helper_csrwr_ticlr(CPULoongArchState *env, target_ulong val) int64_t old_v = 0; if (val & 0x1) { - qemu_mutex_lock_iothread(); + bql_lock(); loongarch_cpu_set_irq(cpu, IRQ_TIMER, 0); - qemu_mutex_unlock_iothread(); + bql_unlock(); } return old_v; } diff --git a/target/mips/kvm.c b/target/mips/kvm.c index 6bf2a9b76ef0a476b540cbb1c038e316d1b53856..2383434ac8dfd50609d2774ab4d7ce8092c22fb8 100644 --- a/target/mips/kvm.c +++ b/target/mips/kvm.c @@ -138,7 +138,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) int r; struct kvm_mips_interrupt intr; - qemu_mutex_lock_iothread(); + bql_lock(); if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && cpu_mips_io_interrupts_pending(cpu)) { @@ -151,7 +151,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) @@ -1282,11 +1282,6 @@ int kvm_arch_get_default_type(MachineState *machine) return -1; } -bool kvm_arch_cpu_check_are_resettable(void) -{ - return true; -} - void kvm_arch_accel_class_init(ObjectClass *oc) { } diff --git a/target/mips/tcg/sysemu/cp0_helper.c b/target/mips/tcg/sysemu/cp0_helper.c index d3495487431c0d769941437f3437fdd1d510663a..cc545aed9ca903075e2a42bcaf4f9a2162e3b05f 100644 --- a/target/mips/tcg/sysemu/cp0_helper.c +++ b/target/mips/tcg/sysemu/cp0_helper.c @@ -59,9 +59,9 @@ static inline void mips_vpe_wake(MIPSCPU *c) * because there might be other conditions that state that c should * be sleeping. 
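 * (cpu_interrupt() itself is expected to be called with the BQL held,
 * which is what the lock/unlock pair below provides in this TCG
 * helper.)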
*/ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE); - qemu_mutex_unlock_iothread(); + bql_unlock(); } static inline void mips_vpe_sleep(MIPSCPU *cpu) diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c index 782a5751b750ce973a2b2cca0f5aa6b74754e3bb..77567afba47f773e4679a8d8fa452222fa116cb3 100644 --- a/target/openrisc/sys_helper.c +++ b/target/openrisc/sys_helper.c @@ -160,20 +160,20 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) break; case TO_SPR(9, 0): /* PICMR */ env->picmr = rb; - qemu_mutex_lock_iothread(); + bql_lock(); if (env->picsr & env->picmr) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case TO_SPR(9, 2): /* PICSR */ env->picsr &= ~rb; break; case TO_SPR(10, 0): /* TTMR */ { - qemu_mutex_lock_iothread(); + bql_lock(); if ((env->ttmr & TTMR_M) ^ (rb & TTMR_M)) { switch (rb & TTMR_M) { case TIMER_NONE: @@ -198,15 +198,15 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) cs->interrupt_request &= ~CPU_INTERRUPT_TIMER; } cpu_openrisc_timer_update(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); } break; case TO_SPR(10, 1): /* TTCR */ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_openrisc_count_set(cpu, rb); cpu_openrisc_timer_update(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; } #endif @@ -347,9 +347,9 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd, return env->ttmr; case TO_SPR(10, 1): /* TTCR */ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_openrisc_count_update(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); return cpu_openrisc_count_get(cpu); } #endif diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 9b8fd69b85e924f7be4c3b05c5858d617f1c9368..c44d15468e5bb13eeaf7b09440245396b1a1d50b 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -3065,7 +3065,7 @@ void helper_msgsnd(target_ulong rb) return; } - qemu_mutex_lock_iothread(); + bql_lock(); CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *cenv = &cpu->env; @@ -3074,7 +3074,7 @@ void helper_msgsnd(target_ulong rb) ppc_set_irq(cpu, irq, 1); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* Server Processor Control */ @@ -3102,7 +3102,7 @@ static void book3s_msgsnd_common(int pir, int irq) { CPUState *cs; - qemu_mutex_lock_iothread(); + bql_lock(); CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *cenv = &cpu->env; @@ -3112,7 +3112,7 @@ static void book3s_msgsnd_common(int pir, int irq) ppc_set_irq(cpu, irq, 1); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } void helper_book3s_msgsnd(target_ulong rb) @@ -3166,14 +3166,14 @@ void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) } /* Does iothread need to be locked for walking CPU list? 
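 * (The walk itself is probably safe once vCPUs are created, but
 * ppc_set_irq() ends up in cpu_interrupt(), which expects the BQL,
 * so the lock is needed here regardless.)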
*/ - qemu_mutex_lock_iothread(); + bql_lock(); THREAD_SIBLING_FOREACH(cs, ccs) { PowerPCCPU *ccpu = POWERPC_CPU(ccs); uint32_t thread_id = ppc_cpu_tir(ccpu); if (ttir == thread_id) { ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1); - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } } diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index c84a252f0a2900e2d28fe6d56fd103ca831b8137..cfd7ebce898ac095bcc9945d6e3e3832daa48f47 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -1656,7 +1656,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) CPUPPCState *env = &cpu->env; int ret; - qemu_mutex_lock_iothread(); + bql_lock(); switch (run->exit_reason) { case KVM_EXIT_DCR: @@ -1715,7 +1715,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) break; } - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -2971,11 +2971,6 @@ void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset) } } -bool kvm_arch_cpu_check_are_resettable(void) -{ - return true; -} - void kvm_arch_accel_class_init(ObjectClass *oc) { } diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c index a05bdf78c982e4e038f08ecfc2fe9c6c034cc124..a9d41d28020fd4aa6424b5c6ec0d95dcb7b2b04c 100644 --- a/target/ppc/misc_helper.c +++ b/target/ppc/misc_helper.c @@ -238,7 +238,7 @@ target_ulong helper_load_dpdes(CPUPPCState *env) return dpdes; } - qemu_mutex_lock_iothread(); + bql_lock(); THREAD_SIBLING_FOREACH(cs, ccs) { PowerPCCPU *ccpu = POWERPC_CPU(ccs); CPUPPCState *cenv = &ccpu->env; @@ -248,7 +248,7 @@ target_ulong helper_load_dpdes(CPUPPCState *env) dpdes |= (0x1 << thread_id); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); return dpdes; } @@ -278,14 +278,14 @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val) } /* Does iothread need to be locked for walking CPU list? 
*/ - qemu_mutex_lock_iothread(); + bql_lock(); THREAD_SIBLING_FOREACH(cs, ccs) { PowerPCCPU *ccpu = POWERPC_CPU(ccs); uint32_t thread_id = ppc_cpu_tir(ccpu); ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id)); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif /* defined(TARGET_PPC64) */ diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c index 08a6b47ee0825699a3e6c8890657e5f232901f4f..f618ed292271ff382e0e8fb826f65c3e22837512 100644 --- a/target/ppc/timebase_helper.c +++ b/target/ppc/timebase_helper.c @@ -173,9 +173,9 @@ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn) } else { int ret; - qemu_mutex_lock_iothread(); + bql_lock(); ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (unlikely(ret != 0)) { qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); @@ -196,9 +196,9 @@ void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val) POWERPC_EXCP_INVAL_INVAL, GETPC()); } else { int ret; - qemu_mutex_lock_iothread(); + bql_lock(); ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (unlikely(ret != 0)) { qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index 46df7280400f3d0822ef43cd4237054aee7bdff3..e049650e6d2b6f44c5c599f5cb8dcf598801de27 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -1268,11 +1268,6 @@ void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level) } } -bool kvm_arch_cpu_check_are_resettable(void) -{ - return true; -} - static int aia_mode; static const char *kvm_aia_mode_str(uint64_t mode) diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c index 26a5b4e804bb4d82118cb3013ebbef45229a1c4f..91de2768b19c8b08c474eae32e5039a855813b94 100644 --- a/target/s390x/kvm/kvm.c +++ b/target/s390x/kvm/kvm.c @@ -1923,7 +1923,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) S390CPU *cpu = S390_CPU(cs); int ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); kvm_cpu_synchronize_state(cs); @@ -1947,7 +1947,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason); break; } - qemu_mutex_unlock_iothread(); + bql_unlock(); if (ret == 0) { ret = EXCP_INTERRUPT; @@ -2624,11 +2624,6 @@ void kvm_s390_stop_interrupt(S390CPU *cpu) kvm_s390_vcpu_interrupt(cpu, &irq); } -bool kvm_arch_cpu_check_are_resettable(void) -{ - return true; -} - int kvm_s390_get_zpci_op(void) { return cap_zpci_op; diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c index 6aa7907438fd7d22058e83d36632f91fe67f569d..89b5268fd49d4467cc9d936953eeee458cbd84a8 100644 --- a/target/s390x/tcg/misc_helper.c +++ b/target/s390x/tcg/misc_helper.c @@ -101,9 +101,9 @@ uint64_t HELPER(stck)(CPUS390XState *env) /* SCLP service call */ uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2) { - qemu_mutex_lock_iothread(); + bql_lock(); int r = sclp_service_call(env_archcpu(env), r1, r2); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (r < 0) { tcg_s390_program_interrupt(env, -r, GETPC()); } @@ -117,9 +117,9 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num) switch (num) { case 0x500: /* KVM hypercall */ - qemu_mutex_lock_iothread(); + bql_lock(); r = s390_virtio_hypercall(env); - qemu_mutex_unlock_iothread(); + 
bql_unlock(); break; case 0x44: /* yield */ @@ -127,9 +127,9 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num) break; case 0x308: /* ipl */ - qemu_mutex_lock_iothread(); + bql_lock(); handle_diag_308(env, r1, r3, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); r = 0; break; case 0x288: @@ -185,7 +185,7 @@ static void update_ckc_timer(CPUS390XState *env) /* stop the timer and remove pending CKC IRQs */ timer_del(env->tod_timer); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR; /* the tod has to exceed the ckc, this can never happen if ckc is all 1's */ @@ -207,9 +207,9 @@ void HELPER(sckc)(CPUS390XState *env, uint64_t ckc) { env->ckc = ckc; - qemu_mutex_lock_iothread(); + bql_lock(); update_ckc_timer(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque) @@ -229,9 +229,9 @@ uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low) .low = tod_low, }; - qemu_mutex_lock_iothread(); + bql_lock(); tdc->set(td, &tod, &error_abort); - qemu_mutex_unlock_iothread(); + bql_unlock(); return 0; } @@ -421,9 +421,9 @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1, int cc; /* TODO: needed to inject interrupts - push further down */ - qemu_mutex_lock_iothread(); + bql_lock(); cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3); - qemu_mutex_unlock_iothread(); + bql_unlock(); return cc; } @@ -433,92 +433,92 @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1, void HELPER(xsch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_xsch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(csch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_csch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(hsch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_hsch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_msch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(rchp)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_rchp(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(rsch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_rsch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(sal)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_sal(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + 
bql_lock(); ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(stcrw)(CPUS390XState *env, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_stcrw(cpu, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) @@ -533,10 +533,10 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } - qemu_mutex_lock_iothread(); + bql_lock(); io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]); if (!io) { - qemu_mutex_unlock_iothread(); + bql_unlock(); return 0; } @@ -554,7 +554,7 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) { /* writing failed, reinject and properly clean up */ s390_io_interrupt(io->id, io->nr, io->parm, io->word); - qemu_mutex_unlock_iothread(); + bql_unlock(); g_free(io); s390_cpu_virt_mem_handle_exc(cpu, ra); return 0; @@ -570,24 +570,24 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) } g_free(io); - qemu_mutex_unlock_iothread(); + bql_unlock(); return 1; } void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(chsc)(CPUS390XState *env, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_chsc(cpu, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif @@ -726,27 +726,27 @@ void HELPER(clp)(CPUS390XState *env, uint32_t r2) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); clp_service_call(cpu, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); pcilg_service_call(cpu, r1, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); pcistg_service_call(cpu, r1, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, @@ -754,9 +754,9 @@ void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); stpcifc_service_call(cpu, r1, fiba, ar, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3) @@ -764,9 +764,9 @@ void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3) S390CPU *cpu = env_archcpu(env); int r; - qemu_mutex_lock_iothread(); + bql_lock(); r = css_do_sic(cpu, (r3 >> 27) & 0x7, r1 & 0xffff); - qemu_mutex_unlock_iothread(); + bql_unlock(); /* css_do_sic() may actually return a PGM_xxx value to inject */ if (r) { tcg_s390_program_interrupt(env, -r, GETPC()); @@ -777,9 +777,9 @@ void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2) { S390CPU *cpu = 
env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); rpcit_service_call(cpu, r1, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3, @@ -787,9 +787,9 @@ void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3, { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, @@ -797,8 +797,8 @@ void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); mpcifc_service_call(cpu, r1, fiba, ar, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c index 156361358220d35e0921da32731a10aa506705f6..49a91492639260cebae7e5de9c9144bbaaac8d0b 100644 --- a/target/sparc/int32_helper.c +++ b/target/sparc/int32_helper.c @@ -70,7 +70,7 @@ void cpu_check_irqs(CPUSPARCState *env) CPUState *cs; /* We should be holding the BQL before we mess with IRQs */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (env->pil_in && (env->interrupt_index == 0 || (env->interrupt_index & ~15) == TT_EXTINT)) { diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c index 1b4155f5f37d97e9f258a33c1159d1eee2b0e269..27df9dba89bb79006a8b35f9a94be0b9863b7428 100644 --- a/target/sparc/int64_helper.c +++ b/target/sparc/int64_helper.c @@ -69,7 +69,7 @@ void cpu_check_irqs(CPUSPARCState *env) (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER)); /* We should be holding the BQL before we mess with IRQs */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */ if (env->ivec_status & 0x20) { @@ -267,9 +267,9 @@ static bool do_modify_softint(CPUSPARCState *env, uint32_t value) env->softint = value; #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif return true; diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c index 16d1c70fe713ddb6eb56c753a4e55b2bd1df955c..b53fc9ce94074d41f3caefcd40e4e64a86b0d72e 100644 --- a/target/sparc/win_helper.c +++ b/target/sparc/win_helper.c @@ -179,9 +179,9 @@ void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr) cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC()); } else { /* cpu_put_psr may trigger interrupts, hence BQL */ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_put_psr(env, new_psr); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -407,9 +407,9 @@ void helper_wrpstate(CPUSPARCState *env, target_ulong new_state) #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif } @@ -422,9 +422,9 @@ void helper_wrpil(CPUSPARCState *env, target_ulong new_pil) env->psrpil = new_pil; if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif } @@ -451,9 +451,9 @@ void helper_done(CPUSPARCState *env) #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } 
#endif } @@ -480,9 +480,9 @@ void helper_retry(CPUSPARCState *env) #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif } diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c index 91354884f7ec8456bac49bf5979fb501cfca48ed..168419a505f586d8d7be2ffd5a8314a762377a6c 100644 --- a/target/xtensa/exc_helper.c +++ b/target/xtensa/exc_helper.c @@ -105,9 +105,9 @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel) env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | (intlevel << PS_INTLEVEL_SHIFT); - qemu_mutex_lock_iothread(); + bql_lock(); check_interrupts(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (env->pending_irq_level) { cpu_loop_exit(cpu); @@ -120,9 +120,9 @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel) void HELPER(check_interrupts)(CPUXtensaState *env) { - qemu_mutex_lock_iothread(); + bql_lock(); check_interrupts(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(intset)(CPUXtensaState *env, uint32_t v) diff --git a/ui/cocoa.m b/ui/cocoa.m index cd069da6965b7aec43b0f3e542b7bcdc041c426f..5ebb535070530c2d375314264e01755a9967c900 100644 --- a/ui/cocoa.m +++ b/ui/cocoa.m @@ -117,29 +117,29 @@ static void cocoa_switch(DisplayChangeListener *dcl, typedef void (^CodeBlock)(void); typedef bool (^BoolCodeBlock)(void); -static void with_iothread_lock(CodeBlock block) +static void with_bql(CodeBlock block) { - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); if (!locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } block(); if (!locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } } -static bool bool_with_iothread_lock(BoolCodeBlock block) +static bool bool_with_bql(BoolCodeBlock block) { - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); bool val; if (!locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } val = block(); if (!locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } return val; } @@ -605,7 +605,7 @@ - (void) updateUIInfo return; } - with_iothread_lock(^{ + with_bql(^{ [self updateUIInfoLocked]; }); } @@ -790,7 +790,7 @@ - (void) handleMonitorInput:(NSEvent *)event - (bool) handleEvent:(NSEvent *)event { - return bool_with_iothread_lock(^{ + return bool_with_bql(^{ return [self handleEventLocked:event]; }); } @@ -1182,7 +1182,7 @@ - (QEMUScreen) gscreen {return screen;} */ - (void) raiseAllKeys { - with_iothread_lock(^{ + with_bql(^{ qkbd_state_lift_all_keys(kbd); }); } @@ -1282,7 +1282,7 @@ - (void)applicationWillTerminate:(NSNotification *)aNotification { COCOA_DEBUG("QemuCocoaAppController: applicationWillTerminate\n"); - with_iothread_lock(^{ + with_bql(^{ shutdown_action = SHUTDOWN_ACTION_POWEROFF; qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_UI); }); @@ -1420,7 +1420,7 @@ - (void)displayConsole:(id)sender /* Pause the guest */ - (void)pauseQEMU:(id)sender { - with_iothread_lock(^{ + with_bql(^{ qmp_stop(NULL); }); [sender setEnabled: NO]; @@ -1431,7 +1431,7 @@ - (void)pauseQEMU:(id)sender /* Resume running the guest operating system */ - (void)resumeQEMU:(id) sender { - with_iothread_lock(^{ + with_bql(^{ qmp_cont(NULL); }); [sender setEnabled: NO]; @@ -1461,7 +1461,7 @@ - (void)removePause /* Restarts QEMU */ - (void)restartQEMU:(id)sender { - with_iothread_lock(^{ + with_bql(^{ qmp_system_reset(NULL); }); } @@ -1469,7 +1469,7 @@ - (void)restartQEMU:(id)sender /* Powers down QEMU */ - 
(void)powerDownQEMU:(id)sender { - with_iothread_lock(^{ + with_bql(^{ qmp_system_powerdown(NULL); }); } @@ -1488,7 +1488,7 @@ - (void)ejectDeviceMedia:(id)sender } __block Error *err = NULL; - with_iothread_lock(^{ + with_bql(^{ qmp_eject([drive cStringUsingEncoding: NSASCIIStringEncoding], NULL, false, false, &err); }); @@ -1523,7 +1523,7 @@ - (void)changeDeviceMedia:(id)sender } __block Error *err = NULL; - with_iothread_lock(^{ + with_bql(^{ qmp_blockdev_change_medium([drive cStringUsingEncoding: NSASCIIStringEncoding], NULL, @@ -1605,7 +1605,7 @@ - (void)adjustSpeed:(id)sender // get the throttle percentage throttle_pct = [sender tag]; - with_iothread_lock(^{ + with_bql(^{ cpu_throttle_set(throttle_pct); }); COCOA_DEBUG("cpu throttling at %d%c\n", cpu_throttle_get_percentage(), '%'); @@ -1819,7 +1819,7 @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t return; } - with_iothread_lock(^{ + with_bql(^{ QemuClipboardInfo *info = qemu_clipboard_info_ref(cbinfo); qemu_event_reset(&cbevent); qemu_clipboard_request(info, QEMU_CLIPBOARD_TYPE_TEXT); @@ -1827,9 +1827,9 @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t while (info == cbinfo && info->types[QEMU_CLIPBOARD_TYPE_TEXT].available && info->types[QEMU_CLIPBOARD_TYPE_TEXT].data == NULL) { - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_event_wait(&cbevent); - qemu_mutex_lock_iothread(); + bql_lock(); } if (info == cbinfo) { @@ -1927,9 +1927,9 @@ static void cocoa_clipboard_request(QemuClipboardInfo *info, int status; COCOA_DEBUG("Second thread: calling qemu_default_main()\n"); - qemu_mutex_lock_iothread(); + bql_lock(); status = qemu_default_main(); - qemu_mutex_unlock_iothread(); + bql_unlock(); COCOA_DEBUG("Second thread: qemu_default_main() returned, exiting\n"); [cbowner release]; exit(status); @@ -1941,7 +1941,7 @@ static int cocoa_main(void) COCOA_DEBUG("Entered %s()\n", __func__); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_thread_create(&thread, "qemu_main", call_qemu_main, NULL, QEMU_THREAD_DETACHED); diff --git a/ui/spice-core.c b/ui/spice-core.c index db21db2c9428225defe66f8904e4fb7dd90bac33..b6ee495a8fef479b855ce79e6fdb02e2cf686fae 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -222,7 +222,7 @@ static void channel_event(int event, SpiceChannelEventInfo *info) */ bool need_lock = !qemu_thread_is_self(&me); if (need_lock) { - qemu_mutex_lock_iothread(); + bql_lock(); } if (info->flags & SPICE_CHANNEL_EVENT_FLAG_ADDR_EXT) { @@ -260,7 +260,7 @@ static void channel_event(int event, SpiceChannelEventInfo *info) } if (need_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } qapi_free_SpiceServerInfo(server); diff --git a/util/async.c b/util/async.c index 8f90ddc3047a9f0567da47ab6f89b6190e1d0f9d..def720045b51dbddc096624089ca45aba574af3e 100644 --- a/util/async.c +++ b/util/async.c @@ -741,7 +741,7 @@ AioContext *qemu_get_current_aio_context(void) if (ctx) { return ctx; } - if (qemu_mutex_iothread_locked()) { + if (bql_locked()) { /* Possibly in a vCPU thread. 
*/ return qemu_get_aio_context(); } diff --git a/util/main-loop.c b/util/main-loop.c index 797b640c4152d6ff22cb1b6a37a38d8a1c639bdf..bfbff4f246bc2137e8a73acbe052af47782a9457 100644 --- a/util/main-loop.c +++ b/util/main-loop.c @@ -302,13 +302,13 @@ static int os_host_main_loop_wait(int64_t timeout) glib_pollfds_fill(&timeout); - qemu_mutex_unlock_iothread(); + bql_unlock(); replay_mutex_unlock(); ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout); replay_mutex_lock(); - qemu_mutex_lock_iothread(); + bql_lock(); glib_pollfds_poll(); @@ -517,7 +517,7 @@ static int os_host_main_loop_wait(int64_t timeout) poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout); - qemu_mutex_unlock_iothread(); + bql_unlock(); replay_mutex_unlock(); @@ -525,7 +525,7 @@ static int os_host_main_loop_wait(int64_t timeout) replay_mutex_lock(); - qemu_mutex_lock_iothread(); + bql_lock(); if (g_poll_ret > 0) { for (i = 0; i < w->num; i++) { w->revents[i] = poll_fds[n_poll_fds + i].revents; diff --git a/util/qsp.c b/util/qsp.c index 2fe3764906c53c53ec08572fc165c6f441241e19..6b783e2e7f8e495f041383716ebb161f5d9af0ed 100644 --- a/util/qsp.c +++ b/util/qsp.c @@ -124,7 +124,7 @@ static const char * const qsp_typenames[] = { [QSP_CONDVAR] = "condvar", }; -QemuMutexLockFunc qemu_bql_mutex_lock_func = qemu_mutex_lock_impl; +QemuMutexLockFunc bql_mutex_lock_func = qemu_mutex_lock_impl; QemuMutexLockFunc qemu_mutex_lock_func = qemu_mutex_lock_impl; QemuMutexTrylockFunc qemu_mutex_trylock_func = qemu_mutex_trylock_impl; QemuRecMutexLockFunc qemu_rec_mutex_lock_func = qemu_rec_mutex_lock_impl; @@ -439,7 +439,7 @@ void qsp_enable(void) { qatomic_set(&qemu_mutex_lock_func, qsp_mutex_lock); qatomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock); - qatomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock); + qatomic_set(&bql_mutex_lock_func, qsp_bql_mutex_lock); qatomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock); qatomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock); qatomic_set(&qemu_cond_wait_func, qsp_cond_wait); @@ -450,7 +450,7 @@ void qsp_disable(void) { qatomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl); qatomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl); - qatomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl); + qatomic_set(&bql_mutex_lock_func, qemu_mutex_lock_impl); qatomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl); qatomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl); qatomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl); diff --git a/util/rcu.c b/util/rcu.c index e587bcc48314b81a5a79bc5c399e88a3cefec245..bb7f633b5c7147059b1fb5700745bace497146e0 100644 --- a/util/rcu.c +++ b/util/rcu.c @@ -283,24 +283,24 @@ static void *call_rcu_thread(void *opaque) qatomic_sub(&rcu_call_count, n); synchronize_rcu(); - qemu_mutex_lock_iothread(); + bql_lock(); while (n > 0) { node = try_dequeue(); while (!node) { - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_event_reset(&rcu_call_ready_event); node = try_dequeue(); if (!node) { qemu_event_wait(&rcu_call_ready_event); node = try_dequeue(); } - qemu_mutex_lock_iothread(); + bql_lock(); } n--; node->func(node); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } abort(); } @@ -337,13 +337,13 @@ static void drain_rcu_callback(struct rcu_head *node) void drain_call_rcu(void) { struct rcu_drain rcu_drain; - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); memset(&rcu_drain, 0, sizeof(struct rcu_drain)); qemu_event_init(&rcu_drain.drain_complete_event, 
false); if (locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } @@ -365,7 +365,7 @@ void drain_call_rcu(void) qatomic_dec(&in_drain_call_rcu); if (locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } }
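
Note on the pattern: every hunk above is a mechanical rename. qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() become bql_lock()/bql_unlock(), and qemu_mutex_iothread_locked() becomes bql_locked(), with no change in locking semantics. Code that may run with or without the BQL held keeps using the conditional take/release idiom seen in with_bql() in ui/cocoa.m and drain_call_rcu() in util/rcu.c. A minimal sketch of that idiom against the renamed API (the helper name run_with_bql and its callback are hypothetical, not part of this patch):

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"

    /* Run @fn with the BQL held, whether or not the caller already holds it. */
    static void run_with_bql(void (*fn)(void *), void *opaque)
    {
        bool was_locked = bql_locked();   /* was qemu_mutex_iothread_locked() */

        if (!was_locked) {
            bql_lock();                   /* was qemu_mutex_lock_iothread() */
        }
        fn(opaque);
        if (!was_locked) {
            bql_unlock();                 /* was qemu_mutex_unlock_iothread() */
        }
    }

Checking bql_locked() up front, rather than unconditionally locking, is what makes such a helper safe to call both from the main loop (BQL already held) and from auxiliary threads (BQL dropped), exactly as the converted call sites do.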