Merge remote-tracking branch 'upstream/master'

Andrea Fioraldi 2021-09-29 10:14:19 +02:00
commit d824ae585c
502 changed files with 14960 additions and 4285 deletions

View File

@ -17,7 +17,6 @@ variables:
# setup by the scripts/ci/setup/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 18.04/20.04"
ubuntu-18.04-s390x-all-linux-static:
allow_failure: true
needs: []
stage: build
tags:
@ -37,7 +36,6 @@ ubuntu-18.04-s390x-all-linux-static:
- make --output-sync -j`nproc` check-tcg V=1
ubuntu-18.04-s390x-all:
allow_failure: true
needs: []
stage: build
tags:
@ -54,7 +52,6 @@ ubuntu-18.04-s390x-all:
- make --output-sync -j`nproc` check V=1
ubuntu-18.04-s390x-alldbg:
allow_failure: true
needs: []
stage: build
tags:
@ -62,7 +59,11 @@ ubuntu-18.04-s390x-alldbg:
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
@ -72,7 +73,6 @@ ubuntu-18.04-s390x-alldbg:
- make --output-sync -j`nproc` check V=1
ubuntu-18.04-s390x-clang:
allow_failure: true
needs: []
stage: build
tags:
@ -81,8 +81,10 @@ ubuntu-18.04-s390x-clang:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
@ -91,7 +93,6 @@ ubuntu-18.04-s390x-clang:
- make --output-sync -j`nproc` check V=1
ubuntu-18.04-s390x-tci:
allow_failure: true
needs: []
stage: build
tags:
@ -99,7 +100,11 @@ ubuntu-18.04-s390x-tci:
- s390x
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
@ -107,7 +112,6 @@ ubuntu-18.04-s390x-tci:
- make --output-sync -j`nproc`
ubuntu-18.04-s390x-notcg:
allow_failure: true
needs: []
stage: build
tags:
@ -116,8 +120,10 @@ ubuntu-18.04-s390x-notcg:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$S390X_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
@ -129,7 +135,6 @@ ubuntu-18.04-s390x-notcg:
# setup by the scripts/ci/setup/qemu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 18.04/20.04"
ubuntu-20.04-aarch64-all-linux-static:
allow_failure: true
needs: []
stage: build
tags:
@ -149,7 +154,6 @@ ubuntu-20.04-aarch64-all-linux-static:
- make --output-sync -j`nproc` check-tcg V=1
ubuntu-20.04-aarch64-all:
allow_failure: true
needs: []
stage: build
tags:
@ -157,7 +161,11 @@ ubuntu-20.04-aarch64-all:
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
@ -166,7 +174,6 @@ ubuntu-20.04-aarch64-all:
- make --output-sync -j`nproc` check V=1
ubuntu-20.04-aarch64-alldbg:
allow_failure: true
needs: []
stage: build
tags:
@ -184,7 +191,6 @@ ubuntu-20.04-aarch64-alldbg:
- make --output-sync -j`nproc` check V=1
ubuntu-20.04-aarch64-clang:
allow_failure: true
needs: []
stage: build
tags:
@ -193,8 +199,10 @@ ubuntu-20.04-aarch64-clang:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
@ -203,7 +211,6 @@ ubuntu-20.04-aarch64-clang:
- make --output-sync -j`nproc` check V=1
ubuntu-20.04-aarch64-tci:
allow_failure: true
needs: []
stage: build
tags:
@ -211,7 +218,11 @@ ubuntu-20.04-aarch64-tci:
- aarch64
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build
@ -219,7 +230,6 @@ ubuntu-20.04-aarch64-tci:
- make --output-sync -j`nproc`
ubuntu-20.04-aarch64-notcg:
allow_failure: true
needs: []
stage: build
tags:
@ -228,8 +238,10 @@ ubuntu-20.04-aarch64-notcg:
rules:
- if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
when: manual
allow_failure: true
- if: "$AARCH64_RUNNER_AVAILABLE"
when: manual
allow_failure: true
script:
- mkdir build
- cd build

View File

@ -274,7 +274,6 @@ F: target/ppc/
F: hw/ppc/
F: include/hw/ppc/
F: disas/ppc.c
F: tests/acceptance/machine_ppc.py
RISC-V TCG CPUs
M: Palmer Dabbelt <palmer@dabbelt.com>
@ -433,6 +432,11 @@ F: accel/accel-*.c
F: accel/Makefile.objs
F: accel/stubs/Makefile.objs
Apple Silicon HVF CPUs
M: Alexander Graf <agraf@csgraf.de>
S: Maintained
F: target/arm/hvf/
X86 HVF CPUs
M: Cameron Esfahani <dirty@apple.com>
M: Roman Bolshakov <r.bolshakov@yadro.com>
@ -1265,6 +1269,7 @@ L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/mpc8544ds.c
F: hw/ppc/mpc8544_guts.c
F: tests/acceptance/ppc_mpc8544ds.py
New World (mac99)
M: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
@ -1335,6 +1340,7 @@ F: tests/qtest/spapr*
F: tests/qtest/libqos/*spapr*
F: tests/qtest/rtas*
F: tests/qtest/libqos/rtas*
F: tests/acceptance/ppc_pseries.py
PowerNV (Non-Virtualized)
M: Cédric Le Goater <clg@kaod.org>
@ -1356,6 +1362,7 @@ M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/virtex_ml507.c
F: tests/acceptance/ppc_virtex_ml507.py
sam460ex
M: BALATON Zoltan <balaton@eik.bme.hu>
@ -2968,6 +2975,7 @@ F: include/sysemu/replay.h
F: docs/replay.txt
F: stubs/replay.c
F: tests/acceptance/replay_kernel.py
F: tests/acceptance/replay_linux.py
F: tests/acceptance/reverse_debugging.py
F: qapi/replay.json
@ -3479,6 +3487,7 @@ W: https://trello.com/b/6Qi1pxVn/avocado-qemu
R: Cleber Rosa <crosa@redhat.com>
R: Philippe Mathieu-Daudé <philmd@redhat.com>
R: Wainer dos Santos Moschetta <wainersm@redhat.com>
R: Willian Rampazzo <willianr@redhat.com>
S: Odd Fixes
F: tests/acceptance/

View File

@ -60,6 +60,10 @@
HVFState *hvf_state;
#ifdef __aarch64__
#define HV_VM_DEFAULT NULL
#endif
/* Memory slots */
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
@ -239,12 +243,12 @@ static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
if (on) {
slot->flags |= HVF_SLOT_LOG;
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
HV_MEMORY_READ);
HV_MEMORY_READ | HV_MEMORY_EXEC);
/* stop tracking region*/
} else {
slot->flags &= ~HVF_SLOT_LOG;
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
HV_MEMORY_READ | HV_MEMORY_WRITE);
HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
}
}
@ -324,7 +328,8 @@ static int hvf_accel_init(MachineState *ms)
hvf_state = s;
memory_listener_register(&hvf_memory_listener, &address_space_memory);
return 0;
return hvf_arch_init();
}
static void hvf_accel_class_init(ObjectClass *oc, void *data)
@ -365,17 +370,20 @@ static int hvf_init_vcpu(CPUState *cpu)
cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
/* init cpu signals */
sigset_t set;
struct sigaction sigact;
memset(&sigact, 0, sizeof(sigact));
sigact.sa_handler = dummy_signal;
sigaction(SIG_IPI, &sigact, NULL);
pthread_sigmask(SIG_BLOCK, NULL, &set);
sigdelset(&set, SIG_IPI);
pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
#ifdef __aarch64__
r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
#else
r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
#endif
cpu->vcpu_dirty = 1;
assert_hvf_ok(r);
@ -451,6 +459,7 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
ops->create_vcpu_thread = hvf_start_vcpu_thread;
ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
ops->synchronize_post_init = hvf_cpu_synchronize_post_init;

View File

@ -2469,7 +2469,7 @@ static int kvm_init(MachineState *ms)
ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
if (ret) {
error_report("Enabling of KVM dirty ring failed: %s. "
"Suggested mininum value is 1024.", strerror(-ret));
"Suggested minimum value is 1024.", strerror(-ret));
goto err;
}

View File

@ -588,8 +588,9 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#if defined(TARGET_I386)
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
X86CPU *x86_cpu = X86_CPU(cpu);
qemu_mutex_lock_iothread();
@ -597,13 +598,14 @@ static inline bool cpu_handle_halt(CPUState *cpu)
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
qemu_mutex_unlock_iothread();
}
#endif
#endif /* TARGET_I386 */
if (!cpu_has_work(cpu)) {
return true;
}
cpu->halted = 0;
}
#endif /* !CONFIG_USER_ONLY */
return false;
}
@ -663,8 +665,8 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
loop */
#if defined(TARGET_I386)
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->tcg_ops->do_interrupt(cpu);
#endif
cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
@ -697,6 +699,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
return false;
}
#ifndef CONFIG_USER_ONLY
/*
* CPU_INTERRUPT_POLL is a virtual event which gets converted into a
* "real" interrupt event later. It does not need to be recorded for
@ -710,12 +713,11 @@ static inline bool need_replay_interrupt(int interrupt_request)
return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */
static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
/* Clear the interrupt flag now since we're processing
* cpu->interrupt_request and cpu->exit_request.
* Ensure zeroing happens before reading cpu->exit_request or
@ -737,6 +739,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
qemu_mutex_unlock_iothread();
return true;
}
#if !defined(CONFIG_USER_ONLY)
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
/* Do nothing */
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
@ -765,12 +768,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
qemu_mutex_unlock_iothread();
return true;
}
#endif
#endif /* !TARGET_I386 */
/* The target hook has 3 exit conditions:
False when the interrupt isn't processed,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
CPUClass *cc = CPU_GET_CLASS(cpu);
if (cc->tcg_ops->cpu_exec_interrupt &&
cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (need_replay_interrupt(interrupt_request)) {
@ -789,6 +794,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
* reload the 'interrupt_request' value */
interrupt_request = cpu->interrupt_request;
}
#endif /* !CONFIG_USER_ONLY */
if (interrupt_request & CPU_INTERRUPT_EXITTB) {
cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as

View File

@ -60,8 +60,6 @@ void rr_kick_vcpu_thread(CPUState *unused)
static QEMUTimer *rr_kick_vcpu_timer;
static CPUState *rr_current_cpu;
#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
static inline int64_t rr_next_kick_time(void)
{
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;

View File

@ -1594,31 +1594,8 @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
invalidate_page_bitmap(p);
#if defined(CONFIG_USER_ONLY)
if (p->flags & PAGE_WRITE) {
target_ulong addr;
PageDesc *p2;
int prot;
/* force the host page as non writable (writes will have a
page fault + mprotect overhead) */
page_addr &= qemu_host_page_mask;
prot = 0;
for (addr = page_addr; addr < page_addr + qemu_host_page_size;
addr += TARGET_PAGE_SIZE) {
p2 = page_find(addr >> TARGET_PAGE_BITS);
if (!p2) {
continue;
}
prot |= p2->flags;
p2->flags &= ~PAGE_WRITE;
}
mprotect(g2h_untagged(page_addr), qemu_host_page_size,
(prot & PAGE_BITS) & ~PAGE_WRITE);
if (DEBUG_TB_INVALIDATE_GATE) {
printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
}
}
/* translator_loop() must have made all TB pages non-writable */
assert(!(p->flags & PAGE_WRITE));
#else
/* if some code is already present, then the pages are already
protected. So we handle the case where only the first TB is
@ -2827,6 +2804,38 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
return 0;
}
void page_protect(tb_page_addr_t page_addr)
{
target_ulong addr;
PageDesc *p;
int prot;
p = page_find(page_addr >> TARGET_PAGE_BITS);
if (p && (p->flags & PAGE_WRITE)) {
/*
* Force the host page as non writable (writes will have a page fault +
* mprotect overhead).
*/
page_addr &= qemu_host_page_mask;
prot = 0;
for (addr = page_addr; addr < page_addr + qemu_host_page_size;
addr += TARGET_PAGE_SIZE) {
p = page_find(addr >> TARGET_PAGE_BITS);
if (!p) {
continue;
}
prot |= p->flags;
p->flags &= ~PAGE_WRITE;
}
mprotect(g2h_untagged(page_addr), qemu_host_page_size,
(prot & PAGE_BITS) & ~PAGE_WRITE);
if (DEBUG_TB_INVALIDATE_GATE) {
printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
}
}
}
/* called from signal handler: invalidate the code and unprotect the
* page. Return 0 if the fault was not handled, 1 if it was handled,
* and 2 if it was handled but the caller must cause the TB to be

View File

@ -53,6 +53,15 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
}
static inline void translator_page_protect(DisasContextBase *dcbase,
target_ulong pc)
{
#ifdef CONFIG_USER_ONLY
dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK;
page_protect(pc);
#endif
}
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
CPUState *cpu, TranslationBlock *tb, int max_insns)
{
@ -67,6 +76,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
db->num_insns = 0;
db->max_insns = max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
translator_page_protect(db, db->pc_next);
ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@ -160,3 +170,32 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
}
#endif
}
static inline void translator_maybe_page_protect(DisasContextBase *dcbase,
target_ulong pc, size_t len)
{
#ifdef CONFIG_USER_ONLY
target_ulong end = pc + len - 1;
if (end > dcbase->page_protect_end) {
translator_page_protect(dcbase, end);
}
#endif
}
#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
abi_ptr pc, bool do_swap) \
{ \
translator_maybe_page_protect(dcbase, pc, sizeof(type)); \
type ret = load_fn(env, pc); \
if (do_swap) { \
ret = swap_fn(ret); \
} \
plugin_insn_append(&ret, sizeof(ret)); \
return ret; \
}
FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
#undef GEN_TRANSLATOR_LD
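As an aside (not part of this commit): a minimal standalone sketch of the page-boundary arithmetic used by translator_page_protect() and translator_maybe_page_protect() above, assuming a 4 KiB target page purely for illustration. PAGE_BITS and PAGE_MASK below are local stand-ins for TARGET_PAGE_BITS and TARGET_PAGE_MASK, and the sample pc is made up.

/* Standalone sketch, not QEMU code. */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#define PAGE_BITS 12
#define PAGE_MASK (~(uint64_t)((1u << PAGE_BITS) - 1))

int main(void)
{
    uint64_t pc = 0x400ffd;                  /* hypothetical guest PC */
    /* translator_page_protect(): remember the last byte of pc's page */
    uint64_t protect_end = pc | ~PAGE_MASK;  /* 0x400fff */

    /* translator_maybe_page_protect(): a 4-byte instruction load at pc */
    uint64_t len = 4;
    uint64_t end = pc + len - 1;             /* 0x401000, i.e. the next page */
    if (end > protect_end) {
        /* the real code calls page_protect(end) here as well */
        protect_end = end | ~PAGE_MASK;      /* 0x401fff */
    }

    assert(protect_end == 0x401fff);
    printf("instruction crosses into the page ending at 0x%" PRIx64 "\n",
           protect_end);
    return 0;
}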

View File

@ -680,18 +680,26 @@ int cpu_signal_handler(int host_signum, void *pinfo,
pc = uc->uc_mcontext.psw.addr;
/* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
of the normal 2 arguments. The 3rd argument contains the "int_code"
from the hardware which does in fact contain the is_write value.
The rt signal handler, as far as I can tell, does not give this value
at all. Not that we could get to it from here even if it were. */
/* ??? This is not even close to complete, since it ignores all
of the read-modify-write instructions. */
/*
* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
* of the normal 2 arguments. The 4th argument contains the "Translation-
* Exception Identification for DAT Exceptions" from the hardware (aka
* "int_parm_long"), which does in fact contain the is_write value.
* The rt signal handler, as far as I can tell, does not give this value
* at all. Not that we could get to it from here even if it were.
* So fall back to parsing instructions. Treat read-modify-write ones as
* writes, which is not fully correct, but for tracking self-modifying code
* this is better than treating them as reads. Checking si_addr page flags
* might be a viable improvement, albeit a racy one.
*/
/* ??? This is not even close to complete. */
pinsn = (uint16_t *)pc;
switch (pinsn[0] >> 8) {
case 0x50: /* ST */
case 0x42: /* STC */
case 0x40: /* STH */
case 0xba: /* CS */
case 0xbb: /* CDS */
is_write = 1;
break;
case 0xc4: /* RIL format insns */
@ -702,6 +710,12 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write = 1;
}
break;
case 0xc8: /* SSF format insns */
switch (pinsn[0] & 0xf) {
case 0x2: /* CSST */
is_write = 1;
}
break;
case 0xe3: /* RXY format insns */
switch (pinsn[2] & 0xff) {
case 0x50: /* STY */
@ -715,7 +729,27 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write = 1;
}
break;
case 0xeb: /* RSY format insns */
switch (pinsn[2] & 0xff) {
case 0x14: /* CSY */
case 0x30: /* CSG */
case 0x31: /* CDSY */
case 0x3e: /* CDSG */
case 0xe4: /* LANG */
case 0xe6: /* LAOG */
case 0xe7: /* LAXG */
case 0xe8: /* LAAG */
case 0xea: /* LAALG */
case 0xf4: /* LAN */
case 0xf6: /* LAO */
case 0xf7: /* LAX */
case 0xfa: /* LAAL */
case 0xf8: /* LAA */
is_write = 1;
}
break;
}
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

View File

@ -623,7 +623,7 @@ static TpmTypeOptions *tpm_emulator_get_tpm_options(TPMBackend *tb)
TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
TpmTypeOptions *options = g_new0(TpmTypeOptions, 1);
options->type = TPM_TYPE_OPTIONS_KIND_EMULATOR;
options->type = TPM_TYPE_EMULATOR;
options->u.emulator.data = QAPI_CLONE(TPMEmulatorOptions, tpm_emu->options);
return options;

View File

@ -321,7 +321,7 @@ static TpmTypeOptions *tpm_passthrough_get_tpm_options(TPMBackend *tb)
{
TpmTypeOptions *options = g_new0(TpmTypeOptions, 1);
options->type = TPM_TYPE_OPTIONS_KIND_PASSTHROUGH;
options->type = TPM_TYPE_PASSTHROUGH;
options->u.passthrough.data = QAPI_CLONE(TPMPassthroughOptions,
TPM_PASSTHROUGH(tb)->options);

88 block.c View File

@ -49,6 +49,8 @@
#include "qemu/timer.h"
#include "qemu/cutils.h"
#include "qemu/id.h"
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "block/coroutines.h"
#ifdef CONFIG_BSD
@ -401,6 +403,9 @@ BlockDriverState *bdrv_new(void)
qemu_co_queue_init(&bs->flush_queue);
qemu_co_mutex_init(&bs->bsc_modify_lock);
bs->block_status_cache = g_new0(BdrvBlockStatusCache, 1);
for (i = 0; i < bdrv_drain_all_count; i++) {
bdrv_drained_begin(bs);
}
@ -4694,6 +4699,8 @@ static void bdrv_close(BlockDriverState *bs)
bs->explicit_options = NULL;
qobject_unref(bs->full_open_options);
bs->full_open_options = NULL;
g_free(bs->block_status_cache);
bs->block_status_cache = NULL;
bdrv_release_named_dirty_bitmaps(bs);
assert(QLIST_EMPTY(&bs->dirty_bitmaps));
@ -6319,6 +6326,7 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs)
{
BdrvChild *child, *parent;
int ret;
uint64_t cumulative_perms, cumulative_shared_perms;
if (!bs->drv) {
return -ENOMEDIUM;
@ -6349,6 +6357,13 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs)
}
}
bdrv_get_cumulative_perm(bs, &cumulative_perms,
&cumulative_shared_perms);
if (cumulative_perms & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
/* Our inactive parents still need write access. Inactivation failed. */
return -EPERM;
}
bs->open_flags |= BDRV_O_INACTIVE;
/*
@ -7684,3 +7699,76 @@ BlockDriverState *bdrv_backing_chain_next(BlockDriverState *bs)
{
return bdrv_skip_filters(bdrv_cow_bs(bdrv_skip_filters(bs)));
}
/**
* Check whether [offset, offset + bytes) overlaps with the cached
* block-status data region.
*
* If so, and @pnum is not NULL, set *pnum to `bsc.data_end - offset`,
* which is what bdrv_bsc_is_data()'s interface needs.
* Otherwise, *pnum is not touched.
*/
static bool bdrv_bsc_range_overlaps_locked(BlockDriverState *bs,
int64_t offset, int64_t bytes,
int64_t *pnum)
{
BdrvBlockStatusCache *bsc = qatomic_rcu_read(&bs->block_status_cache);
bool overlaps;
overlaps =
qatomic_read(&bsc->valid) &&
ranges_overlap(offset, bytes, bsc->data_start,
bsc->data_end - bsc->data_start);
if (overlaps && pnum) {
*pnum = bsc->data_end - offset;
}
return overlaps;
}
/**
* See block_int.h for this function's documentation.
*/
bool bdrv_bsc_is_data(BlockDriverState *bs, int64_t offset, int64_t *pnum)
{
RCU_READ_LOCK_GUARD();
return bdrv_bsc_range_overlaps_locked(bs, offset, 1, pnum);
}
/**
* See block_int.h for this function's documentation.
*/
void bdrv_bsc_invalidate_range(BlockDriverState *bs,
int64_t offset, int64_t bytes)
{
RCU_READ_LOCK_GUARD();
if (bdrv_bsc_range_overlaps_locked(bs, offset, bytes, NULL)) {
qatomic_set(&bs->block_status_cache->valid, false);
}
}
/**
* See block_int.h for this function's documentation.
*/
void bdrv_bsc_fill(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
BdrvBlockStatusCache *new_bsc = g_new(BdrvBlockStatusCache, 1);
BdrvBlockStatusCache *old_bsc;
*new_bsc = (BdrvBlockStatusCache) {
.valid = true,
.data_start = offset,
.data_end = offset + bytes,
};
QEMU_LOCK_GUARD(&bs->bsc_modify_lock);
old_bsc = qatomic_rcu_read(&bs->block_status_cache);
qatomic_rcu_set(&bs->block_status_cache, new_bsc);
if (old_bsc) {
g_free_rcu(old_bsc, rcu);
}
}
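As an aside (not part of this commit): a minimal single-threaded model of the protocol these three functions implement: fill the cache when a data region is reported, answer point queries with the number of bytes up to data_end, and drop the cache when an overlapping write or discard comes in. The RCU and locking machinery of the real code is omitted, and range_overlaps() below is a simplified stand-in for QEMU's ranges_overlap().

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
    bool valid;
    int64_t data_start;
    int64_t data_end;
} BscModel;

/* Simplified stand-in for ranges_overlap() from qemu/range.h */
static bool range_overlaps(int64_t o1, int64_t l1, int64_t o2, int64_t l2)
{
    return o1 < o2 + l2 && o2 < o1 + l1;
}

static void bsc_fill(BscModel *bsc, int64_t offset, int64_t bytes)
{
    *bsc = (BscModel){ .valid = true,
                       .data_start = offset,
                       .data_end = offset + bytes };
}

static bool bsc_is_data(const BscModel *bsc, int64_t offset, int64_t *pnum)
{
    if (bsc->valid &&
        range_overlaps(offset, 1, bsc->data_start,
                       bsc->data_end - bsc->data_start)) {
        *pnum = bsc->data_end - offset;
        return true;
    }
    return false;
}

static void bsc_invalidate_range(BscModel *bsc, int64_t offset, int64_t bytes)
{
    if (bsc->valid &&
        range_overlaps(offset, bytes, bsc->data_start,
                       bsc->data_end - bsc->data_start)) {
        bsc->valid = false;
    }
}

int main(void)
{
    BscModel bsc = { 0 };
    int64_t pnum = 0;

    bsc_fill(&bsc, 0, 1 << 20);              /* driver reported 1 MiB of data */
    assert(bsc_is_data(&bsc, 4096, &pnum) && pnum == (1 << 20) - 4096);

    bsc_invalidate_range(&bsc, 65536, 4096); /* e.g. a write-zeroes request */
    assert(!bsc_is_data(&bsc, 4096, &pnum)); /* next query goes to the driver */
    return 0;
}

In the real code this flow is driven from bdrv_co_block_status(), bdrv_co_do_pwrite_zeroes() and bdrv_co_pdiscard(), as shown in the block/io.c hunks later in this commit.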

View File

@ -1705,7 +1705,7 @@ static int handle_aiocb_write_zeroes(void *opaque)
*/
warn_report_once("Your file system is misbehaving: "
"fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. "
"Please report this bug to your file sytem "
"Please report this bug to your file system "
"vendor.");
} else if (ret != -ENOTSUP) {
return ret;
@ -2744,7 +2744,8 @@ static int find_allocation(BlockDriverState *bs, off_t start,
* the specified offset) that are known to be in the same
* allocated/unallocated state.
*
* 'bytes' is the max value 'pnum' should be set to.
* 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may
* well exceed it.
*/
static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
bool want_zero,
@ -2782,7 +2783,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
} else if (data == offset) {
/* On a data extent, compute bytes to the end of the extent,
* possibly including a partial sector at EOF. */
*pnum = MIN(bytes, hole - offset);
*pnum = hole - offset;
/*
* We are not allowed to return partial sectors, though, so
@ -2801,7 +2802,7 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
} else {
/* On a hole, compute bytes to the beginning of the next extent. */
assert(hole == offset);
*pnum = MIN(bytes, data - offset);
*pnum = data - offset;
ret = BDRV_BLOCK_ZERO;
}
*map = offset;

View File

@ -1461,7 +1461,8 @@ exit:
* the specified offset) that are known to be in the same
* allocated/unallocated state.
*
* 'bytes' is the max value 'pnum' should be set to.
* 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may
* well exceed it.
*
* (Based on raw_co_block_status() from file-posix.c.)
*/
@ -1477,6 +1478,8 @@ static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs,
off_t data = 0, hole = 0;
int ret = -EINVAL;
assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));
if (!s->fd) {
return ret;
}
@ -1500,12 +1503,26 @@ static int coroutine_fn qemu_gluster_co_block_status(BlockDriverState *bs,
} else if (data == offset) {
/* On a data extent, compute bytes to the end of the extent,
* possibly including a partial sector at EOF. */
*pnum = MIN(bytes, hole - offset);
*pnum = hole - offset;
/*
* We are not allowed to return partial sectors, though, so
* round up if necessary.
*/
if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) {
int64_t file_length = qemu_gluster_getlength(bs);
if (file_length > 0) {
/* Ignore errors, this is just a safeguard */
assert(hole == file_length);
}
*pnum = ROUND_UP(*pnum, bs->bl.request_alignment);
}
ret = BDRV_BLOCK_DATA;
} else {
/* On a hole, compute bytes to the beginning of the next extent. */
assert(hole == offset);
*pnum = MIN(bytes, data - offset);
*pnum = data - offset;
ret = BDRV_BLOCK_ZERO;
}

View File

@ -1883,6 +1883,9 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
return -ENOTSUP;
}
/* Invalidate the cached block-status data range if this write overlaps */
bdrv_bsc_invalidate_range(bs, offset, bytes);
assert(alignment % bs->bl.request_alignment == 0);
head = offset % alignment;
tail = (offset + bytes) % alignment;
@ -2447,9 +2450,65 @@ static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
if (bs->drv->bdrv_co_block_status) {
ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
aligned_bytes, pnum, &local_map,
&local_file);
/*
* Use the block-status cache only for protocol nodes: Format
* drivers are generally quick to inquire the status, but protocol
* drivers often need to get information from outside of qemu, so
* we do not have control over the actual implementation. There
* have been cases where inquiring the status took an unreasonably
* long time, and we can do nothing in qemu to fix it.
* This is especially problematic for images with large data areas,
* because finding the few holes in them and giving them special
* treatment does not gain much performance. Therefore, we try to
* cache the last-identified data region.
*
* Second, limiting ourselves to protocol nodes allows us to assume
* the block status for data regions to be DATA | OFFSET_VALID, and
* that the host offset is the same as the guest offset.
*
* Note that it is possible that external writers zero parts of
* the cached regions without the cache being invalidated, and so
* we may report zeroes as data. This is not catastrophic,
* however, because reporting zeroes as data is fine.
*/
if (QLIST_EMPTY(&bs->children) &&
bdrv_bsc_is_data(bs, aligned_offset, pnum))
{
ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
local_file = bs;
local_map = aligned_offset;
} else {
ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
aligned_bytes, pnum, &local_map,
&local_file);
/*
* Note that checking QLIST_EMPTY(&bs->children) is also done when
* the cache is queried above. Technically, we do not need to check
* it here; the worst that can happen is that we fill the cache for
* non-protocol nodes, and then it is never used. However, filling
* the cache requires an RCU update, so double check here to avoid
* such an update if possible.
*/
if (ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
QLIST_EMPTY(&bs->children))
{
/*
* When a protocol driver reports BLOCK_OFFSET_VALID, the
* returned local_map value must be the same as the offset we
* have passed (aligned_offset), and local_bs must be the node
* itself.
* Assert this, because we follow this rule when reading from
* the cache (see the `local_file = bs` and
* `local_map = aligned_offset` assignments above), and the
* result the cache delivers must be the same as the driver
* would deliver.
*/
assert(local_file == bs);
assert(local_map == aligned_offset);
bdrv_bsc_fill(bs, aligned_offset, *pnum);
}
}
} else {
/* Default code for filters */
@ -3002,6 +3061,9 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
return 0;
}
/* Invalidate the cached block-status data range if this discard overlaps */
bdrv_bsc_invalidate_range(bs, offset, bytes);
/* Discard is advisory, but some devices track and coalesce
* unaligned requests, so we must pass everything down rather than
* round here. Still, most devices will just silently ignore

View File

@ -781,9 +781,6 @@ retry:
iscsi_allocmap_set_allocated(iscsilun, offset, *pnum);
}
if (*pnum > bytes) {
*pnum = bytes;
}
out_unlock:
qemu_mutex_unlock(&iscsilun->mutex);
g_free(iTask.err_str);

View File

@ -160,18 +160,25 @@ static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
if (ranges_overlap(self_start_chunk, self_nb_chunks,
op_start_chunk, op_nb_chunks))
{
/*
* If the operation is already (indirectly) waiting for us, or
* will wait for us as soon as it wakes up, then just go on
* (instead of producing a deadlock in the former case).
*/
if (op->waiting_for_op) {
continue;
if (self) {
/*
* If the operation is already (indirectly) waiting for us,
* or will wait for us as soon as it wakes up, then just go
* on (instead of producing a deadlock in the former case).
*/
if (op->waiting_for_op) {
continue;
}
self->waiting_for_op = op;
}
self->waiting_for_op = op;
qemu_co_queue_wait(&op->waiting_requests, NULL);
self->waiting_for_op = NULL;
if (self) {
self->waiting_for_op = NULL;
}
break;
}
}

View File

@ -556,8 +556,7 @@ static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
* offset needs to be aligned to a cluster boundary.
*
* If the cluster is unallocated then *host_offset will be 0.
* If the cluster is compressed then *host_offset will contain the
* complete compressed cluster descriptor.
* If the cluster is compressed then *host_offset will contain the l2 entry.
*
* On entry, *bytes is the maximum number of contiguous bytes starting at
* offset that we are interested in.
@ -660,7 +659,7 @@ int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
ret = -EIO;
goto fail;
}
*host_offset = l2_entry & L2E_COMPRESSED_OFFSET_SIZE_MASK;
*host_offset = l2_entry;
break;
case QCOW2_SUBCLUSTER_ZERO_PLAIN:
case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
@ -1400,29 +1399,47 @@ static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
if (end <= old_start || start >= old_end) {
/* No intersection */
continue;
}
if (old_alloc->keep_old_clusters &&
(end <= l2meta_cow_start(old_alloc) ||
start >= l2meta_cow_end(old_alloc)))
{
/*
* Clusters intersect but COW areas don't. And cluster itself is
* already allocated. So, there is no actual conflict.
*/
continue;
}
/* Conflict */
if (start < old_start) {
/* Stop at the start of a running allocation */
bytes = old_start - start;
} else {
if (start < old_start) {
/* Stop at the start of a running allocation */
bytes = old_start - start;
} else {
bytes = 0;
}
bytes = 0;
}
/* Stop if already an l2meta exists. After yielding, it wouldn't
* be valid any more, so we'd have to clean up the old L2Metas
* and deal with requests depending on them before starting to
* gather new ones. Not worth the trouble. */
if (bytes == 0 && *m) {
*cur_bytes = 0;
return 0;
}
/*
* Stop if an l2meta already exists. After yielding, it wouldn't
* be valid any more, so we'd have to clean up the old L2Metas
* and deal with requests depending on them before starting to
* gather new ones. Not worth the trouble.
*/
if (bytes == 0 && *m) {
*cur_bytes = 0;
return 0;
}
if (bytes == 0) {
/* Wait for the dependency to complete. We need to recheck
* the free/allocated clusters when we continue. */
qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
return -EAGAIN;
}
if (bytes == 0) {
/*
* Wait for the dependency to complete. We need to recheck
* the free/allocated clusters when we continue.
*/
qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
return -EAGAIN;
}
}
@ -2463,3 +2480,18 @@ fail:
g_free(l1_table);
return ret;
}
void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
uint64_t *coffset, int *csize)
{
BDRVQcow2State *s = bs->opaque;
int nb_csectors;
assert(qcow2_get_cluster_type(bs, l2_entry) == QCOW2_CLUSTER_COMPRESSED);
*coffset = l2_entry & s->cluster_offset_mask;
nb_csectors = ((l2_entry >> s->csize_shift) & s->csize_mask) + 1;
*csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
(*coffset & (QCOW2_COMPRESSED_SECTOR_SIZE - 1));
}
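As an aside (not part of this commit): a standalone walk-through of the arithmetic in qcow2_parse_compressed_l2_entry(), assuming 64 KiB clusters. The csize_shift/csize_mask/cluster_offset_mask derivation is reproduced here from the driver's open path for illustration only, and the sample l2_entry (compressed flag in bit 62, a made-up host offset, three extra 512-byte sectors) is hypothetical.

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#define COMPRESSED_SECTOR_SIZE 512U

int main(void)
{
    unsigned cluster_bits = 16;                      /* 64 KiB clusters */
    unsigned csize_shift = 62 - (cluster_bits - 8);  /* = 54 */
    uint64_t csize_mask = (1ULL << (cluster_bits - 8)) - 1;
    uint64_t cluster_offset_mask = (1ULL << csize_shift) - 1;

    /* Hypothetical compressed-cluster L2 entry */
    uint64_t l2_entry = (1ULL << 62) | (3ULL << csize_shift) | 0x50200;

    /* Same steps as qcow2_parse_compressed_l2_entry() */
    uint64_t coffset = l2_entry & cluster_offset_mask;
    int nb_csectors = ((l2_entry >> csize_shift) & csize_mask) + 1;
    int csize = nb_csectors * COMPRESSED_SECTOR_SIZE -
                (coffset & (COMPRESSED_SECTOR_SIZE - 1));

    assert(coffset == 0x50200);
    assert(nb_csectors == 4);
    assert(csize == 4 * 512 - 0x200);  /* minus the offset into the first sector */
    printf("coffset=0x%" PRIx64 " csize=%d\n", coffset, csize);
    return 0;
}

The hunks that follow switch qcow2_free_any_cluster(), qcow2_update_snapshot_refcount(), the refcount checker and the compressed read path over to this helper.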

View File

@ -1177,11 +1177,11 @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
switch (ctype) {
case QCOW2_CLUSTER_COMPRESSED:
{
int64_t offset = (l2_entry & s->cluster_offset_mask)
& QCOW2_COMPRESSED_SECTOR_MASK;
int size = QCOW2_COMPRESSED_SECTOR_SIZE *
(((l2_entry >> s->csize_shift) & s->csize_mask) + 1);
qcow2_free_clusters(bs, offset, size, type);
uint64_t coffset;
int csize;
qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
qcow2_free_clusters(bs, coffset, csize, type);
}
break;
case QCOW2_CLUSTER_NORMAL:
@ -1247,7 +1247,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
bool l1_allocated = false;
int64_t old_entry, old_l2_offset;
unsigned slice, slice_size2, n_slices;
int i, j, l1_modified = 0, nb_csectors;
int i, j, l1_modified = 0;
int ret;
assert(addend >= -1 && addend <= 1);
@ -1318,14 +1318,14 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
switch (qcow2_get_cluster_type(bs, entry)) {
case QCOW2_CLUSTER_COMPRESSED:
nb_csectors = ((entry >> s->csize_shift) &
s->csize_mask) + 1;
if (addend != 0) {
uint64_t coffset = (entry & s->cluster_offset_mask)
& QCOW2_COMPRESSED_SECTOR_MASK;
uint64_t coffset;
int csize;
qcow2_parse_compressed_l2_entry(bs, entry,
&coffset, &csize);
ret = update_refcount(
bs, coffset,
nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE,
bs, coffset, csize,
abs(addend), addend < 0,
QCOW2_DISCARD_SNAPSHOT);
if (ret < 0) {
@ -1587,6 +1587,66 @@ enum {
CHECK_FRAG_INFO = 0x2, /* update BlockFragInfo counters */
};
/*
* Fix L2 entry by making it QCOW2_CLUSTER_ZERO_PLAIN (or making all its present
* subclusters QCOW2_SUBCLUSTER_ZERO_PLAIN).
*
* This function decrements res->corruptions on success, so the caller is
* responsible to increment res->corruptions prior to the call.
*
* On failure in-memory @l2_table may be modified.
*/
static int fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res,
uint64_t l2_offset,
uint64_t *l2_table, int l2_index, bool active,
bool *metadata_overlap)
{
BDRVQcow2State *s = bs->opaque;
int ret;
int idx = l2_index * (l2_entry_size(s) / sizeof(uint64_t));
uint64_t l2e_offset = l2_offset + (uint64_t)l2_index * l2_entry_size(s);
int ign = active ? QCOW2_OL_ACTIVE_L2 : QCOW2_OL_INACTIVE_L2;
if (has_subclusters(s)) {
uint64_t l2_bitmap = get_l2_bitmap(s, l2_table, l2_index);
/* Allocated subclusters become zero */
l2_bitmap |= l2_bitmap << 32;
l2_bitmap &= QCOW_L2_BITMAP_ALL_ZEROES;
set_l2_bitmap(s, l2_table, l2_index, l2_bitmap);
set_l2_entry(s, l2_table, l2_index, 0);
} else {
set_l2_entry(s, l2_table, l2_index, QCOW_OFLAG_ZERO);
}
ret = qcow2_pre_write_overlap_check(bs, ign, l2e_offset, l2_entry_size(s),
false);
if (metadata_overlap) {
*metadata_overlap = ret < 0;
}
if (ret < 0) {
fprintf(stderr, "ERROR: Overlap check failed\n");
goto fail;
}
ret = bdrv_pwrite_sync(bs->file, l2e_offset, &l2_table[idx],
l2_entry_size(s));
if (ret < 0) {
fprintf(stderr, "ERROR: Failed to overwrite L2 "
"table entry: %s\n", strerror(-ret));
goto fail;
}
res->corruptions--;
res->corruptions_fixed++;
return 0;
fail:
res->check_errors++;
return ret;
}
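As an aside (not part of this commit): a quick standalone check of the subcluster-bitmap trick in fix_l2_entry_by_zero(), assuming the extended-L2 layout in which the low 32 bits of the bitmap mark allocated subclusters and the high 32 bits mark subclusters that read as zero. ALL_ZEROES_MASK below is a local stand-in for QCOW_L2_BITMAP_ALL_ZEROES.

#include <assert.h>
#include <stdint.h>

#define ALL_ZEROES_MASK 0xffffffff00000000ULL

int main(void)
{
    /* Subclusters 0, 5 and 31 allocated, none marked zero yet */
    uint64_t l2_bitmap = (1ULL << 0) | (1ULL << 5) | (1ULL << 31);

    /* Every allocated subcluster becomes a zero subcluster... */
    l2_bitmap |= l2_bitmap << 32;
    /* ...and the allocation bits themselves are dropped */
    l2_bitmap &= ALL_ZEROES_MASK;

    assert(l2_bitmap == ((1ULL << 32) | (1ULL << 37) | (1ULL << 63)));
    return 0;
}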
/*
* Increases the refcount in the given refcount table for all clusters
* referenced in the L2 table. While doing so, performs some checks on L2
@ -1601,26 +1661,41 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
int flags, BdrvCheckMode fix, bool active)
{
BDRVQcow2State *s = bs->opaque;
uint64_t *l2_table, l2_entry;
uint64_t l2_entry, l2_bitmap;
uint64_t next_contiguous_offset = 0;
int i, l2_size, nb_csectors, ret;
int i, ret;
size_t l2_size_bytes = s->l2_size * l2_entry_size(s);
g_autofree uint64_t *l2_table = g_malloc(l2_size_bytes);
bool metadata_overlap;
/* Read L2 table from disk */
l2_size = s->l2_size * l2_entry_size(s);
l2_table = g_malloc(l2_size);
ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size_bytes);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
res->check_errors++;
goto fail;
return ret;
}
/* Do the actual checks */
for(i = 0; i < s->l2_size; i++) {
l2_entry = get_l2_entry(s, l2_table, i);
for (i = 0; i < s->l2_size; i++) {
uint64_t coffset;
int csize;
QCow2ClusterType type;
switch (qcow2_get_cluster_type(bs, l2_entry)) {
l2_entry = get_l2_entry(s, l2_table, i);
l2_bitmap = get_l2_bitmap(s, l2_table, i);
type = qcow2_get_cluster_type(bs, l2_entry);
if (type != QCOW2_CLUSTER_COMPRESSED) {
/* Check reserved bits of Standard Cluster Descriptor */
if (l2_entry & L2E_STD_RESERVED_MASK) {
fprintf(stderr, "ERROR found l2 entry with reserved bits set: "
"%" PRIx64 "\n", l2_entry);
res->corruptions++;
}
}
switch (type) {
case QCOW2_CLUSTER_COMPRESSED:
/* Compressed clusters don't have QCOW_OFLAG_COPIED */
if (l2_entry & QCOW_OFLAG_COPIED) {
@ -1638,23 +1713,28 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
break;
}
if (l2_bitmap) {
fprintf(stderr, "ERROR compressed cluster %d with non-zero "
"subcluster allocation bitmap, entry=0x%" PRIx64 "\n",
i, l2_entry);
res->corruptions++;
break;
}
/* Mark cluster as used */
nb_csectors = ((l2_entry >> s->csize_shift) &
s->csize_mask) + 1;
l2_entry &= s->cluster_offset_mask;
qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
ret = qcow2_inc_refcounts_imrt(
bs, res, refcount_table, refcount_table_size,
l2_entry & QCOW2_COMPRESSED_SECTOR_MASK,
nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE);
bs, res, refcount_table, refcount_table_size, coffset, csize);
if (ret < 0) {
goto fail;
return ret;
}
if (flags & CHECK_FRAG_INFO) {
res->bfi.allocated_clusters++;
res->bfi.compressed_clusters++;
/* Compressed clusters are fragmented by nature. Since they
/*
* Compressed clusters are fragmented by nature. Since they
* take up sub-sector space but we only have sector granularity
* I/O we need to re-read the same sectors even for adjacent
* compressed clusters.
@ -1668,13 +1748,19 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
{
uint64_t offset = l2_entry & L2E_OFFSET_MASK;
if ((l2_bitmap >> 32) & l2_bitmap) {
res->corruptions++;
fprintf(stderr, "ERROR offset=%" PRIx64 ": Allocated "
"cluster has corrupted subcluster allocation bitmap\n",
offset);
}
/* Correct offsets are cluster aligned */
if (offset_into_cluster(s, offset)) {
bool contains_data;
res->corruptions++;
if (has_subclusters(s)) {
uint64_t l2_bitmap = get_l2_bitmap(s, l2_table, i);
contains_data = (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC);
} else {
contains_data = !(l2_entry & QCOW_OFLAG_ZERO);
@ -1687,40 +1773,30 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR",
offset);
if (fix & BDRV_FIX_ERRORS) {
int idx = i * (l2_entry_size(s) / sizeof(uint64_t));
uint64_t l2e_offset =
l2_offset + (uint64_t)i * l2_entry_size(s);
int ign = active ? QCOW2_OL_ACTIVE_L2 :
QCOW2_OL_INACTIVE_L2;
l2_entry = has_subclusters(s) ? 0 : QCOW_OFLAG_ZERO;
set_l2_entry(s, l2_table, i, l2_entry);
ret = qcow2_pre_write_overlap_check(bs, ign,
l2e_offset, l2_entry_size(s), false);
if (ret < 0) {
fprintf(stderr, "ERROR: Overlap check failed\n");
res->check_errors++;
/* Something is seriously wrong, so abort checking
* this L2 table */
goto fail;
ret = fix_l2_entry_by_zero(bs, res, l2_offset,
l2_table, i, active,
&metadata_overlap);
if (metadata_overlap) {
/*
* Something is seriously wrong, so abort checking
* this L2 table.
*/
return ret;
}
ret = bdrv_pwrite_sync(bs->file, l2e_offset,
&l2_table[idx],
l2_entry_size(s));
if (ret < 0) {
fprintf(stderr, "ERROR: Failed to overwrite L2 "
"table entry: %s\n", strerror(-ret));
res->check_errors++;
/* Do not abort, continue checking the rest of this
* L2 table's entries */
} else {
res->corruptions--;
res->corruptions_fixed++;
/* Skip marking the cluster as used
* (it is unused now) */
if (ret == 0) {
/*
* Skip marking the cluster as used
* (it is unused now).
*/
continue;
}
/*
* Failed to fix.
* Do not abort, continue checking the rest of this
* L2 table's entries.
*/
}
} else {
fprintf(stderr, "ERROR offset=%" PRIx64 ": Data cluster is "
@ -1743,14 +1819,23 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
refcount_table_size,
offset, s->cluster_size);
if (ret < 0) {
goto fail;
return ret;
}
}
break;
}
case QCOW2_CLUSTER_ZERO_PLAIN:
/* Impossible when image has subclusters */
assert(!l2_bitmap);
break;
case QCOW2_CLUSTER_UNALLOCATED:
if (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC) {
res->corruptions++;
fprintf(stderr, "ERROR: Unallocated "
"cluster has non-zero subcluster allocation map\n");
}
break;
default:
@ -1758,12 +1843,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
}
}
g_free(l2_table);
return 0;
fail:
g_free(l2_table);
return ret;
}
/*
@ -1782,71 +1862,79 @@ static int check_refcounts_l1(BlockDriverState *bs,
int flags, BdrvCheckMode fix, bool active)
{
BDRVQcow2State *s = bs->opaque;
uint64_t *l1_table = NULL, l2_offset, l1_size2;
size_t l1_size_bytes = l1_size * L1E_SIZE;
g_autofree uint64_t *l1_table = NULL;
uint64_t l2_offset;
int i, ret;
l1_size2 = l1_size * L1E_SIZE;
if (!l1_size) {
return 0;
}
/* Mark L1 table as used */
ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, refcount_table_size,
l1_table_offset, l1_size2);
l1_table_offset, l1_size_bytes);
if (ret < 0) {
goto fail;
return ret;
}
l1_table = g_try_malloc(l1_size_bytes);
if (l1_table == NULL) {
res->check_errors++;
return -ENOMEM;
}
/* Read L1 table entries from disk */
if (l1_size2 > 0) {
l1_table = g_try_malloc(l1_size2);
if (l1_table == NULL) {
ret = -ENOMEM;
res->check_errors++;
goto fail;
}
ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
res->check_errors++;
goto fail;
}
for(i = 0;i < l1_size; i++)
be64_to_cpus(&l1_table[i]);
ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size_bytes);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
res->check_errors++;
return ret;
}
for (i = 0; i < l1_size; i++) {
be64_to_cpus(&l1_table[i]);
}
/* Do the actual checks */
for(i = 0; i < l1_size; i++) {
l2_offset = l1_table[i];
if (l2_offset) {
/* Mark L2 table as used */
l2_offset &= L1E_OFFSET_MASK;
ret = qcow2_inc_refcounts_imrt(bs, res,
refcount_table, refcount_table_size,
l2_offset, s->cluster_size);
if (ret < 0) {
goto fail;
}
for (i = 0; i < l1_size; i++) {
if (!l1_table[i]) {
continue;
}
/* L2 tables are cluster aligned */
if (offset_into_cluster(s, l2_offset)) {
fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
"cluster aligned; L1 entry corrupted\n", l2_offset);
res->corruptions++;
}
if (l1_table[i] & L1E_RESERVED_MASK) {
fprintf(stderr, "ERROR found L1 entry with reserved bits set: "
"%" PRIx64 "\n", l1_table[i]);
res->corruptions++;
}
/* Process and check L2 entries */
ret = check_refcounts_l2(bs, res, refcount_table,
refcount_table_size, l2_offset, flags,
fix, active);
if (ret < 0) {
goto fail;
}
l2_offset = l1_table[i] & L1E_OFFSET_MASK;
/* Mark L2 table as used */
ret = qcow2_inc_refcounts_imrt(bs, res,
refcount_table, refcount_table_size,
l2_offset, s->cluster_size);
if (ret < 0) {
return ret;
}
/* L2 tables are cluster aligned */
if (offset_into_cluster(s, l2_offset)) {
fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
"cluster aligned; L1 entry corrupted\n", l2_offset);
res->corruptions++;
}
/* Process and check L2 entries */
ret = check_refcounts_l2(bs, res, refcount_table,
refcount_table_size, l2_offset, flags,
fix, active);
if (ret < 0) {
return ret;
}
}
g_free(l1_table);
return 0;
fail:
g_free(l1_table);
return ret;
return 0;
}
/*
@ -2001,9 +2089,17 @@ static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
for(i = 0; i < s->refcount_table_size; i++) {
uint64_t offset, cluster;
offset = s->refcount_table[i];
offset = s->refcount_table[i] & REFT_OFFSET_MASK;
cluster = offset >> s->cluster_bits;
if (s->refcount_table[i] & REFT_RESERVED_MASK) {
fprintf(stderr, "ERROR refcount table entry %" PRId64 " has "
"reserved bits set\n", i);
res->corruptions++;
*rebuild = true;
continue;
}
/* Refcount blocks are cluster aligned */
if (offset_into_cluster(s, offset)) {
fprintf(stderr, "ERROR refcount block %" PRId64 " is not "

View File

@ -74,7 +74,7 @@ typedef struct {
static int coroutine_fn
qcow2_co_preadv_compressed(BlockDriverState *bs,
uint64_t cluster_descriptor,
uint64_t l2_entry,
uint64_t offset,
uint64_t bytes,
QEMUIOVector *qiov,
@ -2205,7 +2205,7 @@ typedef struct Qcow2AioTask {
BlockDriverState *bs;
QCow2SubclusterType subcluster_type; /* only for read */
uint64_t host_offset; /* or full descriptor in compressed clusters */
uint64_t host_offset; /* or l2_entry for compressed read */
uint64_t offset;
uint64_t bytes;
QEMUIOVector *qiov;
@ -4693,22 +4693,19 @@ qcow2_co_pwritev_compressed_part(BlockDriverState *bs,
static int coroutine_fn
qcow2_co_preadv_compressed(BlockDriverState *bs,
uint64_t cluster_descriptor,
uint64_t l2_entry,
uint64_t offset,
uint64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset)
{
BDRVQcow2State *s = bs->opaque;
int ret = 0, csize, nb_csectors;
int ret = 0, csize;
uint64_t coffset;
uint8_t *buf, *out_buf;
int offset_in_cluster = offset_into_cluster(s, offset);
coffset = cluster_descriptor & s->cluster_offset_mask;
nb_csectors = ((cluster_descriptor >> s->csize_shift) & s->csize_mask) + 1;
csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
(coffset & ~QCOW2_COMPRESSED_SECTOR_MASK);
qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
buf = g_try_malloc(csize);
if (!buf) {

View File

@ -110,7 +110,6 @@
/* Defined in the qcow2 spec (compressed cluster descriptor) */
#define QCOW2_COMPRESSED_SECTOR_SIZE 512U
#define QCOW2_COMPRESSED_SECTOR_MASK (~(QCOW2_COMPRESSED_SECTOR_SIZE - 1ULL))
/* Must be at least 2 to cover COW */
#define MIN_L2_CACHE_SIZE 2 /* cache entries */
@ -587,10 +586,12 @@ typedef enum QCow2MetadataOverlap {
(QCOW2_OL_CACHED | QCOW2_OL_INACTIVE_L2)
#define L1E_OFFSET_MASK 0x00fffffffffffe00ULL
#define L1E_RESERVED_MASK 0x7f000000000001ffULL
#define L2E_OFFSET_MASK 0x00fffffffffffe00ULL
#define L2E_COMPRESSED_OFFSET_SIZE_MASK 0x3fffffffffffffffULL
#define L2E_STD_RESERVED_MASK 0x3f000000000001feULL
#define REFT_OFFSET_MASK 0xfffffffffffffe00ULL
#define REFT_RESERVED_MASK 0x1ffULL
#define INV_OFFSET (-1ULL)
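As an aside (not part of this commit): a standalone sanity check of how the new reserved-bit masks partition an entry. The COPIED, COMPRESSED and ZERO flag values below (bits 63, 62 and 0) are reproduced from the qcow2 driver for illustration; together with the offset masks they account for every bit, which is what the new "reserved bits set" checks in qcow2-refcount.c rely on.

#include <assert.h>

#define OFLAG_COPIED           (1ULL << 63)
#define OFLAG_COMPRESSED       (1ULL << 62)
#define OFLAG_ZERO             (1ULL << 0)

#define L1E_OFFSET_MASK        0x00fffffffffffe00ULL
#define L1E_RESERVED_MASK      0x7f000000000001ffULL
#define L2E_OFFSET_MASK        0x00fffffffffffe00ULL
#define L2E_STD_RESERVED_MASK  0x3f000000000001feULL
#define REFT_OFFSET_MASK       0xfffffffffffffe00ULL
#define REFT_RESERVED_MASK     0x00000000000001ffULL

int main(void)
{
    /* L1 entry: offset bits + reserved bits + COPIED flag cover all 64 bits */
    assert((L1E_OFFSET_MASK | L1E_RESERVED_MASK | OFLAG_COPIED) == ~0ULL);

    /* Standard (non-compressed) L2 entry: offset + reserved + flag bits */
    assert((L2E_OFFSET_MASK | L2E_STD_RESERVED_MASK |
            OFLAG_COPIED | OFLAG_COMPRESSED | OFLAG_ZERO) == ~0ULL);

    /* Refcount table entry: offset bits + reserved low bits */
    assert((REFT_OFFSET_MASK | REFT_RESERVED_MASK) == ~0ULL);
    return 0;
}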
@ -914,6 +915,8 @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int compressed_size,
uint64_t *host_offset);
void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
uint64_t *coffset, int *csize);
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m);
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);

View File

@ -33,11 +33,6 @@ uint64_t cpu_get_tsc(CPUX86State *env)
return cpu_get_host_ticks();
}
int cpu_get_pic_interrupt(CPUX86State *env)
{
return -1;
}
void bsd_i386_write_dt(void *ptr, unsigned long addr, unsigned long limit,
int flags)
{

View File

@ -33,11 +33,6 @@ uint64_t cpu_get_tsc(CPUX86State *env)
return cpu_get_host_ticks();
}
int cpu_get_pic_interrupt(CPUX86State *env)
{
return -1;
}
void bsd_x86_64_write_dt(void *ptr, unsigned long addr,
unsigned long limit, int flags)
{

View File

@ -386,10 +386,9 @@ void suspend_mux_open(void)
static int chardev_options_parsed_cb(Object *child, void *opaque)
{
Chardev *chr = (Chardev *)child;
ChardevClass *class = CHARDEV_GET_CLASS(chr);
if (!chr->be_open && class->chr_options_parsed) {
class->chr_options_parsed(chr);
if (!chr->be_open && CHARDEV_IS_MUX(chr)) {
open_muxes(chr);
}
return 0;
@ -412,7 +411,6 @@ static void char_mux_class_init(ObjectClass *oc, void *data)
cc->chr_accept_input = mux_chr_accept_input;
cc->chr_add_watch = mux_chr_add_watch;
cc->chr_be_event = mux_chr_be_event;
cc->chr_options_parsed = open_muxes;
cc->chr_update_read_handler = mux_chr_update_read_handlers;
}

View File

@ -1520,7 +1520,7 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend,
addr = g_new0(SocketAddressLegacy, 1);
if (path) {
UnixSocketAddress *q_unix;
addr->type = SOCKET_ADDRESS_LEGACY_KIND_UNIX;
addr->type = SOCKET_ADDRESS_TYPE_UNIX;
q_unix = addr->u.q_unix.data = g_new0(UnixSocketAddress, 1);
q_unix->path = g_strdup(path);
#ifdef CONFIG_LINUX
@ -1530,7 +1530,7 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend,
q_unix->abstract = abstract;
#endif
} else if (host) {
addr->type = SOCKET_ADDRESS_LEGACY_KIND_INET;
addr->type = SOCKET_ADDRESS_TYPE_INET;
addr->u.inet.data = g_new(InetSocketAddress, 1);
*addr->u.inet.data = (InetSocketAddress) {
.host = g_strdup(host),
@ -1543,7 +1543,7 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend,
.ipv6 = qemu_opt_get_bool(opts, "ipv6", 0),
};
} else if (fd) {
addr->type = SOCKET_ADDRESS_LEGACY_KIND_FD;
addr->type = SOCKET_ADDRESS_TYPE_FD;
addr->u.fd.data = g_new(String, 1);
addr->u.fd.data->str = g_strdup(fd);
} else {

View File

@ -165,7 +165,7 @@ static void qemu_chr_parse_udp(QemuOpts *opts, ChardevBackend *backend,
qemu_chr_parse_common(opts, qapi_ChardevUdp_base(udp));
addr = g_new0(SocketAddressLegacy, 1);
addr->type = SOCKET_ADDRESS_LEGACY_KIND_INET;
addr->type = SOCKET_ADDRESS_TYPE_INET;
addr->u.inet.data = g_new(InetSocketAddress, 1);
*addr->u.inet.data = (InetSocketAddress) {
.host = g_strdup(host),
@ -180,7 +180,7 @@ static void qemu_chr_parse_udp(QemuOpts *opts, ChardevBackend *backend,
if (has_local) {
udp->has_local = true;
addr = g_new0(SocketAddressLegacy, 1);
addr->type = SOCKET_ADDRESS_LEGACY_KIND_INET;
addr->type = SOCKET_ADDRESS_TYPE_INET;
addr->u.inet.data = g_new(InetSocketAddress, 1);
*addr->u.inet.data = (InetSocketAddress) {
.host = g_strdup(localaddr),

View File

@ -241,18 +241,15 @@ static void qemu_char_open(Chardev *chr, ChardevBackend *backend,
ChardevCommon *common = backend ? backend->u.null.data : NULL;
if (common && common->has_logfile) {
int flags = O_WRONLY | O_CREAT;
int flags = O_WRONLY;
if (common->has_logappend &&
common->logappend) {
flags |= O_APPEND;
} else {
flags |= O_TRUNC;
}
chr->logfd = qemu_open_old(common->logfile, flags, 0666);
chr->logfd = qemu_create(common->logfile, flags, 0666, errp);
if (chr->logfd < 0) {
error_setg_errno(errp, errno,
"Unable to open logfile %s",
common->logfile);
return;
}
}

4 configure (vendored) View File

@ -5071,7 +5071,9 @@ for bios_file in \
$source_path/pc-bios/openbios-* \
$source_path/pc-bios/u-boot.* \
$source_path/pc-bios/edk2-*.fd.bz2 \
$source_path/pc-bios/palcode-*
$source_path/pc-bios/palcode-* \
$source_path/pc-bios/qemu_vga.ndrv
do
LINKS="$LINKS pc-bios/$(basename $bios_file)"
done

View File

@ -25,21 +25,19 @@ int download_url(const char *name, const char *url)
goto out_curl;
}
curl_easy_setopt(curl, CURLOPT_URL, url);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, file);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0);
if (curl_easy_perform(curl) != CURLE_OK) {
err = 1;
fclose(file);
if (curl_easy_setopt(curl, CURLOPT_URL, url) != CURLE_OK
|| curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL) != CURLE_OK
|| curl_easy_setopt(curl, CURLOPT_WRITEDATA, file) != CURLE_OK
|| curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1) != CURLE_OK
|| curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK
|| curl_easy_perform(curl) != CURLE_OK) {
unlink(name);
goto out_curl;
fclose(file);
err = 1;
} else {
err = fclose(file);
}
err = fclose(file);
out_curl:
curl_easy_cleanup(curl);

View File

@ -215,6 +215,10 @@ out_symbols:
static int pdb_reader_ds_init(struct pdb_reader *r, PDB_DS_HEADER *hdr)
{
if (hdr->block_size == 0) {
return 1;
}
memset(r->file_used, 0, sizeof(r->file_used));
r->ds.header = hdr;
r->ds.toc = pdb_ds_read(hdr, (uint32_t *)((uint8_t *)hdr +

View File

@ -29,6 +29,39 @@ The `Repology`_ site is a useful resource to identify
currently shipped versions of software in various operating systems,
though it does not cover all distros listed below.
Supported host architectures
----------------------------
Those hosts are officially supported, with various accelerators:
.. list-table::
:header-rows: 1
* - CPU Architecture
- Accelerators
* - Arm
- kvm (64 bit only), tcg, xen
* - MIPS
- kvm, tcg
* - PPC
- kvm, tcg
* - RISC-V
- tcg
* - s390x
- kvm, tcg
* - SPARC
- tcg
* - x86
- hax, hvf (64 bit only), kvm, nvmm, tcg, whpx (64 bit only), xen
Other host architectures are not supported. It is possible to build QEMU on an
unsupported host architecture using the configure ``--enable-tcg-interpreter``
option to enable the experimental TCI support, but note that this is very slow
and is not recommended.
Non-supported architectures may be removed in the future following the
:ref:`deprecation process<Deprecated features>`.
Linux OS, macOS, FreeBSD, NetBSD, OpenBSD
-----------------------------------------

View File

@ -1,3 +1,5 @@
.. _Deprecated features:
Deprecated features
===================

View File

@ -1,5 +1,6 @@
----------
About QEMU
==========
----------
QEMU is a generic and open source machine emulator and virtualizer.

View File

@ -55,6 +55,6 @@ Sources
-------
This document is based on the `Fedora Code of Conduct
<https://fedoraproject.org/code-of-conduct>`__ and the
`Contributor Covenant version 1.3.0
<http://web.archive.org/web/20210429132536/https://docs.fedoraproject.org/en-US/project/code-of-conduct/>`__
(as of April 2021) and the `Contributor Covenant version 1.3.0
<https://www.contributor-covenant.org/version/1/3/0/code-of-conduct/>`__.

View File

@ -1,5 +1,6 @@
---------------------
Developer Information
=====================
---------------------
This section of the manual documents various parts of the internals of QEMU.
You only need to read it if you are interested in reading or

View File

@ -319,13 +319,9 @@ Union types
Syntax::
UNION = { 'union': STRING,
'data': BRANCHES,
'*if': COND,
'*features': FEATURES }
| { 'union': STRING,
'data': BRANCHES,
'base': ( MEMBERS | STRING ),
'discriminator': STRING,
'data': BRANCHES,
'*if': COND,
'*features': FEATURES }
BRANCHES = { BRANCH, ... }
@ -334,63 +330,30 @@ Syntax::
Member 'union' names the union type.
There are two flavors of union types: simple (no discriminator or
base), and flat (both discriminator and base).
Each BRANCH of the 'data' object defines a branch of the union. A
union must have at least one branch.
The BRANCH's STRING name is the branch name.
The BRANCH's value defines the branch's properties, in particular its
type. The form TYPE-REF_ is shorthand for :code:`{ 'type': TYPE-REF }`.
A simple union type defines a mapping from automatic discriminator
values to data types like in this example::
{ 'struct': 'BlockdevOptionsFile', 'data': { 'filename': 'str' } }
{ 'struct': 'BlockdevOptionsQcow2',
'data': { 'backing': 'str', '*lazy-refcounts': 'bool' } }
{ 'union': 'BlockdevOptionsSimple',
'data': { 'file': 'BlockdevOptionsFile',
'qcow2': 'BlockdevOptionsQcow2' } }
In the Client JSON Protocol, a simple union is represented by an
object that contains the 'type' member as a discriminator, and a
'data' member that is of the specified data type corresponding to the
discriminator value, as in these examples::
{ "type": "file", "data": { "filename": "/some/place/my-image" } }
{ "type": "qcow2", "data": { "backing": "/some/place/my-image",
"lazy-refcounts": true } }
The generated C code uses a struct containing a union. Additionally,
an implicit C enum 'NameKind' is created, corresponding to the union
'Name', for accessing the various branches of the union. The value
for each branch can be of any type.
Flat unions permit arbitrary common members that occur in all variants
of the union, not just a discriminator. Their discriminators need not
be named 'type'. They also avoid nesting on the wire.
The 'base' member defines the common members. If it is a MEMBERS_
object, it defines common members just like a struct type's 'data'
member defines struct type members. If it is a STRING, it names a
struct type whose members are the common members.
All flat union branches must be `Struct types`_.
Member 'discriminator' must name a non-optional enum-typed member of
the base struct. That member's value selects a branch by its name.
If no such branch exists, an empty branch is assumed.
In the Client JSON Protocol, a flat union is represented by an object
with the common members (from the base type) and the selected branch's
members. The two sets of member names must be disjoint. Member
'discriminator' must name a non-optional enum-typed member of the base
struct.
Each BRANCH of the 'data' object defines a branch of the union. A
union must have at least one branch.
The following example enhances the above simple union example by
adding an optional common member 'read-only', renaming the
discriminator to something more applicable than the simple union's
default of 'type', and reducing the number of ``{}`` required on the wire::
The BRANCH's STRING name is the branch name. It must be a value of
the discriminator enum type.
The BRANCH's value defines the branch's properties, in particular its
type. The type must be a struct type. The form TYPE-REF_ is shorthand
for :code:`{ 'type': TYPE-REF }`.
In the Client JSON Protocol, a union is represented by an object with
the common members (from the base type) and the selected branch's
members. The two sets of member names must be disjoint.
Example::
{ 'enum': 'BlockdevDriver', 'data': [ 'file', 'qcow2' ] }
{ 'union': 'BlockdevOptions',
@ -406,30 +369,11 @@ Resulting in these JSON objects::
{ "driver": "qcow2", "read-only": false,
"backing": "/some/place/my-image", "lazy-refcounts": true }
Notice that in a flat union, the discriminator name is controlled by
the user, but because it must map to a base member with enum type, the
code generator ensures that branches match the existing values of the
enum. The order of branches need not match the order of the enum
values. The branches need not cover all possible enum values.
Omitted enum values are still valid branches that add no additional
members to the data type. In the resulting generated C data types, a
flat union is represented as a struct with the base members in QAPI
schema order, and then a union of structures for each branch of the
struct.
A simple union can always be re-written as a flat union where the base
class has a single member named 'type', and where each branch of the
union has a struct with a single member named 'data'. That is, ::
{ 'union': 'Simple', 'data': { 'one': 'str', 'two': 'int' } }
is identical on the wire to::
{ 'enum': 'Enum', 'data': ['one', 'two'] }
{ 'struct': 'Branch1', 'data': { 'data': 'str' } }
{ 'struct': 'Branch2', 'data': { 'data': 'int' } }
{ 'union': 'Flat', 'base': { 'type': 'Enum' }, 'discriminator': 'type',
'data': { 'one': 'Branch1', 'two': 'Branch2' } }
The order of branches need not match the order of the enum values.
The branches need not cover all possible enum values. In the
resulting generated C data types, a union is represented as a struct
with the base members in QAPI schema order, and then a union of
structures for each branch of the struct.
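As an approximate sketch (member names and layout here are
illustrative, not the generator's literal output), the C generated for
the BlockdevOptions example above looks roughly like::

    typedef struct BlockdevOptions {
        BlockdevDriver driver;       /* discriminator, from the base */
        bool has_read_only;          /* presence flag for the optional member */
        bool read_only;              /* optional common member */
        union {                      /* one member per branch, selected by 'driver' */
            BlockdevOptionsFile file;
            BlockdevOptionsQcow2 qcow2;
        } u;
    } BlockdevOptions;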
The optional 'if' member specifies a conditional. See `Configuring
the schema`_ below for more on this.
@ -859,9 +803,9 @@ longhand form of MEMBER.
Example: a struct type with unconditional member 'foo' and conditional
member 'bar' ::
{ 'struct': 'IfStruct', 'data':
{ 'foo': 'int',
'bar': { 'type': 'int', 'if': 'IFCOND'} } }
{ 'struct': 'IfStruct',
'data': { 'foo': 'int',
'bar': { 'type': 'int', 'if': 'IFCOND'} } }
A union's discriminator may not be conditional.
@ -871,9 +815,9 @@ the longhand form of ENUM-VALUE_.
Example: an enum type with unconditional value 'foo' and conditional
value 'bar' ::
{ 'enum': 'IfEnum', 'data':
[ 'foo',
{ 'name' : 'bar', 'if': 'IFCOND' } ] }
{ 'enum': 'IfEnum',
'data': [ 'foo',
{ 'name' : 'bar', 'if': 'IFCOND' } ] }
Likewise, features can be conditional. This requires the longhand
form of FEATURE_.
@ -1246,7 +1190,7 @@ that provides the variant members for this type tag value). The
"variants" array is in no particular order, and is not guaranteed to
list cases in the same order as the corresponding "tag" enum type.
Example: the SchemaInfo for flat union BlockdevOptions from section
Example: the SchemaInfo for union BlockdevOptions from section
`Union types`_ ::
{ "name": "BlockdevOptions", "meta-type": "object",
@ -1261,27 +1205,6 @@ Example: the SchemaInfo for flat union BlockdevOptions from section
Note that base types are "flattened": their members are included in the
"members" array.
A simple union implicitly defines an enumeration type for its implicit
discriminator (called "type" on the wire, see section `Union types`_).
A simple union implicitly defines an object type for each of its
variants.
Example: the SchemaInfo for simple union BlockdevOptionsSimple from section
`Union types`_ ::
{ "name": "BlockdevOptionsSimple", "meta-type": "object",
"members": [
{ "name": "type", "type": "BlockdevOptionsSimpleKind" } ],
"tag": "type",
"variants": [
{ "case": "file", "type": "q_obj-BlockdevOptionsFile-wrapper" },
{ "case": "qcow2", "type": "q_obj-BlockdevOptionsQcow2-wrapper" } ] }
Enumeration type "BlockdevOptionsSimpleKind" and the object types
"q_obj-BlockdevOptionsFile-wrapper", "q_obj-BlockdevOptionsQcow2-wrapper"
are implicitly defined.
The SchemaInfo for an alternate type has meta-type "alternate", and
variant member "members". "members" is a JSON array. Each element is
a JSON object with member "type", which names a type. Values of the

View File

@ -732,6 +732,47 @@ available. On Debian and Ubuntu based systems, depending on the
specific version, they may be on packages named ``python3-venv`` and
``python3-pip``.
It is also possible to run tests based on tags using the
``make check-acceptance`` command and the ``AVOCADO_TAGS`` environment
variable:
.. code::
make check-acceptance AVOCADO_TAGS=quick
Note that tags separated with commas have an AND behavior, while tags
separated by spaces have an OR behavior. For more information on Avocado
tags, see:
https://avocado-framework.readthedocs.io/en/latest/guides/user/chapters/tags.html
To run a single test file, several of them, or a single test within a
file using the ``make check-acceptance`` command, set the ``AVOCADO_TESTS``
environment variable to the test files or test names. To run all
tests from a single file, use:
.. code::
make check-acceptance AVOCADO_TESTS=$FILEPATH
The same applies when running tests from multiple test files:
.. code::
make check-acceptance AVOCADO_TESTS='$FILEPATH1 $FILEPATH2'
To run a single test within a file, use:
.. code::
make check-acceptance AVOCADO_TESTS=$FILEPATH:$TESTCLASS.$TESTNAME
The same applies when running single tests from multiple test files:
.. code::
make check-acceptance AVOCADO_TESTS='$FILEPATH1:$TESTCLASS1.$TESTNAME1 $FILEPATH2:$TESTCLASS2.$TESTNAME2'
The scripts installed inside the virtual environment may be used
without an "activation". For instance, the Avocado test runner
may be invoked by running:
@ -740,6 +781,34 @@ may be invoked by running:
tests/venv/bin/avocado run $OPTION1 $OPTION2 tests/acceptance/
Note that if ``make check-acceptance`` was not executed before, it is
possible to create the Python virtual environment with the needed
dependencies by running:
.. code::
make check-venv
It is also possible to run tests from a single file or a single test within
a test file. To run tests from a single file within the build tree, use:
.. code::
tests/venv/bin/avocado run tests/acceptance/$TESTFILE
To run a single test within a test file, use:
.. code::
tests/venv/bin/avocado run tests/acceptance/$TESTFILE:$TESTCLASS.$TESTNAME
Valid test names are visible in the output from any previous execution
of Avocado or ``make check-acceptance``, and can also be queried using:
.. code::
tests/venv/bin/avocado list tests/acceptance
Manual Installation
-------------------

View File

@ -3,6 +3,7 @@
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
================================
Welcome to QEMU's documentation!
================================

View File

@ -1,5 +1,6 @@
------------------------------------------------
System Emulation Management and Interoperability
================================================
------------------------------------------------
This section of the manual contains documents and specifications that
are useful for making QEMU interoperate with other software.

View File

@ -15,7 +15,7 @@ backend (i.e. memory-backend-file and memory-backend-ram). A simple
way to create a vNVDIMM device at startup time is done via the
following command line options:
-machine pc,nvdimm
-machine pc,nvdimm=on
-m $RAM_SIZE,slots=$N,maxmem=$MAX_SIZE
-object memory-backend-file,id=mem1,share=on,mem-path=$PATH,size=$NVDIMM_SIZE,readonly=off
-device nvdimm,id=nvdimm1,memdev=mem1,unarmed=off

View File

@ -1,5 +1,6 @@
----------------------------------------------
System Emulation Guest Hardware Specifications
==============================================
----------------------------------------------
This section of the manual contains specifications of
guest hardware that is specific to QEMU.

View File

@ -20,6 +20,7 @@ Hyperscale applications. The following machines are based on this chip :
- ``quanta-gbs-bmc`` Quanta GBS server BMC
- ``quanta-gsj`` Quanta GSJ server BMC
- ``kudo-bmc`` Fii USA Kudo server BMC
There are also two more SoCs, NPCM710 and NPCM705, which are single-core
variants of NPCM750 and NPCM730, respectively. These are currently not

View File

@ -1,5 +1,5 @@
Recommendations for KVM CPU model configuration on x86 hosts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
============================================================
The information that follows provides recommendations for configuring
CPU models on x86 hosts. The goals are to maximise performance, while
@ -368,7 +368,7 @@ featureset, which prevents guests having optimal performance.
Syntax for configuring CPU models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
=================================
The examples below illustrate the approach to configuring the various
CPU models / features in QEMU and libvirt.

1
docs/system/i386/cpu.rst Normal file
View File

@ -0,0 +1 @@
.. include:: ../cpu-models-x86.rst.inc

View File

@ -1,5 +1,6 @@
----------------
System Emulation
================
----------------
This section of the manual is the overall guide for users using QEMU
for full system emulation (as opposed to user-mode emulation).

View File

@ -45,7 +45,7 @@ Following is a description of command-line used to launch mpqemu.
-device lsi53c895a,id=lsi0 \
-drive id=drive_image2,file=/build/ol7-nvme-test-1.qcow2 \
-device scsi-hd,id=drive2,drive=drive_image2,bus=lsi0.0,scsi-id=0 \
-object x-remote-object,id=robj1,devid=lsi1,fd=4,
-object x-remote-object,id=robj1,devid=lsi0,fd=4,
* QEMU:

View File

@ -1,18 +1,22 @@
:orphan:
============================
QEMU block drivers reference
============================
--------
Synopsis
--------
QEMU block driver reference manual
-----------
Description
-----------
.. include:: qemu-block-drivers.rst.inc
--------
See also
--------

View File

@ -1,20 +1,24 @@
:orphan:
==================================
QEMU / KVM CPU model configuration
==================================
--------
Synopsis
''''''''
--------
QEMU CPU Modelling Infrastructure manual
-----------
Description
'''''''''''
-----------
.. include:: cpu-models-x86.rst.inc
.. include:: cpu-models-mips.rst.inc
--------
See also
''''''''
--------
See the HTML documentation of QEMU for more precise information, and for details of the Linux user mode emulator invocation.

View File

@ -6,9 +6,11 @@
parts of the documentation that go in the manpage as well as the
HTML manual.
Title
=====
=======================
QEMU User Documentation
=======================
--------
Synopsis
--------
@ -16,11 +18,13 @@ Synopsis
|qemu_system| [options] [disk_image]
-----------
Description
-----------
.. include:: target-i386-desc.rst.inc
-------
Options
-------
@ -33,11 +37,13 @@ not need a disk image.
.. include:: mux-chardev.rst.inc
-----
Notes
-----
.. include:: device-url-syntax.rst.inc
--------
See also
--------

View File

@ -24,6 +24,7 @@ The ``sifive_u`` machine supports the following devices:
* 2 QSPI controllers
* 1 ISSI 25WP256 flash
* 1 SD card in SPI mode
* PWM0 and PWM1
Please note the real world HiFive Unleashed board has a fixed configuration of
1 E51 core and 4 U54 core combination and the RISC-V core boots in 64-bit mode.
@ -209,15 +210,16 @@ command line options with ``qemu-system-riscv32``.
Running U-Boot
--------------
U-Boot mainline v2021.01 release is tested at the time of writing. To build a
U-Boot mainline v2021.07 release is tested at the time of writing. To build a
U-Boot mainline bootloader that can be booted by the ``sifive_u`` machine, use
the sifive_fu540_defconfig with similar commands as described above for Linux:
the sifive_unleashed_defconfig with similar commands as described above for
Linux:
.. code-block:: bash
$ export CROSS_COMPILE=riscv64-linux-
$ export OPENSBI=/path/to/opensbi-riscv64-generic-fw_dynamic.bin
$ make sifive_fu540_defconfig
$ make sifive_unleashed_defconfig
You will get spl/u-boot-spl.bin and u-boot.itb file in the build tree.
@ -312,31 +314,29 @@ board on QEMU ``sifive_u`` machine out of the box. This allows users to
develop and test the recommended RISC-V boot flow with a real world use
case: ZSBL (in QEMU) loads U-Boot SPL from SD card or SPI flash to L2LIM,
then U-Boot SPL loads the combined payload image of OpenSBI fw_dynamic
firmware and U-Boot proper. However sometimes we want to have a quick test
of booting U-Boot on QEMU without the needs of preparing the SPI flash or
SD card images, an alternate way can be used, which is to create a U-Boot
S-mode image by modifying the configuration of U-Boot:
firmware and U-Boot proper.
However, sometimes we want a quick test of booting U-Boot on QEMU without
having to prepare SPI flash or SD card images. An alternate way is to
create a U-Boot S-mode image by modifying the configuration of U-Boot:
.. code-block:: bash
$ export CROSS_COMPILE=riscv64-linux-
$ make sifive_unleashed_defconfig
$ make menuconfig
then manually select the following configuration in U-Boot:
then manually select the following configuration:
Device Tree Control > Provider of DTB for DT Control > Prior Stage bootloader DTB
* Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB
This lets U-Boot to use the QEMU generated device tree blob. During the build,
a build error will be seen below:
and unselect the following configuration:
.. code-block:: none
* Library routines ---> Allow access to binman information in the device tree
MKIMAGE u-boot.img
./tools/mkimage: Can't open arch/riscv/dts/hifive-unleashed-a00.dtb: No such file or directory
./tools/mkimage: failed to build FIT
make: *** [Makefile:1440: u-boot.img] Error 1
The above errors can be safely ignored as we don't run U-Boot SPL under QEMU
in this alternate configuration.
This changes U-Boot to use the QEMU generated device tree blob, and bypass
running the U-Boot SPL stage.
Boot the 64-bit U-Boot S-mode image directly:
@ -351,14 +351,18 @@ It's possible to create a 32-bit U-Boot S-mode image as well.
.. code-block:: bash
$ export CROSS_COMPILE=riscv64-linux-
$ make sifive_fu540_defconfig
$ make sifive_unleashed_defconfig
$ make menuconfig
then manually update the following configuration in U-Boot:
Device Tree Control > Provider of DTB for DT Control > Prior Stage bootloader DTB
RISC-V architecture > Base ISA > RV32I
Boot images > Text Base > 0x80400000
* Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB
* RISC-V architecture ---> Base ISA ---> RV32I
* Boot options ---> Boot images ---> Text Base ---> 0x80400000
and unselect the following configuration:
* Library routines ---> Allow access to binman information in the device tree
Use the same command line options to boot the 32-bit U-Boot S-mode image:

View File

@ -53,6 +53,16 @@ with the default OpenSBI firmware image as the -bios. It also supports
the recommended RISC-V bootflow: U-Boot SPL (M-mode) loads OpenSBI fw_dynamic
firmware and U-Boot proper (S-mode), using the standard -bios functionality.
Machine-specific options
------------------------
The following machine-specific options are supported:
- aclint=[on|off]
When this option is "on", ACLINT devices will be emulated instead of
SiFive CLINT. When not specified, this option is assumed to be "off".
Running Linux kernel
--------------------

View File

@ -19,7 +19,13 @@ Board-specific documentation
i386/microvm
i386/pc
.. include:: cpu-models-x86.rst.inc
Architectural features
~~~~~~~~~~~~~~~~~~~~~~
.. toctree::
:maxdepth: 1
i386/cpu
.. _pcsys_005freq:

View File

@ -1,5 +1,6 @@
-----
Tools
=====
-----
This section of the manual documents QEMU's "tools": its
command line utilities and other standalone programs.

View File

@ -1,3 +1,4 @@
=======================
QEMU disk image utility
=======================
@ -414,7 +415,7 @@ Command description:
4
Error on reading data
.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME
.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE [-F backing_fmt]] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME
Convert the disk image *FILENAME* or a snapshot *SNAPSHOT_PARAM*
to disk image *OUTPUT_FILENAME* using format *OUTPUT_FMT*. It can
@ -438,7 +439,7 @@ Command description:
You can use the *BACKING_FILE* option to force the output image to be
created as a copy on write image of the specified base image; the
*BACKING_FILE* should have the same content as the input's base image,
however the path, image format, etc may differ.
however the path, image format (as given by *BACKING_FMT*), etc may differ.
If a relative path name is given, the backing file is looked up relative to
the directory containing *OUTPUT_FILENAME*.

View File

@ -1,3 +1,4 @@
=====================================
QEMU Disk Network Block Device Server
=====================================

View File

@ -1,3 +1,4 @@
==================================
QEMU persistent reservation helper
==================================

View File

@ -1,3 +1,4 @@
===================
QEMU Storage Daemon
===================

View File

@ -1,3 +1,4 @@
=========================
QEMU SystemTap trace tool
=========================

View File

@ -1,5 +1,6 @@
-------------------
User Mode Emulation
===================
-------------------
This section of the manual is the overall guide for users using QEMU
for user-mode emulation. In this mode, QEMU can launch

View File

@ -1 +1 @@
common_ss.add(when: libbpf, if_true: files('ebpf_rss.c'), if_false: files('ebpf_rss-stub.c'))
softmmu_ss.add(when: libbpf, if_true: files('ebpf_rss.c'), if_false: files('ebpf_rss-stub.c'))

View File

@ -31,13 +31,13 @@
#include "qemu/cutils.h"
#include "qemu/module.h"
#include "trace/trace-root.h"
#include "exec/gdbstub.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
#include "monitor/monitor.h"
#include "chardev/char.h"
#include "chardev/char-fe.h"
#include "exec/gdbstub.h"
#include "hw/cpu/cluster.h"
#include "hw/boards.h"
#endif

View File

@ -1522,12 +1522,11 @@ ERST
SRST
``set_password [ vnc | spice ] password [ action-if-connected ]``
Change spice/vnc password. Use zero to make the password stay valid
forever. *action-if-connected* specifies what should happen in
case a connection is established: *fail* makes the password change
fail. *disconnect* changes the password and disconnects the
client. *keep* changes the password and keeps the connection up.
*keep* is the default.
Change spice/vnc password. *action-if-connected* specifies what
should happen in case a connection is established: *fail* makes the
password change fail. *disconnect* changes the password and
disconnects the client. *keep* changes the password and keeps the
connection up. *keep* is the default.
ERST
{

View File

@ -428,6 +428,7 @@ config ASPEED_SOC
select DS1338
select FTGMAC100
select I2C
select DPS310
select PCA9552
select SERIAL
select SMBUS_EEPROM

View File

@ -159,6 +159,10 @@ struct AspeedMachineState {
#define RAINIER_BMC_HW_STRAP1 0x00000000
#define RAINIER_BMC_HW_STRAP2 0x00000000
/* Fuji hardware value */
#define FUJI_BMC_HW_STRAP1 0x00000000
#define FUJI_BMC_HW_STRAP2 0x00000000
/*
* The max ram region is for firmwares that scan the address space
* with load/store to guess how much RAM the SoC has.
@ -350,6 +354,8 @@ static void aspeed_machine_init(MachineState *machine)
object_property_set_int(OBJECT(&bmc->soc), "hw-prot-key",
ASPEED_SCU_PROT_KEY, &error_abort);
}
qdev_prop_set_uint32(DEVICE(&bmc->soc), "uart-default",
amc->uart_default);
qdev_realize(DEVICE(&bmc->soc), NULL, &error_abort);
memory_region_add_subregion(get_system_memory(),
@ -602,7 +608,6 @@ static void witherspoon_bmc_i2c_init(AspeedMachineState *bmc)
/* Bus 3: TODO bmp280@77 */
/* Bus 3: TODO max31785@52 */
/* Bus 3: TODO dps310@76 */
dev = DEVICE(i2c_slave_new(TYPE_PCA9552, 0x60));
qdev_prop_set_string(dev, "description", "pca1");
i2c_slave_realize_and_unref(I2C_SLAVE(dev),
@ -617,6 +622,7 @@ static void witherspoon_bmc_i2c_init(AspeedMachineState *bmc)
qdev_connect_gpio_out(dev, pca1_leds[i].gpio_id,
qdev_get_gpio_in(DEVICE(led), 0));
}
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 3), "dps310", 0x76);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), "tmp423", 0x4c);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5), "tmp423", 0x4c);
@ -674,9 +680,21 @@ static void g220a_bmc_i2c_init(AspeedMachineState *bmc)
eeprom_buf);
}
static void aspeed_eeprom_init(I2CBus *bus, uint8_t addr, uint32_t rsize)
{
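    /* Create an at24c EEPROM of 'rsize' bytes at address 'addr' on 'bus' */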
I2CSlave *i2c_dev = i2c_slave_new("at24c-eeprom", addr);
DeviceState *dev = DEVICE(i2c_dev);
qdev_prop_set_uint32(dev, "rom-size", rsize);
i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort);
}
static void rainier_bmc_i2c_init(AspeedMachineState *bmc)
{
AspeedSoCState *soc = &bmc->soc;
I2CSlave *i2c_mux;
aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 0), 0x51, 32 * KiB);
/* The rainier expects a TMP275 but a TMP105 is compatible */
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), TYPE_TMP105,
@ -685,11 +703,20 @@ static void rainier_bmc_i2c_init(AspeedMachineState *bmc)
0x49);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), TYPE_TMP105,
0x4a);
i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4),
"pca9546", 0x70);
aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB);
aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x51, 64 * KiB);
aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 2), 0x52, 64 * KiB);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5), TYPE_TMP105,
0x48);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5), TYPE_TMP105,
0x49);
i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 5),
"pca9546", 0x70);
aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB);
aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x51, 64 * KiB);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), TYPE_TMP105,
0x48);
@ -697,18 +724,28 @@ static void rainier_bmc_i2c_init(AspeedMachineState *bmc)
0x4a);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6), TYPE_TMP105,
0x4b);
i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 6),
"pca9546", 0x70);
aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB);
aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x51, 64 * KiB);
aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 2), 0x50, 64 * KiB);
aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 3), 0x51, 64 * KiB);
/* Bus 7: TODO dps310@76 */
/* Bus 7: TODO max31785@52 */
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "pca9552", 0x61);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), "dps310", 0x76);
/* Bus 7: TODO si7021-a20@20 */
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 7), TYPE_TMP105,
0x48);
aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 7), 0x50, 64 * KiB);
aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 7), 0x51, 64 * KiB);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), TYPE_TMP105,
0x48);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), TYPE_TMP105,
0x4a);
aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 8), 0x50, 64 * KiB);
aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 8), 0x51, 64 * KiB);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), "pca9552", 0x61);
/* Bus 8: ucd90320@11 */
/* Bus 8: ucd90320@b */
@ -716,14 +753,112 @@ static void rainier_bmc_i2c_init(AspeedMachineState *bmc)
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp423", 0x4c);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 9), "tmp423", 0x4d);
aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 9), 0x50, 128 * KiB);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 10), "tmp423", 0x4c);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 10), "tmp423", 0x4d);
aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 10), 0x50, 128 * KiB);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), TYPE_TMP105,
0x48);
i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11), TYPE_TMP105,
0x49);
i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 11),
"pca9546", 0x70);
aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 0), 0x50, 64 * KiB);
aspeed_eeprom_init(pca954x_i2c_get_bus(i2c_mux, 1), 0x51, 64 * KiB);
aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 13), 0x50, 64 * KiB);
aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 14), 0x50, 64 * KiB);
aspeed_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 15), 0x50, 64 * KiB);
}
static void get_pca9548_channels(I2CBus *bus, uint8_t mux_addr,
I2CBus **channels)
{
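    /* Instantiate a PCA9548 8-channel mux at 'mux_addr' and return its downstream buses in 'channels' */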
I2CSlave *mux = i2c_slave_create_simple(bus, "pca9548", mux_addr);
for (int i = 0; i < 8; i++) {
channels[i] = pca954x_i2c_get_bus(mux, i);
}
}
#define TYPE_LM75 TYPE_TMP105
#define TYPE_TMP75 TYPE_TMP105
#define TYPE_TMP422 "tmp422"
static void fuji_bmc_i2c_init(AspeedMachineState *bmc)
{
AspeedSoCState *soc = &bmc->soc;
I2CBus *i2c[144] = {};
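    /* i2c[0..15] are the SoC root buses; later slots are filled with PCA9548 mux channels (some indices stay unused) */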
for (int i = 0; i < 16; i++) {
i2c[i] = aspeed_i2c_get_bus(&soc->i2c, i);
}
I2CBus *i2c180 = i2c[2];
I2CBus *i2c480 = i2c[8];
I2CBus *i2c600 = i2c[11];
get_pca9548_channels(i2c180, 0x70, &i2c[16]);
get_pca9548_channels(i2c480, 0x70, &i2c[24]);
/* NOTE: The device tree skips [32, 40) in the alias numbering */
get_pca9548_channels(i2c600, 0x77, &i2c[40]);
get_pca9548_channels(i2c[24], 0x71, &i2c[48]);
get_pca9548_channels(i2c[25], 0x72, &i2c[56]);
get_pca9548_channels(i2c[26], 0x76, &i2c[64]);
get_pca9548_channels(i2c[27], 0x76, &i2c[72]);
for (int i = 0; i < 8; i++) {
get_pca9548_channels(i2c[40 + i], 0x76, &i2c[80 + i * 8]);
}
i2c_slave_create_simple(i2c[17], TYPE_LM75, 0x4c);
i2c_slave_create_simple(i2c[17], TYPE_LM75, 0x4d);
aspeed_eeprom_init(i2c[19], 0x52, 64 * KiB);
aspeed_eeprom_init(i2c[20], 0x50, 2 * KiB);
aspeed_eeprom_init(i2c[22], 0x52, 2 * KiB);
i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x48);
i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x49);
i2c_slave_create_simple(i2c[3], TYPE_LM75, 0x4a);
i2c_slave_create_simple(i2c[3], TYPE_TMP422, 0x4c);
aspeed_eeprom_init(i2c[8], 0x51, 64 * KiB);
i2c_slave_create_simple(i2c[8], TYPE_LM75, 0x4a);
i2c_slave_create_simple(i2c[50], TYPE_LM75, 0x4c);
aspeed_eeprom_init(i2c[50], 0x52, 64 * KiB);
i2c_slave_create_simple(i2c[51], TYPE_TMP75, 0x48);
i2c_slave_create_simple(i2c[52], TYPE_TMP75, 0x49);
i2c_slave_create_simple(i2c[59], TYPE_TMP75, 0x48);
i2c_slave_create_simple(i2c[60], TYPE_TMP75, 0x49);
aspeed_eeprom_init(i2c[65], 0x53, 64 * KiB);
i2c_slave_create_simple(i2c[66], TYPE_TMP75, 0x49);
i2c_slave_create_simple(i2c[66], TYPE_TMP75, 0x48);
aspeed_eeprom_init(i2c[68], 0x52, 64 * KiB);
aspeed_eeprom_init(i2c[69], 0x52, 64 * KiB);
aspeed_eeprom_init(i2c[70], 0x52, 64 * KiB);
aspeed_eeprom_init(i2c[71], 0x52, 64 * KiB);
aspeed_eeprom_init(i2c[73], 0x53, 64 * KiB);
i2c_slave_create_simple(i2c[74], TYPE_TMP75, 0x49);
i2c_slave_create_simple(i2c[74], TYPE_TMP75, 0x48);
aspeed_eeprom_init(i2c[76], 0x52, 64 * KiB);
aspeed_eeprom_init(i2c[77], 0x52, 64 * KiB);
aspeed_eeprom_init(i2c[78], 0x52, 64 * KiB);
aspeed_eeprom_init(i2c[79], 0x52, 64 * KiB);
aspeed_eeprom_init(i2c[28], 0x50, 2 * KiB);
for (int i = 0; i < 8; i++) {
aspeed_eeprom_init(i2c[81 + i * 8], 0x56, 64 * KiB);
i2c_slave_create_simple(i2c[82 + i * 8], TYPE_TMP75, 0x48);
i2c_slave_create_simple(i2c[83 + i * 8], TYPE_TMP75, 0x4b);
i2c_slave_create_simple(i2c[84 + i * 8], TYPE_TMP75, 0x4a);
}
}
static bool aspeed_get_mmio_exec(Object *obj, Error **errp)
@ -804,6 +939,7 @@ static void aspeed_machine_class_init(ObjectClass *oc, void *data)
mc->no_parallel = 1;
mc->default_ram_id = "ram";
amc->macs_mask = ASPEED_MAC0_ON;
amc->uart_default = ASPEED_DEV_UART5;
aspeed_machine_class_props_init(oc);
}
@ -953,13 +1089,14 @@ static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, void *data)
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
mc->desc = "Aspeed AST2600 EVB (Cortex-A7)";
amc->soc_name = "ast2600-a1";
amc->soc_name = "ast2600-a3";
amc->hw_strap1 = AST2600_EVB_HW_STRAP1;
amc->hw_strap2 = AST2600_EVB_HW_STRAP2;
amc->fmc_model = "w25q512jv";
amc->spi_model = "mx66u51235f";
amc->num_cs = 1;
amc->macs_mask = ASPEED_MAC1_ON | ASPEED_MAC2_ON | ASPEED_MAC3_ON;
amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON | ASPEED_MAC2_ON |
ASPEED_MAC3_ON;
amc->i2c_init = ast2600_evb_i2c_init;
mc->default_ram_size = 1 * GiB;
mc->default_cpus = mc->min_cpus = mc->max_cpus =
@ -972,7 +1109,7 @@ static void aspeed_machine_tacoma_class_init(ObjectClass *oc, void *data)
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
mc->desc = "OpenPOWER Tacoma BMC (Cortex-A7)";
amc->soc_name = "ast2600-a1";
amc->soc_name = "ast2600-a3";
amc->hw_strap1 = TACOMA_BMC_HW_STRAP1;
amc->hw_strap2 = TACOMA_BMC_HW_STRAP2;
amc->fmc_model = "mx66l1g45g";
@ -996,7 +1133,7 @@ static void aspeed_machine_g220a_class_init(ObjectClass *oc, void *data)
amc->fmc_model = "n25q512a";
amc->spi_model = "mx25l25635e";
amc->num_cs = 2;
amc->macs_mask = ASPEED_MAC1_ON | ASPEED_MAC2_ON;
amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON;
amc->i2c_init = g220a_bmc_i2c_init;
mc->default_ram_size = 1024 * MiB;
mc->default_cpus = mc->min_cpus = mc->max_cpus =
@ -1009,7 +1146,7 @@ static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data)
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
mc->desc = "IBM Rainier BMC (Cortex-A7)";
amc->soc_name = "ast2600-a1";
amc->soc_name = "ast2600-a3";
amc->hw_strap1 = RAINIER_BMC_HW_STRAP1;
amc->hw_strap2 = RAINIER_BMC_HW_STRAP2;
amc->fmc_model = "mx66l1g45g";
@ -1022,6 +1159,33 @@ static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data)
aspeed_soc_num_cpus(amc->soc_name);
};
/* On 32-bit hosts, lower RAM to 1G because of the 2047 MB limit */
#if HOST_LONG_BITS == 32
#define FUJI_BMC_RAM_SIZE (1 * GiB)
#else
#define FUJI_BMC_RAM_SIZE (2 * GiB)
#endif
static void aspeed_machine_fuji_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc);
mc->desc = "Facebook Fuji BMC (Cortex-A7)";
amc->soc_name = "ast2600-a3";
amc->hw_strap1 = FUJI_BMC_HW_STRAP1;
amc->hw_strap2 = FUJI_BMC_HW_STRAP2;
amc->fmc_model = "mx66l1g45g";
amc->spi_model = "mx66l1g45g";
amc->num_cs = 2;
amc->macs_mask = ASPEED_MAC3_ON;
amc->i2c_init = fuji_bmc_i2c_init;
amc->uart_default = ASPEED_DEV_UART1;
mc->default_ram_size = FUJI_BMC_RAM_SIZE;
mc->default_cpus = mc->min_cpus = mc->max_cpus =
aspeed_soc_num_cpus(amc->soc_name);
};
static const TypeInfo aspeed_machine_types[] = {
{
.name = MACHINE_TYPE_NAME("palmetto-bmc"),
@ -1071,6 +1235,10 @@ static const TypeInfo aspeed_machine_types[] = {
.name = MACHINE_TYPE_NAME("rainier-bmc"),
.parent = TYPE_ASPEED_MACHINE,
.class_init = aspeed_machine_rainier_class_init,
}, {
.name = MACHINE_TYPE_NAME("fuji-bmc"),
.parent = TYPE_ASPEED_MACHINE,
.class_init = aspeed_machine_fuji_class_init,
}, {
.name = TYPE_ASPEED_MACHINE,
.parent = TYPE_MACHINE,

View File

@ -322,10 +322,10 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq);
}
/* UART - attach an 8250 to the IO space as our UART5 */
serial_mm_init(get_system_memory(), sc->memmap[ASPEED_DEV_UART5], 2,
aspeed_soc_get_irq(s, ASPEED_DEV_UART5),
38400, serial_hd(0), DEVICE_LITTLE_ENDIAN);
/* UART - attach an 8250 to the IO space as our UART */
serial_mm_init(get_system_memory(), sc->memmap[s->uart_default], 2,
aspeed_soc_get_irq(s, s->uart_default), 38400,
serial_hd(0), DEVICE_LITTLE_ENDIAN);
/* I2C */
object_property_set_link(OBJECT(&s->i2c), "dram", OBJECT(s->dram_mr),
@ -516,9 +516,9 @@ static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data)
dc->realize = aspeed_soc_ast2600_realize;
sc->name = "ast2600-a1";
sc->name = "ast2600-a3";
sc->cpu_type = ARM_CPU_TYPE_NAME("cortex-a7");
sc->silicon_rev = AST2600_A1_SILICON_REV;
sc->silicon_rev = AST2600_A3_SILICON_REV;
sc->sram_size = 0x16400;
sc->spis_num = 2;
sc->ehcis_num = 2;
@ -530,7 +530,7 @@ static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data)
}
static const TypeInfo aspeed_soc_ast2600_type_info = {
.name = "ast2600-a1",
.name = "ast2600-a3",
.parent = TYPE_ASPEED_SOC,
.instance_size = sizeof(AspeedSoCState),
.instance_init = aspeed_soc_ast2600_init,

View File

@ -287,9 +287,9 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq);
}
/* UART - attach an 8250 to the IO space as our UART5 */
serial_mm_init(get_system_memory(), sc->memmap[ASPEED_DEV_UART5], 2,
aspeed_soc_get_irq(s, ASPEED_DEV_UART5), 38400,
/* UART - attach an 8250 to the IO space as our UART */
serial_mm_init(get_system_memory(), sc->memmap[s->uart_default], 2,
aspeed_soc_get_irq(s, s->uart_default), 38400,
serial_hd(0), DEVICE_LITTLE_ENDIAN);
/* I2C */
@ -439,6 +439,8 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
static Property aspeed_soc_properties[] = {
DEFINE_PROP_LINK("dram", AspeedSoCState, dram_mr, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_UINT32("uart-default", AspeedSoCState, uart_default,
ASPEED_DEV_UART5),
DEFINE_PROP_END_OF_LIST(),
};

View File

@ -373,6 +373,11 @@ static qemu_irq get_sse_irq_in(MPS2TZMachineState *mms, int irqno)
}
}
/* Union describing the device-specific extra data we pass to the devfn. */
typedef union PPCExtraData {
bool i2c_internal;
} PPCExtraData;
/* Most of the devices in the AN505 FPGA image sit behind
* Peripheral Protection Controllers. These data structures
* define the layout of which devices sit behind which PPCs.
@ -382,7 +387,8 @@ static qemu_irq get_sse_irq_in(MPS2TZMachineState *mms, int irqno)
*/
typedef MemoryRegion *MakeDevFn(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs);
const int *irqs,
const PPCExtraData *extradata);
typedef struct PPCPortInfo {
const char *name;
@ -391,6 +397,7 @@ typedef struct PPCPortInfo {
hwaddr addr;
hwaddr size;
int irqs[3]; /* currently no device needs more IRQ lines than this */
PPCExtraData extradata; /* to pass device-specific info to the devfn */
} PPCPortInfo;
typedef struct PPCInfo {
@ -401,7 +408,8 @@ typedef struct PPCInfo {
static MemoryRegion *make_unimp_dev(MPS2TZMachineState *mms,
void *opaque,
const char *name, hwaddr size,
const int *irqs)
const int *irqs,
const PPCExtraData *extradata)
{
/* Initialize, configure and realize a TYPE_UNIMPLEMENTED_DEVICE,
* and return a pointer to its MemoryRegion.
@ -417,7 +425,7 @@ static MemoryRegion *make_unimp_dev(MPS2TZMachineState *mms,
static MemoryRegion *make_uart(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs)
const int *irqs, const PPCExtraData *extradata)
{
/* The irq[] array is tx, rx, combined, in that order */
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms);
@ -441,7 +449,7 @@ static MemoryRegion *make_uart(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_scc(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs)
const int *irqs, const PPCExtraData *extradata)
{
MPS2SCC *scc = opaque;
DeviceState *sccdev;
@ -465,7 +473,7 @@ static MemoryRegion *make_scc(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_fpgaio(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs)
const int *irqs, const PPCExtraData *extradata)
{
MPS2FPGAIO *fpgaio = opaque;
MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_GET_CLASS(mms);
@ -480,7 +488,8 @@ static MemoryRegion *make_fpgaio(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs)
const int *irqs,
const PPCExtraData *extradata)
{
SysBusDevice *s;
NICInfo *nd = &nd_table[0];
@ -500,7 +509,8 @@ static MemoryRegion *make_eth_dev(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_eth_usb(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs)
const int *irqs,
const PPCExtraData *extradata)
{
/*
* The AN524 makes the ethernet and USB share a PPC port.
@ -543,7 +553,7 @@ static MemoryRegion *make_eth_usb(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_mpc(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs)
const int *irqs, const PPCExtraData *extradata)
{
TZMPC *mpc = opaque;
int i = mpc - &mms->mpc[0];
@ -615,7 +625,7 @@ static void remap_irq_fn(void *opaque, int n, int level)
static MemoryRegion *make_dma(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs)
const int *irqs, const PPCExtraData *extradata)
{
/* The irq[] array is DMACINTR, DMACINTERR, DMACINTTC, in that order */
PL080State *dma = opaque;
@ -672,7 +682,7 @@ static MemoryRegion *make_dma(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_spi(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs)
const int *irqs, const PPCExtraData *extradata)
{
/*
* The AN505 has five PL022 SPI controllers.
@ -694,7 +704,7 @@ static MemoryRegion *make_spi(MPS2TZMachineState *mms, void *opaque,
static MemoryRegion *make_i2c(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs)
const int *irqs, const PPCExtraData *extradata)
{
ArmSbconI2CState *i2c = opaque;
SysBusDevice *s;
@ -702,12 +712,26 @@ static MemoryRegion *make_i2c(MPS2TZMachineState *mms, void *opaque,
object_initialize_child(OBJECT(mms), name, i2c, TYPE_ARM_SBCON_I2C);
s = SYS_BUS_DEVICE(i2c);
sysbus_realize(s, &error_fatal);
/*
* If this is an internal-use-only i2c bus, mark it full
* so that user-created i2c devices are not plugged into it.
* If we implement models of any on-board i2c devices that
* plug in to one of the internal-use-only buses, then we will
* need to create and plug those in here before we mark the
* bus as full.
*/
if (extradata->i2c_internal) {
BusState *qbus = qdev_get_child_bus(DEVICE(i2c), "i2c");
qbus_mark_full(qbus);
}
return sysbus_mmio_get_region(s, 0);
}
static MemoryRegion *make_rtc(MPS2TZMachineState *mms, void *opaque,
const char *name, hwaddr size,
const int *irqs)
const int *irqs, const PPCExtraData *extradata)
{
PL031State *pl031 = opaque;
SysBusDevice *s;
@ -912,10 +936,14 @@ static void mps2tz_common_init(MachineState *machine)
{ "uart2", make_uart, &mms->uart[2], 0x40202000, 0x1000, { 36, 37, 44 } },
{ "uart3", make_uart, &mms->uart[3], 0x40203000, 0x1000, { 38, 39, 45 } },
{ "uart4", make_uart, &mms->uart[4], 0x40204000, 0x1000, { 40, 41, 46 } },
{ "i2c0", make_i2c, &mms->i2c[0], 0x40207000, 0x1000 },
{ "i2c1", make_i2c, &mms->i2c[1], 0x40208000, 0x1000 },
{ "i2c2", make_i2c, &mms->i2c[2], 0x4020c000, 0x1000 },
{ "i2c3", make_i2c, &mms->i2c[3], 0x4020d000, 0x1000 },
{ "i2c0", make_i2c, &mms->i2c[0], 0x40207000, 0x1000, {},
{ .i2c_internal = true /* touchscreen */ } },
{ "i2c1", make_i2c, &mms->i2c[1], 0x40208000, 0x1000, {},
{ .i2c_internal = true /* audio conf */ } },
{ "i2c2", make_i2c, &mms->i2c[2], 0x4020c000, 0x1000, {},
{ .i2c_internal = false /* shield 0 */ } },
{ "i2c3", make_i2c, &mms->i2c[3], 0x4020d000, 0x1000, {},
{ .i2c_internal = false /* shield 1 */ } },
},
}, {
.name = "apb_ppcexp2",
@ -956,15 +984,20 @@ static void mps2tz_common_init(MachineState *machine)
}, {
.name = "apb_ppcexp1",
.ports = {
{ "i2c0", make_i2c, &mms->i2c[0], 0x41200000, 0x1000 },
{ "i2c1", make_i2c, &mms->i2c[1], 0x41201000, 0x1000 },
{ "i2c0", make_i2c, &mms->i2c[0], 0x41200000, 0x1000, {},
{ .i2c_internal = true /* touchscreen */ } },
{ "i2c1", make_i2c, &mms->i2c[1], 0x41201000, 0x1000, {},
{ .i2c_internal = true /* audio conf */ } },
{ "spi0", make_spi, &mms->spi[0], 0x41202000, 0x1000, { 52 } },
{ "spi1", make_spi, &mms->spi[1], 0x41203000, 0x1000, { 53 } },
{ "spi2", make_spi, &mms->spi[2], 0x41204000, 0x1000, { 54 } },
{ "i2c2", make_i2c, &mms->i2c[2], 0x41205000, 0x1000 },
{ "i2c3", make_i2c, &mms->i2c[3], 0x41206000, 0x1000 },
{ "i2c2", make_i2c, &mms->i2c[2], 0x41205000, 0x1000, {},
{ .i2c_internal = false /* shield 0 */ } },
{ "i2c3", make_i2c, &mms->i2c[3], 0x41206000, 0x1000, {},
{ .i2c_internal = false /* shield 1 */ } },
{ /* port 7 reserved */ },
{ "i2c4", make_i2c, &mms->i2c[4], 0x41208000, 0x1000 },
{ "i2c4", make_i2c, &mms->i2c[4], 0x41208000, 0x1000, {},
{ .i2c_internal = true /* DDR4 EEPROM */ } },
},
}, {
.name = "apb_ppcexp2",
@ -1006,15 +1039,20 @@ static void mps2tz_common_init(MachineState *machine)
}, {
.name = "apb_ppcexp1",
.ports = {
{ "i2c0", make_i2c, &mms->i2c[0], 0x49200000, 0x1000 },
{ "i2c1", make_i2c, &mms->i2c[1], 0x49201000, 0x1000 },
{ "i2c0", make_i2c, &mms->i2c[0], 0x49200000, 0x1000, {},
{ .i2c_internal = true /* touchscreen */ } },
{ "i2c1", make_i2c, &mms->i2c[1], 0x49201000, 0x1000, {},
{ .i2c_internal = true /* audio conf */ } },
{ "spi0", make_spi, &mms->spi[0], 0x49202000, 0x1000, { 53 } },
{ "spi1", make_spi, &mms->spi[1], 0x49203000, 0x1000, { 54 } },
{ "spi2", make_spi, &mms->spi[2], 0x49204000, 0x1000, { 55 } },
{ "i2c2", make_i2c, &mms->i2c[2], 0x49205000, 0x1000 },
{ "i2c3", make_i2c, &mms->i2c[3], 0x49206000, 0x1000 },
{ "i2c2", make_i2c, &mms->i2c[2], 0x49205000, 0x1000, {},
{ .i2c_internal = false /* shield 0 */ } },
{ "i2c3", make_i2c, &mms->i2c[3], 0x49206000, 0x1000, {},
{ .i2c_internal = false /* shield 1 */ } },
{ /* port 7 reserved */ },
{ "i2c4", make_i2c, &mms->i2c[4], 0x49208000, 0x1000 },
{ "i2c4", make_i2c, &mms->i2c[4], 0x49208000, 0x1000, {},
{ .i2c_internal = true /* DDR4 EEPROM */ } },
},
}, {
.name = "apb_ppcexp2",
@ -1084,7 +1122,7 @@ static void mps2tz_common_init(MachineState *machine)
}
mr = pinfo->devfn(mms, pinfo->opaque, pinfo->name, pinfo->size,
pinfo->irqs);
pinfo->irqs, &pinfo->extradata);
portname = g_strdup_printf("port[%d]", port);
object_property_set_link(OBJECT(ppc), portname, OBJECT(mr),
&error_fatal);

View File

@ -428,7 +428,17 @@ static void mps2_common_init(MachineState *machine)
0x40023000, /* Audio */
0x40029000, /* Shield0 */
0x4002a000}; /* Shield1 */
sysbus_create_simple(TYPE_ARM_SBCON_I2C, i2cbase[i], NULL);
DeviceState *dev;
dev = sysbus_create_simple(TYPE_ARM_SBCON_I2C, i2cbase[i], NULL);
if (i < 2) {
/*
* internal-only bus: mark it full to avoid user-created
* i2c devices being plugged into it.
*/
BusState *qbus = qdev_get_child_bus(dev, "i2c");
qbus_mark_full(qbus);
}
}
create_unimplemented_device("i2s", 0x40024000, 0x400);

View File

@ -31,6 +31,7 @@
#define NPCM750_EVB_POWER_ON_STRAPS 0x00001ff7
#define QUANTA_GSJ_POWER_ON_STRAPS 0x00001fff
#define QUANTA_GBS_POWER_ON_STRAPS 0x000017ff
#define KUDO_BMC_POWER_ON_STRAPS 0x00001fff
static const char npcm7xx_default_bootrom[] = "npcm7xx_bootrom.bin";
@ -357,6 +358,23 @@ static void quanta_gbs_init(MachineState *machine)
npcm7xx_load_kernel(machine, soc);
}
static void kudo_bmc_init(MachineState *machine)
{
NPCM7xxState *soc;
soc = npcm7xx_create_soc(machine, KUDO_BMC_POWER_ON_STRAPS);
npcm7xx_connect_dram(soc, machine->ram);
qdev_realize(DEVICE(soc), NULL, &error_fatal);
npcm7xx_load_bootrom(machine, soc);
npcm7xx_connect_flash(&soc->fiu[0], 0, "mx66u51235f",
drive_get(IF_MTD, 0, 0));
npcm7xx_connect_flash(&soc->fiu[1], 0, "mx66u51235f",
drive_get(IF_MTD, 3, 0));
npcm7xx_load_kernel(machine, soc);
}
static void npcm7xx_set_soc_type(NPCM7xxMachineClass *nmc, const char *type)
{
NPCM7xxClass *sc = NPCM7XX_CLASS(object_class_by_name(type));
@ -417,6 +435,18 @@ static void gbs_bmc_machine_class_init(ObjectClass *oc, void *data)
mc->default_ram_size = 1 * GiB;
}
static void kudo_bmc_machine_class_init(ObjectClass *oc, void *data)
{
NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_CLASS(oc);
MachineClass *mc = MACHINE_CLASS(oc);
npcm7xx_set_soc_type(nmc, TYPE_NPCM730);
mc->desc = "Kudo BMC (Cortex-A9)";
mc->init = kudo_bmc_init;
mc->default_ram_size = 1 * GiB;
};
static const TypeInfo npcm7xx_machine_types[] = {
{
.name = TYPE_NPCM7XX_MACHINE,
@ -437,6 +467,10 @@ static const TypeInfo npcm7xx_machine_types[] = {
.name = MACHINE_TYPE_NAME("quanta-gbs-bmc"),
.parent = TYPE_NPCM7XX_MACHINE,
.class_init = gbs_bmc_machine_class_init,
}, {
.name = MACHINE_TYPE_NAME("kudo-bmc"),
.parent = TYPE_NPCM7XX_MACHINE,
.class_init = kudo_bmc_machine_class_init,
},
};

View File

@ -584,6 +584,12 @@ static void create_its(VirtMachineState *vms)
const char *itsclass = its_class_name();
DeviceState *dev;
if (!strcmp(itsclass, "arm-gicv3-its")) {
if (!vms->tcg_its) {
itsclass = NULL;
}
}
if (!itsclass) {
/* Do nothing if not supported */
return;
@ -621,7 +627,7 @@ static void create_v2m(VirtMachineState *vms)
vms->msi_controller = VIRT_MSI_CTRL_GICV2M;
}
static void create_gic(VirtMachineState *vms)
static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
{
MachineState *ms = MACHINE(vms);
/* We create a standalone GIC */
@ -655,6 +661,14 @@ static void create_gic(VirtMachineState *vms)
nb_redist_regions);
qdev_prop_set_uint32(vms->gic, "redist-region-count[0]", redist0_count);
if (!kvm_irqchip_in_kernel()) {
if (vms->tcg_its) {
object_property_set_link(OBJECT(vms->gic), "sysmem",
OBJECT(mem), &error_fatal);
qdev_prop_set_bit(vms->gic, "has-lpi", true);
}
}
if (nb_redist_regions == 2) {
uint32_t redist1_capacity =
vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE;
@ -2039,7 +2053,7 @@ static void machvirt_init(MachineState *machine)
virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem);
create_gic(vms);
create_gic(vms, sysmem);
virt_cpu_post_init(vms, sysmem);
@ -2742,6 +2756,12 @@ static void virt_instance_init(Object *obj)
} else {
/* Default allows ITS instantiation */
vms->its = true;
if (vmc->no_tcg_its) {
vms->tcg_its = false;
} else {
vms->tcg_its = true;
}
}
/* Default disallows iommu instantiation */
@ -2791,8 +2811,13 @@ DEFINE_VIRT_MACHINE_AS_LATEST(6, 2)
static void virt_machine_6_1_options(MachineClass *mc)
{
VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
virt_machine_6_2_options(mc);
compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len);
/* qemu ITS was introduced with 6.2 */
vmc->no_tcg_its = true;
}
DEFINE_VIRT_MACHINE(6, 1)

View File

@ -235,8 +235,18 @@ static void uart_parameters_setup(CadenceUARTState *s)
static int uart_can_receive(void *opaque)
{
CadenceUARTState *s = opaque;
int ret = MAX(CADENCE_UART_RX_FIFO_SIZE, CADENCE_UART_TX_FIFO_SIZE);
uint32_t ch_mode = s->r[R_MR] & UART_MR_CHMODE;
int ret;
uint32_t ch_mode;
/* ignore characters when unclocked or in reset */
if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n",
__func__);
return 0;
}
ret = MAX(CADENCE_UART_RX_FIFO_SIZE, CADENCE_UART_TX_FIFO_SIZE);
ch_mode = s->r[R_MR] & UART_MR_CHMODE;
if (ch_mode == NORMAL_MODE || ch_mode == ECHO_MODE) {
ret = MIN(ret, CADENCE_UART_RX_FIFO_SIZE - s->rx_count);
@ -353,11 +363,6 @@ static void uart_receive(void *opaque, const uint8_t *buf, int size)
CadenceUARTState *s = opaque;
uint32_t ch_mode = s->r[R_MR] & UART_MR_CHMODE;
/* ignore characters when unclocked or in reset */
if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) {
return;
}
if (ch_mode == NORMAL_MODE || ch_mode == ECHO_MODE) {
uart_write_rx_fifo(opaque, buf, size);
}
@ -373,6 +378,8 @@ static void uart_event(void *opaque, QEMUChrEvent event)
/* ignore characters when unclocked or in reset */
if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n",
__func__);
return;
}
@ -403,15 +410,22 @@ static void uart_read_rx_fifo(CadenceUARTState *s, uint32_t *c)
uart_update_status(s);
}
static void uart_write(void *opaque, hwaddr offset,
uint64_t value, unsigned size)
static MemTxResult uart_write(void *opaque, hwaddr offset,
uint64_t value, unsigned size, MemTxAttrs attrs)
{
CadenceUARTState *s = opaque;
/* ignore access when unclocked or in reset */
if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n",
__func__);
return MEMTX_ERROR;
}
DB_PRINT(" offset:%x data:%08x\n", (unsigned)offset, (unsigned)value);
offset >>= 2;
if (offset >= CADENCE_UART_R_MAX) {
return;
return MEMTX_DECODE_ERROR;
}
switch (offset) {
case R_IER: /* ier (wts imr) */
@ -458,30 +472,41 @@ static void uart_write(void *opaque, hwaddr offset,
break;
}
uart_update_status(s);
return MEMTX_OK;
}
static uint64_t uart_read(void *opaque, hwaddr offset,
unsigned size)
static MemTxResult uart_read(void *opaque, hwaddr offset,
uint64_t *value, unsigned size, MemTxAttrs attrs)
{
CadenceUARTState *s = opaque;
uint32_t c = 0;
/* ignore access when unclocked or in reset */
if (!clock_is_enabled(s->refclk) || device_is_in_reset(DEVICE(s))) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: uart is unclocked or in reset\n",
__func__);
return MEMTX_ERROR;
}
offset >>= 2;
if (offset >= CADENCE_UART_R_MAX) {
c = 0;
} else if (offset == R_TX_RX) {
return MEMTX_DECODE_ERROR;
}
if (offset == R_TX_RX) {
uart_read_rx_fifo(s, &c);
} else {
c = s->r[offset];
c = s->r[offset];
}
DB_PRINT(" offset:%x data:%08x\n", (unsigned)(offset << 2), (unsigned)c);
return c;
*value = c;
return MEMTX_OK;
}
static const MemoryRegionOps uart_ops = {
.read = uart_read,
.write = uart_write,
.read_with_attrs = uart_read,
.write_with_attrs = uart_write,
.endianness = DEVICE_NATIVE_ENDIAN,
};

View File

@ -2252,7 +2252,7 @@ static int qxl_pre_save(void *opaque)
} else {
d->last_release_offset = (uint8_t *)d->last_release - ram_start;
}
if (d->last_release_offset < d->vga.vram_size) {
if (d->last_release_offset >= d->vga.vram_size) {
return 1;
}

View File

@ -185,6 +185,7 @@ static VGPUDMABuf
dmabuf->buf.stride = fb->stride;
dmabuf->buf.fourcc = qemu_pixman_to_drm_format(fb->format);
dmabuf->buf.fd = res->dmabuf_fd;
dmabuf->buf.allow_fences = true;
dmabuf->scanout_id = scanout_id;
QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next);

View File

@ -985,8 +985,10 @@ void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
break;
}
if (!cmd->finished) {
virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
VIRTIO_GPU_RESP_OK_NODATA);
if (!g->parent_obj.renderer_blocked) {
virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
VIRTIO_GPU_RESP_OK_NODATA);
}
}
}
@ -1042,6 +1044,30 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
g->processing_cmdq = false;
}
static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
struct virtio_gpu_ctrl_command *cmd, *tmp;
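    /* Send responses for all outstanding fenced commands and retire them */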
QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
QTAILQ_REMOVE(&g->fenceq, cmd, next);
g_free(cmd);
g->inflight--;
if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
}
}
}
static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);
virtio_gpu_process_fenceq(g);
virtio_gpu_process_cmdq(g);
}
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOGPU *g = VIRTIO_GPU(vdev);
@ -1400,10 +1426,12 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
VirtIOGPUBaseClass *vgbc = &vgc->parent;
vgc->handle_ctrl = virtio_gpu_handle_ctrl;
vgc->process_cmd = virtio_gpu_simple_process_cmd;
vgc->update_cursor_data = virtio_gpu_update_cursor_data;
vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;
vdc->realize = virtio_gpu_device_realize;
vdc->reset = virtio_gpu_reset;

View File

@ -54,6 +54,13 @@
#define DMA_EXEC_DST 0x110
#define DMA_EXEC_SRC 0x118
/*
* FU540/FU740 docs are incorrect with NextConfig.wsize/rsize reset values.
* The reset values tested on Unleashed/Unmatched boards are 6 instead of 0.
*/
#define CONFIG_WRSZ_DEFAULT 6
#define CONFIG_RDSZ_DEFAULT 6
enum dma_chan_state {
DMA_CHAN_STATE_IDLE,
DMA_CHAN_STATE_STARTED,
@ -67,13 +74,13 @@ static void sifive_pdma_run(SiFivePDMAState *s, int ch)
uint64_t dst = s->chan[ch].next_dst;
uint64_t src = s->chan[ch].next_src;
uint32_t config = s->chan[ch].next_config;
int wsize, rsize, size;
int wsize, rsize, size, remainder;
uint8_t buf[64];
int n;
/* do nothing if bytes to transfer is zero */
if (!bytes) {
goto error;
goto done;
}
/*
@ -99,11 +106,7 @@ static void sifive_pdma_run(SiFivePDMAState *s, int ch)
size = 6;
}
size = 1 << size;
/* the bytes to transfer should be multiple of transaction size */
if (bytes % size) {
goto error;
}
remainder = bytes % size;
/* indicate a DMA transfer is started */
s->chan[ch].state = DMA_CHAN_STATE_STARTED;
@ -124,10 +127,13 @@ static void sifive_pdma_run(SiFivePDMAState *s, int ch)
s->chan[ch].exec_bytes -= size;
}
/* indicate a DMA transfer is done */
s->chan[ch].state = DMA_CHAN_STATE_DONE;
s->chan[ch].control &= ~CONTROL_RUN;
s->chan[ch].control |= CONTROL_DONE;
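    /* copy the tail that is shorter than one full transaction, if any */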
if (remainder) {
cpu_physical_memory_read(s->chan[ch].exec_src, buf, remainder);
cpu_physical_memory_write(s->chan[ch].exec_dst, buf, remainder);
s->chan[ch].exec_src += remainder;
s->chan[ch].exec_dst += remainder;
s->chan[ch].exec_bytes -= remainder;
}
/* reload exec_ registers if repeat is required */
if (s->chan[ch].next_config & CONFIG_REPEAT) {
@ -136,6 +142,11 @@ static void sifive_pdma_run(SiFivePDMAState *s, int ch)
s->chan[ch].exec_src = src;
}
done:
/* indicate a DMA transfer is done */
s->chan[ch].state = DMA_CHAN_STATE_DONE;
s->chan[ch].control &= ~CONTROL_RUN;
s->chan[ch].control |= CONTROL_DONE;
return;
error:
@ -221,6 +232,7 @@ static void sifive_pdma_write(void *opaque, hwaddr offset,
{
SiFivePDMAState *s = opaque;
int ch = SIFIVE_PDMA_CHAN_NO(offset);
bool claimed;
if (ch >= SIFIVE_PDMA_CHANS) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
@ -231,8 +243,28 @@ static void sifive_pdma_write(void *opaque, hwaddr offset,
offset &= 0xfff;
switch (offset) {
case DMA_CONTROL:
claimed = !!(s->chan[ch].control & CONTROL_CLAIM);
if (!claimed && (value & CONTROL_CLAIM)) {
/* reset Next* registers */
s->chan[ch].next_config = (CONFIG_RDSZ_DEFAULT << CONFIG_RDSZ_SHIFT) |
(CONFIG_WRSZ_DEFAULT << CONFIG_WRSZ_SHIFT);
s->chan[ch].next_bytes = 0;
s->chan[ch].next_dst = 0;
s->chan[ch].next_src = 0;
}
s->chan[ch].control = value;
/*
* If the channel was not claimed before the run bit is set,
* the DMA won't run.
*/
if (!claimed) {
s->chan[ch].control &= ~CONTROL_RUN;
return;
}
if (value & CONTROL_RUN) {
sifive_pdma_run(s, ch);
}

View File

@ -164,49 +164,48 @@
#define GPIO_YZAAAB_DIRECTION (0x1E4 >> 2)
#define GPIO_AC_DATA_VALUE (0x1E8 >> 2)
#define GPIO_AC_DIRECTION (0x1EC >> 2)
#define GPIO_3_6V_MEM_SIZE 0x1F0
#define GPIO_3_6V_REG_ARRAY_SIZE (GPIO_3_6V_MEM_SIZE >> 2)
#define GPIO_3_3V_MEM_SIZE 0x1F0
#define GPIO_3_3V_REG_ARRAY_SIZE (GPIO_3_3V_MEM_SIZE >> 2)
/* AST2600 only - 1.8V gpios */
/*
* The AST2600 has same 3.6V gpios as the AST2400 (memory offsets 0x0-0x198)
* and additional 1.8V gpios (memory offsets 0x800-0x9D4).
* The AST2600 has two copies of the GPIO controller: the same 3.3V gpios as the
* AST2400 (memory offsets 0x0-0x198) and a second controller with 1.8V gpios
* (memory offsets 0x800-0x9D4).
*/
#define GPIO_1_8V_REG_OFFSET 0x800
#define GPIO_1_8V_ABCD_DATA_VALUE ((0x800 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_DIRECTION ((0x804 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_INT_ENABLE ((0x808 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_INT_SENS_0 ((0x80C - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_INT_SENS_1 ((0x810 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_INT_SENS_2 ((0x814 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_INT_STATUS ((0x818 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_RESET_TOLERANT ((0x81C - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_DATA_VALUE ((0x820 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_DIRECTION ((0x824 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_INT_ENABLE ((0x828 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_INT_SENS_0 ((0x82C - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_INT_SENS_1 ((0x830 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_INT_SENS_2 ((0x834 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_INT_STATUS ((0x838 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_RESET_TOLERANT ((0x83C - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_DEBOUNCE_1 ((0x840 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_DEBOUNCE_2 ((0x844 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_DEBOUNCE_1 ((0x848 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_DEBOUNCE_2 ((0x84C - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_DEBOUNCE_TIME_1 ((0x850 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_DEBOUNCE_TIME_2 ((0x854 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_DEBOUNCE_TIME_3 ((0x858 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_COMMAND_SRC_0 ((0x860 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_COMMAND_SRC_1 ((0x864 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_COMMAND_SRC_0 ((0x868 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_COMMAND_SRC_1 ((0x86C - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_DATA_READ ((0x8C0 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_DATA_READ ((0x8C4 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_INPUT_MASK ((0x9D0 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_E_INPUT_MASK ((0x9D4 - GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_MEM_SIZE 0x9D8
#define GPIO_1_8V_REG_ARRAY_SIZE ((GPIO_1_8V_MEM_SIZE - \
GPIO_1_8V_REG_OFFSET) >> 2)
#define GPIO_1_8V_ABCD_DATA_VALUE (0x000 >> 2)
#define GPIO_1_8V_ABCD_DIRECTION (0x004 >> 2)
#define GPIO_1_8V_ABCD_INT_ENABLE (0x008 >> 2)
#define GPIO_1_8V_ABCD_INT_SENS_0 (0x00C >> 2)
#define GPIO_1_8V_ABCD_INT_SENS_1 (0x010 >> 2)
#define GPIO_1_8V_ABCD_INT_SENS_2 (0x014 >> 2)
#define GPIO_1_8V_ABCD_INT_STATUS (0x018 >> 2)
#define GPIO_1_8V_ABCD_RESET_TOLERANT (0x01C >> 2)
#define GPIO_1_8V_E_DATA_VALUE (0x020 >> 2)
#define GPIO_1_8V_E_DIRECTION (0x024 >> 2)
#define GPIO_1_8V_E_INT_ENABLE (0x028 >> 2)
#define GPIO_1_8V_E_INT_SENS_0 (0x02C >> 2)
#define GPIO_1_8V_E_INT_SENS_1 (0x030 >> 2)
#define GPIO_1_8V_E_INT_SENS_2 (0x034 >> 2)
#define GPIO_1_8V_E_INT_STATUS (0x038 >> 2)
#define GPIO_1_8V_E_RESET_TOLERANT (0x03C >> 2)
#define GPIO_1_8V_ABCD_DEBOUNCE_1 (0x040 >> 2)
#define GPIO_1_8V_ABCD_DEBOUNCE_2 (0x044 >> 2)
#define GPIO_1_8V_E_DEBOUNCE_1 (0x048 >> 2)
#define GPIO_1_8V_E_DEBOUNCE_2 (0x04C >> 2)
#define GPIO_1_8V_DEBOUNCE_TIME_1 (0x050 >> 2)
#define GPIO_1_8V_DEBOUNCE_TIME_2 (0x054 >> 2)
#define GPIO_1_8V_DEBOUNCE_TIME_3 (0x058 >> 2)
#define GPIO_1_8V_ABCD_COMMAND_SRC_0 (0x060 >> 2)
#define GPIO_1_8V_ABCD_COMMAND_SRC_1 (0x064 >> 2)
#define GPIO_1_8V_E_COMMAND_SRC_0 (0x068 >> 2)
#define GPIO_1_8V_E_COMMAND_SRC_1 (0x06C >> 2)
#define GPIO_1_8V_ABCD_DATA_READ (0x0C0 >> 2)
#define GPIO_1_8V_E_DATA_READ (0x0C4 >> 2)
#define GPIO_1_8V_ABCD_INPUT_MASK (0x1D0 >> 2)
#define GPIO_1_8V_E_INPUT_MASK (0x1D4 >> 2)
#define GPIO_1_8V_MEM_SIZE 0x1D8
#define GPIO_1_8V_REG_ARRAY_SIZE (GPIO_1_8V_MEM_SIZE >> 2)
static int aspeed_evaluate_irq(GPIOSets *regs, int gpio_prev_high, int gpio)
{
@ -381,7 +380,7 @@ static uint32_t update_value_control_source(GPIOSets *regs, uint32_t old_value,
return new_value;
}
static const AspeedGPIOReg aspeed_3_6v_gpios[GPIO_3_6V_REG_ARRAY_SIZE] = {
static const AspeedGPIOReg aspeed_3_3v_gpios[GPIO_3_3V_REG_ARRAY_SIZE] = {
/* Set ABCD */
[GPIO_ABCD_DATA_VALUE] = { 0, gpio_reg_data_value },
[GPIO_ABCD_DIRECTION] = { 0, gpio_reg_direction },
@ -801,7 +800,7 @@ static const GPIOSetProperties ast2500_set_props[] = {
[7] = {0x000000ff, 0x000000ff, {"AC"} },
};
static GPIOSetProperties ast2600_3_6v_set_props[] = {
static GPIOSetProperties ast2600_3_3v_set_props[] = {
[0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} },
[1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} },
[2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} },
@ -928,7 +927,7 @@ static void aspeed_gpio_ast2400_class_init(ObjectClass *klass, void *data)
agc->nr_gpio_pins = 216;
agc->nr_gpio_sets = 7;
agc->gap = 196;
agc->reg_table = aspeed_3_6v_gpios;
agc->reg_table = aspeed_3_3v_gpios;
}
static void aspeed_gpio_2500_class_init(ObjectClass *klass, void *data)
@ -939,17 +938,17 @@ static void aspeed_gpio_2500_class_init(ObjectClass *klass, void *data)
agc->nr_gpio_pins = 228;
agc->nr_gpio_sets = 8;
agc->gap = 220;
agc->reg_table = aspeed_3_6v_gpios;
agc->reg_table = aspeed_3_3v_gpios;
}
static void aspeed_gpio_ast2600_3_6v_class_init(ObjectClass *klass, void *data)
static void aspeed_gpio_ast2600_3_3v_class_init(ObjectClass *klass, void *data)
{
AspeedGPIOClass *agc = ASPEED_GPIO_CLASS(klass);
agc->props = ast2600_3_6v_set_props;
agc->props = ast2600_3_3v_set_props;
agc->nr_gpio_pins = 208;
agc->nr_gpio_sets = 7;
agc->reg_table = aspeed_3_6v_gpios;
agc->reg_table = aspeed_3_3v_gpios;
}
static void aspeed_gpio_ast2600_1_8v_class_init(ObjectClass *klass, void *data)
@ -985,10 +984,10 @@ static const TypeInfo aspeed_gpio_ast2500_info = {
.instance_init = aspeed_gpio_init,
};
static const TypeInfo aspeed_gpio_ast2600_3_6v_info = {
static const TypeInfo aspeed_gpio_ast2600_3_3v_info = {
.name = TYPE_ASPEED_GPIO "-ast2600",
.parent = TYPE_ASPEED_GPIO,
.class_init = aspeed_gpio_ast2600_3_6v_class_init,
.class_init = aspeed_gpio_ast2600_3_3v_class_init,
.instance_init = aspeed_gpio_init,
};
@ -1004,7 +1003,7 @@ static void aspeed_gpio_register_types(void)
type_register_static(&aspeed_gpio_info);
type_register_static(&aspeed_gpio_ast2400_info);
type_register_static(&aspeed_gpio_ast2500_info);
type_register_static(&aspeed_gpio_ast2600_3_6v_info);
type_register_static(&aspeed_gpio_ast2600_3_3v_info);
type_register_static(&aspeed_gpio_ast2600_1_8v_info);
}

View File

@ -1916,7 +1916,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
PCMachineState *pcms = PC_MACHINE(machine);
int nb_numa_nodes = machine->numa_state->num_nodes;
NodeInfo *numa_info = machine->numa_state->nodes;
ram_addr_t hotplugabble_address_space_size =
ram_addr_t hotpluggable_address_space_size =
object_property_get_int(OBJECT(pcms), PC_MACHINE_DEVMEM_REGION_SIZE,
NULL);
@ -2022,10 +2022,10 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
* Memory devices may override proximity set by this entry,
* providing _PXM method if necessary.
*/
if (hotplugabble_address_space_size) {
if (hotpluggable_address_space_size) {
numamem = acpi_data_push(table_data, sizeof *numamem);
build_srat_memory(numamem, machine->device_memory->base,
hotplugabble_address_space_size, nb_numa_nodes - 1,
hotpluggable_address_space_size, nb_numa_nodes - 1,
MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
}

View File

@ -679,7 +679,7 @@ static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu,
}
break;
default:
/* Unknwon type */
/* Unknown type */
return false;
}
return true;
@ -692,7 +692,7 @@ static inline bool vtd_pdire_present(VTDPASIDDirEntry *pdire)
/**
* Caller of this function should check present bit if wants
* to use pdir entry for futher usage except for fpd bit check.
* to use pdir entry for further usage except for fpd bit check.
*/
static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base,
uint32_t pasid,
@ -746,7 +746,7 @@ static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s,
/**
* Caller of this function should check present bit if wants
* to use pasid entry for futher usage except for fpd bit check.
* to use pasid entry for further usage except for fpd bit check.
*/
static int vtd_get_pe_from_pdire(IntelIOMMUState *s,
uint32_t pasid,
@ -1507,7 +1507,7 @@ static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as)
}
/*
* Check if specific device is configed to bypass address
* Check if specific device is configured to bypass address
* translation for DMA requests. In Scalable Mode, bypass
* 1st-level translation or 2nd-level translation, it depends
* on PGTT setting.

View File

@ -843,6 +843,12 @@ void xen_load_linux(PCMachineState *pcms)
x86ms->fw_cfg = fw_cfg;
}
#define PC_ROM_MIN_VGA 0xc0000
#define PC_ROM_MIN_OPTION 0xc8000
#define PC_ROM_MAX 0xe0000
#define PC_ROM_ALIGN 0x800
#define PC_ROM_SIZE (PC_ROM_MAX - PC_ROM_MIN_VGA)
void pc_memory_init(PCMachineState *pcms,
MemoryRegion *system_memory,
MemoryRegion *rom_memory,

View File

@ -62,7 +62,7 @@ config RX_ICU
config LOONGSON_LIOINTC
bool
config SIFIVE_CLINT
config RISCV_ACLINT
bool
config SIFIVE_PLIC

View File

@ -165,6 +165,16 @@ static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
}
if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable &&
(cs->hpplpi.prio != 0xff)) {
if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio)) {
cs->hppi.irq = cs->hpplpi.irq;
cs->hppi.prio = cs->hpplpi.prio;
cs->hppi.grp = cs->hpplpi.grp;
seenbetter = true;
}
}
/* If the best interrupt we just found would preempt whatever
* was the previous best interrupt before this update, then
* we know it's definitely the best one now.
@ -339,9 +349,13 @@ static void gicv3_set_irq(void *opaque, int irq, int level)
static void arm_gicv3_post_load(GICv3State *s)
{
int i;
/* Recalculate our cached idea of the current highest priority
* pending interrupt, but don't set IRQ or FIQ lines.
*/
for (i = 0; i < s->num_cpu; i++) {
gicv3_redist_update_lpi(&s->cpu[i]);
}
gicv3_full_update_noirqset(s);
/* Repopulate the cache of GICv3CPUState pointers for target CPUs */
gicv3_cache_all_target_cpustates(s);

View File

@ -345,6 +345,11 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
return;
}
if (s->lpi_enable && !s->dma) {
error_setg(errp, "Redist-ITS: Guest 'sysmem' reference link not set");
return;
}
s->cpu = g_new0(GICv3CPUState, s->num_cpu);
for (i = 0; i < s->num_cpu; i++) {
@ -381,6 +386,10 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
(1 << 24) |
(i << 8) |
(last << 4);
if (s->lpi_enable) {
s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS;
}
}
}
@ -426,6 +435,7 @@ static void arm_gicv3_common_reset(DeviceState *dev)
memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));
cs->hppi.prio = 0xff;
cs->hpplpi.prio = 0xff;
/* State in the CPU interface must *not* be reset here, because it
* is part of the CPU's reset domain, not the GIC device's.
@ -494,9 +504,12 @@ static Property arm_gicv3_common_properties[] = {
DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
redist_region_count, qdev_prop_uint32, uint32_t),
DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
MemoryRegion *),
DEFINE_PROP_END_OF_LIST(),
};
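A hedged sketch (not part of this patch) of how a board model might consume the two properties added above; the function name is a placeholder and the calls assume hw/qdev-properties.h, exec/address-spaces.h and qapi/error.h are included.
static void board_enable_gicv3_lpis(DeviceState *gicdev)
{
    /* advertise GICR_TYPER.PLPIS so the guest may set GICR_CTLR.Enable_LPIs */
    qdev_prop_set_bit(gicdev, "has-lpi", true);
    /* satisfy the new realize-time check: give the redistributors a view of
     * guest memory for the LPI configuration and pending tables */
    object_property_set_link(OBJECT(gicdev), "sysmem",
                             OBJECT(get_system_memory()), &error_fatal);
}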

View File

@ -417,8 +417,9 @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
}
}
if (cs->ich_hcr_el2 & ICH_HCR_EL2_EN) {
maintlevel = maintenance_interrupt_state(cs);
if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
maintenance_interrupt_state(cs) != 0) {
maintlevel = 1;
}
trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
@ -899,10 +900,12 @@ static void icc_activate_irq(GICv3CPUState *cs, int irq)
cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
gicv3_redist_update(cs);
} else {
} else if (irq < GICV3_LPI_INTID_START) {
gicv3_gicd_active_set(cs->gic, irq);
gicv3_gicd_pending_clear(cs->gic, irq);
gicv3_update(cs->gic, irq, 1);
} else {
gicv3_redist_lpi_pending(cs, irq, 0);
}
}
@ -1318,7 +1321,8 @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
gicv3_redist_affid(cs), value);
if (irq >= cs->gic->num_irq) {
if ((irq >= cs->gic->num_irq) &&
!(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
/* This handles two cases:
* 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
* to the GICC_EOIR, the GIC ignores that write.

View File

@ -384,7 +384,9 @@ static bool gicd_readl(GICv3State *s, hwaddr offset,
* A3V == 1 (non-zero values of Affinity level 3 supported)
* IDbits == 0xf (we support 16-bit interrupt identifiers)
* DVIS == 0 (Direct virtual LPI injection not supported)
* LPIS == 0 (LPIs not supported)
* LPIS == 1 (LPIs are supported if affinity routing is enabled)
* num_LPIs == 0b00000 (bits [15:11],Number of LPIs as indicated
* by GICD_TYPER.IDbits)
* MBIS == 0 (message-based SPIs not supported)
* SecurityExtn == 1 if security extns supported
* CPUNumber == 0 since for us ARE is always 1
@ -399,6 +401,7 @@ static bool gicd_readl(GICv3State *s, hwaddr offset,
bool sec_extn = !(s->gicd_ctlr & GICD_CTLR_DS);
*data = (1 << 25) | (1 << 24) | (sec_extn << 10) |
(s->lpi_enable << GICD_TYPER_LPIS_SHIFT) |
(0xf << 19) | itlinesnumber;
return true;
}

hw/intc/arm_gicv3_its.c (new file, 1322 lines)
File diff suppressed because it is too large

View File

@ -50,6 +50,8 @@ static int gicv3_its_post_load(void *opaque, int version_id)
static const VMStateDescription vmstate_its = {
.name = "arm_gicv3_its",
.version_id = 1,
.minimum_version_id = 1,
.pre_save = gicv3_its_pre_save,
.post_load = gicv3_its_post_load,
.priority = MIG_PRI_GICV3_ITS,
@ -99,14 +101,15 @@ static const MemoryRegionOps gicv3_its_trans_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops)
void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops,
const MemoryRegionOps *tops)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(s);
memory_region_init_io(&s->iomem_its_cntrl, OBJECT(s), ops, s,
"control", ITS_CONTROL_SIZE);
memory_region_init_io(&s->iomem_its_translation, OBJECT(s),
&gicv3_its_trans_ops, s,
tops ? tops : &gicv3_its_trans_ops, s,
"translation", ITS_TRANS_SIZE);
/* Our two regions are always adjacent, therefore we now combine them

View File

@ -106,7 +106,7 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
kvm_arm_register_device(&s->iomem_its_cntrl, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd, 0);
gicv3_its_init_mmio(s, NULL);
gicv3_its_init_mmio(s, NULL, NULL);
if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
GITS_CTLR)) {

View File

@ -248,10 +248,19 @@ static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
case GICR_CTLR:
/* For our implementation, GICR_TYPER.DPGS is 0 and so all
* the DPG bits are RAZ/WI. We don't do anything asynchronously,
* so UWP and RWP are RAZ/WI. And GICR_TYPER.LPIS is 0 (we don't
* implement LPIs) so Enable_LPIs is RES0. So there are no writable
* bits for us.
* so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
* implement LPIs) so Enable_LPIs is programmable.
*/
if (cs->gicr_typer & GICR_TYPER_PLPIS) {
if (value & GICR_CTLR_ENABLE_LPIS) {
cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
/* Check for any pending interrupts in the pending table */
gicv3_redist_update_lpi(cs);
gicv3_redist_update(cs);
} else {
cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
}
}
return MEMTX_OK;
case GICR_STATUSR:
/* RAZ/WI for our implementation */
@ -526,6 +535,144 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
return r;
}
static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
{
AddressSpace *as = &cs->gic->dma_as;
uint64_t lpict_baddr;
uint8_t lpite;
uint8_t prio;
lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;
address_space_read(as, lpict_baddr + ((irq - GICV3_LPI_INTID_START) *
sizeof(lpite)), MEMTXATTRS_UNSPECIFIED, &lpite,
sizeof(lpite));
if (!(lpite & LPI_CTE_ENABLED)) {
return;
}
if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
prio = lpite & LPI_PRIORITY_MASK;
} else {
prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
}
if ((prio < cs->hpplpi.prio) ||
((prio == cs->hpplpi.prio) && (irq <= cs->hpplpi.irq))) {
cs->hpplpi.irq = irq;
cs->hpplpi.prio = prio;
/* LPIs are always non-secure Grp1 interrupts */
cs->hpplpi.grp = GICV3_G1NS;
}
}
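Worked example with illustrative numbers: a configuration-table byte of 0x49 has the enable bit set and a priority field of 0x48; with GICD_CTLR.DS the effective priority is 0x48, while in the non-DS case it becomes (0x48 >> 1) | 0x80 = 0xa4, i.e. the LPI is mapped into the Non-secure half of the priority range before being compared against the cached hpplpi.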
void gicv3_redist_update_lpi(GICv3CPUState *cs)
{
/*
* This function scans the LPI pending table and for each pending
* LPI, reads the corresponding entry from LPI configuration table
* to extract the priority info and determine if the current LPI
* priority is lower than the last computed high priority lpi interrupt.
* If yes, replace current LPI as the new high priority lpi interrupt.
*/
AddressSpace *as = &cs->gic->dma_as;
uint64_t lpipt_baddr;
uint32_t pendt_size = 0;
uint8_t pend;
int i, bit;
uint64_t idbits;
idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
GICD_TYPER_IDBITS);
if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser ||
!cs->gicr_pendbaser) {
return;
}
cs->hpplpi.prio = 0xff;
lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
/* Determine the highest priority pending interrupt among LPIs */
pendt_size = (1ULL << (idbits + 1));
for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
address_space_read(as, lpipt_baddr + i, MEMTXATTRS_UNSPECIFIED, &pend,
sizeof(pend));
while (pend) {
bit = ctz32(pend);
gicv3_redist_check_lpi_priority(cs, i * 8 + bit);
pend &= ~(1 << bit);
}
}
}
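For scale (illustrative): with GICR_PROPBASER.IDBITS capped at GICD_TYPER_IDBITS (15), pendt_size is 2^16 interrupt IDs, i.e. an 8 KiB pending bitmap, and since LPI INTIDs start at 8192 the loop scans bytes 1024 through 8191 of that table.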
void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
{
/*
* This function updates the pending bit in lpi pending table for
* the irq being activated or deactivated.
*/
AddressSpace *as = &cs->gic->dma_as;
uint64_t lpipt_baddr;
bool ispend = false;
uint8_t pend;
/*
* get the bit value corresponding to this irq in the
* lpi pending table
*/
lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
address_space_read(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));
ispend = extract32(pend, irq % 8, 1);
/* no change in the value of pending bit, return */
if (ispend == level) {
return;
}
pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
address_space_write(as, lpipt_baddr + ((irq / 8) * sizeof(pend)),
MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend));
/*
* check if this LPI is better than the current hpplpi, if yes
* just set hpplpi.prio and .irq without doing a full rescan
*/
if (level) {
gicv3_redist_check_lpi_priority(cs, irq);
} else {
if (irq == cs->hpplpi.irq) {
gicv3_redist_update_lpi(cs);
}
}
}
void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
{
uint64_t idbits;
idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
GICD_TYPER_IDBITS);
if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !cs->gicr_propbaser ||
!cs->gicr_pendbaser || (irq > (1ULL << (idbits + 1)) - 1) ||
irq < GICV3_LPI_INTID_START) {
return;
}
/* set/clear the pending bit for this irq */
gicv3_redist_lpi_pending(cs, irq, level);
gicv3_redist_update(cs);
}
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
/* Update redistributor state for a change in an external PPI input line */

View File

@ -24,6 +24,7 @@
#ifndef QEMU_ARM_GICV3_INTERNAL_H
#define QEMU_ARM_GICV3_INTERNAL_H
#include "hw/registerfields.h"
#include "hw/intc/arm_gicv3_common.h"
/* Distributor registers, as offsets from the distributor base address */
@ -67,6 +68,11 @@
#define GICD_CTLR_E1NWF (1U << 7)
#define GICD_CTLR_RWP (1U << 31)
#define GICD_TYPER_LPIS_SHIFT 17
/* 16 bits EventId */
#define GICD_TYPER_IDBITS 0xf
/*
* Redistributor frame offsets from RD_base
*/
@ -122,17 +128,19 @@
#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK (7ULL << 56)
#define GICR_PROPBASER_ADDR_MASK (0xfffffffffULL << 12)
#define GICR_PROPBASER_SHAREABILITY_MASK (3U << 10)
#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
#define GICR_PROPBASER_IDBITS_MASK (0x1f)
FIELD(GICR_PROPBASER, IDBITS, 0, 5)
FIELD(GICR_PROPBASER, INNERCACHE, 7, 3)
FIELD(GICR_PROPBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PROPBASER, PHYADDR, 12, 40)
FIELD(GICR_PROPBASER, OUTERCACHE, 56, 3)
#define GICR_PENDBASER_PTZ (1ULL << 62)
#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK (7ULL << 56)
#define GICR_PENDBASER_ADDR_MASK (0xffffffffULL << 16)
#define GICR_PENDBASER_SHAREABILITY_MASK (3U << 10)
#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
FIELD(GICR_PENDBASER, INNERCACHE, 7, 3)
FIELD(GICR_PENDBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PENDBASER, PHYADDR, 16, 36)
FIELD(GICR_PENDBASER, OUTERCACHE, 56, 3)
FIELD(GICR_PENDBASER, PTZ, 62, 1)
#define GICR_PROPBASER_IDBITS_THRESHOLD 0xd
#define ICC_CTLR_EL1_CBPR (1U << 0)
#define ICC_CTLR_EL1_EOIMODE (1U << 1)
@ -239,6 +247,163 @@
#define ICH_VTR_EL2_PREBITS_SHIFT 26
#define ICH_VTR_EL2_PRIBITS_SHIFT 29
/* ITS Registers */
FIELD(GITS_BASER, SIZE, 0, 8)
FIELD(GITS_BASER, PAGESIZE, 8, 2)
FIELD(GITS_BASER, SHAREABILITY, 10, 2)
FIELD(GITS_BASER, PHYADDR, 12, 36)
FIELD(GITS_BASER, PHYADDRL_64K, 16, 32)
FIELD(GITS_BASER, PHYADDRH_64K, 12, 4)
FIELD(GITS_BASER, ENTRYSIZE, 48, 5)
FIELD(GITS_BASER, OUTERCACHE, 53, 3)
FIELD(GITS_BASER, TYPE, 56, 3)
FIELD(GITS_BASER, INNERCACHE, 59, 3)
FIELD(GITS_BASER, INDIRECT, 62, 1)
FIELD(GITS_BASER, VALID, 63, 1)
FIELD(GITS_CBASER, SIZE, 0, 8)
FIELD(GITS_CBASER, SHAREABILITY, 10, 2)
FIELD(GITS_CBASER, PHYADDR, 12, 40)
FIELD(GITS_CBASER, OUTERCACHE, 53, 3)
FIELD(GITS_CBASER, INNERCACHE, 59, 3)
FIELD(GITS_CBASER, VALID, 63, 1)
FIELD(GITS_CREADR, STALLED, 0, 1)
FIELD(GITS_CREADR, OFFSET, 5, 15)
FIELD(GITS_CWRITER, RETRY, 0, 1)
FIELD(GITS_CWRITER, OFFSET, 5, 15)
FIELD(GITS_CTLR, ENABLED, 0, 1)
FIELD(GITS_CTLR, QUIESCENT, 31, 1)
FIELD(GITS_TYPER, PHYSICAL, 0, 1)
FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4)
FIELD(GITS_TYPER, IDBITS, 8, 5)
FIELD(GITS_TYPER, DEVBITS, 13, 5)
FIELD(GITS_TYPER, SEIS, 18, 1)
FIELD(GITS_TYPER, PTA, 19, 1)
FIELD(GITS_TYPER, CIDBITS, 32, 4)
FIELD(GITS_TYPER, CIL, 36, 1)
#define GITS_IDREGS 0xFFD0
#define ITS_CTLR_ENABLED (1U) /* ITS Enabled */
#define GITS_BASER_RO_MASK (R_GITS_BASER_ENTRYSIZE_MASK | \
R_GITS_BASER_TYPE_MASK)
#define GITS_BASER_PAGESIZE_4K 0
#define GITS_BASER_PAGESIZE_16K 1
#define GITS_BASER_PAGESIZE_64K 2
#define GITS_BASER_TYPE_DEVICE 1ULL
#define GITS_BASER_TYPE_COLLECTION 4ULL
#define GITS_PAGE_SIZE_4K 0x1000
#define GITS_PAGE_SIZE_16K 0x4000
#define GITS_PAGE_SIZE_64K 0x10000
#define L1TABLE_ENTRY_SIZE 8
#define LPI_CTE_ENABLED TABLE_ENTRY_VALID_MASK
#define LPI_PRIORITY_MASK 0xfc
#define GITS_CMDQ_ENTRY_SIZE 32
#define NUM_BYTES_IN_DW 8
#define CMD_MASK 0xff
/* ITS Commands */
#define GITS_CMD_CLEAR 0x04
#define GITS_CMD_DISCARD 0x0F
#define GITS_CMD_INT 0x03
#define GITS_CMD_MAPC 0x09
#define GITS_CMD_MAPD 0x08
#define GITS_CMD_MAPI 0x0B
#define GITS_CMD_MAPTI 0x0A
#define GITS_CMD_INV 0x0C
#define GITS_CMD_INVALL 0x0D
#define GITS_CMD_SYNC 0x05
/* MAPC command fields */
#define ICID_LENGTH 16
#define ICID_MASK ((1U << ICID_LENGTH) - 1)
FIELD(MAPC, RDBASE, 16, 32)
#define RDBASE_PROCNUM_LENGTH 16
#define RDBASE_PROCNUM_MASK ((1ULL << RDBASE_PROCNUM_LENGTH) - 1)
/* MAPD command fields */
#define ITTADDR_LENGTH 44
#define ITTADDR_SHIFT 8
#define ITTADDR_MASK MAKE_64BIT_MASK(ITTADDR_SHIFT, ITTADDR_LENGTH)
#define SIZE_MASK 0x1f
/* MAPI command fields */
#define EVENTID_MASK ((1ULL << 32) - 1)
/* MAPTI command fields */
#define pINTID_SHIFT 32
#define pINTID_MASK MAKE_64BIT_MASK(32, 32)
#define DEVID_SHIFT 32
#define DEVID_MASK MAKE_64BIT_MASK(32, 32)
#define VALID_SHIFT 63
#define CMD_FIELD_VALID_MASK (1ULL << VALID_SHIFT)
#define L2_TABLE_VALID_MASK CMD_FIELD_VALID_MASK
#define TABLE_ENTRY_VALID_MASK (1ULL << 0)
/**
* Default features advertised by this version of ITS
*/
/* Physical LPIs supported */
#define GITS_TYPE_PHYSICAL (1U << 0)
/*
* 12 bytes Interrupt translation Table Entry size
* as per Table 5.3 in GICv3 spec
* ITE Lower 8 Bytes
* Bits: | 49 ... 26 | 25 ... 2 | 1 | 0 |
* Values: | 1023 | IntNum | IntType | Valid |
* ITE Higher 4 Bytes
* Bits: | 31 ... 16 | 15 ...0 |
* Values: | vPEID | ICID |
*/
#define ITS_ITT_ENTRY_SIZE 0xC
#define ITE_ENTRY_INTTYPE_SHIFT 1
#define ITE_ENTRY_INTID_SHIFT 2
#define ITE_ENTRY_INTID_MASK MAKE_64BIT_MASK(2, 24)
#define ITE_ENTRY_INTSP_SHIFT 26
#define ITE_ENTRY_ICID_MASK MAKE_64BIT_MASK(0, 16)
/* 16 bits EventId */
#define ITS_IDBITS GICD_TYPER_IDBITS
/* 16 bits DeviceId */
#define ITS_DEVBITS 0xF
/* 16 bits CollectionId */
#define ITS_CIDBITS 0xF
/*
* 8 bytes Device Table Entry size
* Valid = 1 bit,ITTAddr = 44 bits,Size = 5 bits
*/
#define GITS_DTE_SIZE (0x8ULL)
#define GITS_DTE_ITTADDR_SHIFT 6
#define GITS_DTE_ITTADDR_MASK MAKE_64BIT_MASK(GITS_DTE_ITTADDR_SHIFT, \
ITTADDR_LENGTH)
/*
* 8 bytes Collection Table Entry size
* Valid = 1 bit,RDBase = 36 bits(considering max RDBASE)
*/
#define GITS_CTE_SIZE (0x8ULL)
#define GITS_CTE_RDBASE_PROCNUM_MASK MAKE_64BIT_MASK(1, RDBASE_PROCNUM_LENGTH)
/* Special interrupt IDs */
#define INTID_SECURE 1020
#define INTID_NONSECURE 1021
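A hedged illustration of the ITE layout documented above, not code from this patch: packing the lower eight bytes of an Interrupt Translation Entry with the field macros defined in this header. The helper name is a placeholder and the IntType encoding (1 for a physical interrupt) is an assumption.
/* Illustrative only: builds the low 8 bytes of an ITE (Valid | IntType |
 * IntNum | 1023) as described in the layout comment above. */
static inline uint64_t its_make_ite_lo(uint32_t intid)
{
    return TABLE_ENTRY_VALID_MASK                      /* bit 0: valid */
         | (1ULL << ITE_ENTRY_INTTYPE_SHIFT)           /* bit 1: IntType (assumed physical) */
         | (((uint64_t)intid << ITE_ENTRY_INTID_SHIFT)
            & ITE_ENTRY_INTID_MASK)                    /* bits 2-25: IntNum */
         | (1023ULL << ITE_ENTRY_INTSP_SHIFT);         /* bits 26-49: 1023 */
}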
@ -296,6 +461,9 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
unsigned size, MemTxAttrs attrs);
void gicv3_dist_set_irq(GICv3State *s, int irq, int level);
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level);
void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level);
void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level);
void gicv3_redist_update_lpi(GICv3CPUState *cs);
void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns);
void gicv3_init_cpuif(GICv3State *s);

View File

@ -27,6 +27,7 @@
#include "target/riscv/cpu_bits.h"
#include "target/riscv/cpu.h"
#include "hw/intc/ibex_plic.h"
#include "hw/irq.h"
static bool addr_between(uint32_t addr, uint32_t base, uint32_t num)
{
@ -92,19 +93,10 @@ static bool ibex_plic_irqs_pending(IbexPlicState *s, uint32_t context)
static void ibex_plic_update(IbexPlicState *s)
{
CPUState *cpu;
int level, i;
int i;
for (i = 0; i < s->num_cpus; i++) {
cpu = qemu_get_cpu(i);
if (!cpu) {
continue;
}
level = ibex_plic_irqs_pending(s, 0);
riscv_cpu_update_mip(RISCV_CPU(cpu), MIP_MEIP, BOOL_TO_MASK(level));
qemu_set_irq(s->external_irqs[i], ibex_plic_irqs_pending(s, 0));
}
}
@ -268,6 +260,9 @@ static void ibex_plic_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_in(dev, ibex_plic_irq_request, s->num_sources);
s->external_irqs = g_malloc(sizeof(qemu_irq) * s->num_cpus);
qdev_init_gpio_out(dev, s->external_irqs, s->num_cpus);
/*
* We can't allow the supervisor to control SEIP as this would allow the
* supervisor to clear a pending external interrupt which will result in

View File

@ -8,6 +8,7 @@ softmmu_ss.add(when: 'CONFIG_ARM_GIC', if_true: files(
'arm_gicv3_dist.c',
'arm_gicv3_its_common.c',
'arm_gicv3_redist.c',
'arm_gicv3_its.c',
))
softmmu_ss.add(when: 'CONFIG_ETRAXFS', if_true: files('etraxfs_pic.c'))
softmmu_ss.add(when: 'CONFIG_HEATHROW_PIC', if_true: files('heathrow_pic.c'))
@ -46,7 +47,7 @@ specific_ss.add(when: 'CONFIG_RX_ICU', if_true: files('rx_icu.c'))
specific_ss.add(when: 'CONFIG_S390_FLIC', if_true: files('s390_flic.c'))
specific_ss.add(when: 'CONFIG_S390_FLIC_KVM', if_true: files('s390_flic_kvm.c'))
specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c'))
specific_ss.add(when: 'CONFIG_SIFIVE_CLINT', if_true: files('sifive_clint.c'))
specific_ss.add(when: 'CONFIG_RISCV_ACLINT', if_true: files('riscv_aclint.c'))
specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c'))
specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c'))
specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XICS'],

hw/intc/riscv_aclint.c (new file, 460 lines)
View File

@ -0,0 +1,460 @@
/*
* RISC-V ACLINT (Advanced Core Local Interruptor)
* URL: https://github.com/riscv/riscv-aclint
*
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
* Copyright (c) 2017 SiFive, Inc.
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
*
* This provides real-time clock, timer and interprocessor interrupts.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/sysbus.h"
#include "target/riscv/cpu.h"
#include "hw/qdev-properties.h"
#include "hw/intc/riscv_aclint.h"
#include "qemu/timer.h"
#include "hw/irq.h"
typedef struct riscv_aclint_mtimer_callback {
RISCVAclintMTimerState *s;
int num;
} riscv_aclint_mtimer_callback;
static uint64_t cpu_riscv_read_rtc(uint32_t timebase_freq)
{
return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
timebase_freq, NANOSECONDS_PER_SECOND);
}
/*
* Called when timecmp is written to update the QEMU timer or immediately
* trigger timer interrupt if mtimecmp <= current timer value.
*/
static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
RISCVCPU *cpu,
int hartid,
uint64_t value,
uint32_t timebase_freq)
{
uint64_t next;
uint64_t diff;
uint64_t rtc_r = cpu_riscv_read_rtc(timebase_freq);
cpu->env.timecmp = value;
if (cpu->env.timecmp <= rtc_r) {
/*
* If we're setting an MTIMECMP value in the "past",
* immediately raise the timer interrupt
*/
qemu_irq_raise(mtimer->timer_irqs[hartid - mtimer->hartid_base]);
return;
}
/* otherwise, set up the future timer interrupt */
qemu_irq_lower(mtimer->timer_irqs[hartid - mtimer->hartid_base]);
diff = cpu->env.timecmp - rtc_r;
/* back to ns (note args switched in muldiv64) */
uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq);
/*
* check if ns_diff overflowed and check if the addition would potentially
* overflow
*/
if ((NANOSECONDS_PER_SECOND > timebase_freq && ns_diff < diff) ||
ns_diff > INT64_MAX) {
next = INT64_MAX;
} else {
/*
* as it is very unlikely qemu_clock_get_ns will return a value
* greater than INT64_MAX, no additional check is needed for an
* unsigned integer overflow.
*/
next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns_diff;
/*
* if ns_diff is INT64_MAX next may still be outside the range
* of a signed integer.
*/
next = MIN(next, INT64_MAX);
}
timer_mod(cpu->env.timer, next);
}
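Worked example (illustrative): with timebase-freq = 10 MHz, a timecmp that lies 1000 ticks in the future gives ns_diff = muldiv64(1000, 10^9, 10^7) = 100000 ns, so the QEMU timer fires 100 us from now; the clamping to INT64_MAX only applies when the multiplication overflows or the resulting deadline would exceed a signed 64-bit nanosecond count.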
/*
* Callback used when the timer set using timer_mod expires.
* Should raise the timer interrupt line
*/
static void riscv_aclint_mtimer_cb(void *opaque)
{
riscv_aclint_mtimer_callback *state = opaque;
qemu_irq_raise(state->s->timer_irqs[state->num]);
}
/* CPU read MTIMER register */
static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr,
unsigned size)
{
RISCVAclintMTimerState *mtimer = opaque;
if (addr >= mtimer->timecmp_base &&
addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) {
size_t hartid = mtimer->hartid_base +
((addr - mtimer->timecmp_base) >> 3);
CPUState *cpu = qemu_get_cpu(hartid);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
if (!env) {
qemu_log_mask(LOG_GUEST_ERROR,
"aclint-mtimer: invalid hartid: %zu", hartid);
} else if ((addr & 0x7) == 0) {
/* timecmp_lo */
uint64_t timecmp = env->timecmp;
return timecmp & 0xFFFFFFFF;
} else if ((addr & 0x7) == 4) {
/* timecmp_hi */
uint64_t timecmp = env->timecmp;
return (timecmp >> 32) & 0xFFFFFFFF;
} else {
qemu_log_mask(LOG_UNIMP,
"aclint-mtimer: invalid read: %08x", (uint32_t)addr);
return 0;
}
} else if (addr == mtimer->time_base) {
/* time_lo */
return cpu_riscv_read_rtc(mtimer->timebase_freq) & 0xFFFFFFFF;
} else if (addr == mtimer->time_base + 4) {
/* time_hi */
return (cpu_riscv_read_rtc(mtimer->timebase_freq) >> 32) & 0xFFFFFFFF;
}
qemu_log_mask(LOG_UNIMP,
"aclint-mtimer: invalid read: %08x", (uint32_t)addr);
return 0;
}
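For example, a 32-bit read at timecmp_base + 8 * 2 + 4 returns the high word of mtimecmp for hart hartid_base + 2, while reads at time_base and time_base + 4 return the low and high words of the shared free-running mtime.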
/* CPU write MTIMER register */
static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size)
{
RISCVAclintMTimerState *mtimer = opaque;
if (addr >= mtimer->timecmp_base &&
addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) {
size_t hartid = mtimer->hartid_base +
((addr - mtimer->timecmp_base) >> 3);
CPUState *cpu = qemu_get_cpu(hartid);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
if (!env) {
qemu_log_mask(LOG_GUEST_ERROR,
"aclint-mtimer: invalid hartid: %zu", hartid);
} else if ((addr & 0x7) == 0) {
/* timecmp_lo */
uint64_t timecmp_hi = env->timecmp >> 32;
riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
timecmp_hi << 32 | (value & 0xFFFFFFFF),
mtimer->timebase_freq);
return;
} else if ((addr & 0x7) == 4) {
/* timecmp_hi */
uint64_t timecmp_lo = env->timecmp;
riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
value << 32 | (timecmp_lo & 0xFFFFFFFF),
mtimer->timebase_freq);
} else {
qemu_log_mask(LOG_UNIMP,
"aclint-mtimer: invalid timecmp write: %08x",
(uint32_t)addr);
}
return;
} else if (addr == mtimer->time_base) {
/* time_lo */
qemu_log_mask(LOG_UNIMP,
"aclint-mtimer: time_lo write not implemented");
return;
} else if (addr == mtimer->time_base + 4) {
/* time_hi */
qemu_log_mask(LOG_UNIMP,
"aclint-mtimer: time_hi write not implemented");
return;
}
qemu_log_mask(LOG_UNIMP,
"aclint-mtimer: invalid write: %08x", (uint32_t)addr);
}
static const MemoryRegionOps riscv_aclint_mtimer_ops = {
.read = riscv_aclint_mtimer_read,
.write = riscv_aclint_mtimer_write,
.endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 8
}
};
static Property riscv_aclint_mtimer_properties[] = {
DEFINE_PROP_UINT32("hartid-base", RISCVAclintMTimerState,
hartid_base, 0),
DEFINE_PROP_UINT32("num-harts", RISCVAclintMTimerState, num_harts, 1),
DEFINE_PROP_UINT32("timecmp-base", RISCVAclintMTimerState,
timecmp_base, RISCV_ACLINT_DEFAULT_MTIMECMP),
DEFINE_PROP_UINT32("time-base", RISCVAclintMTimerState,
time_base, RISCV_ACLINT_DEFAULT_MTIME),
DEFINE_PROP_UINT32("aperture-size", RISCVAclintMTimerState,
aperture_size, RISCV_ACLINT_DEFAULT_MTIMER_SIZE),
DEFINE_PROP_UINT32("timebase-freq", RISCVAclintMTimerState,
timebase_freq, 0),
DEFINE_PROP_END_OF_LIST(),
};
static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp)
{
RISCVAclintMTimerState *s = RISCV_ACLINT_MTIMER(dev);
int i;
memory_region_init_io(&s->mmio, OBJECT(dev), &riscv_aclint_mtimer_ops,
s, TYPE_RISCV_ACLINT_MTIMER, s->aperture_size);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio);
s->timer_irqs = g_malloc(sizeof(qemu_irq) * s->num_harts);
qdev_init_gpio_out(dev, s->timer_irqs, s->num_harts);
/* Claim timer interrupt bits */
for (i = 0; i < s->num_harts; i++) {
RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(s->hartid_base + i));
if (riscv_cpu_claim_interrupts(cpu, MIP_MTIP) < 0) {
error_report("MTIP already claimed");
exit(1);
}
}
}
static void riscv_aclint_mtimer_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = riscv_aclint_mtimer_realize;
device_class_set_props(dc, riscv_aclint_mtimer_properties);
}
static const TypeInfo riscv_aclint_mtimer_info = {
.name = TYPE_RISCV_ACLINT_MTIMER,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(RISCVAclintMTimerState),
.class_init = riscv_aclint_mtimer_class_init,
};
/*
* Create ACLINT MTIMER device.
*/
DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size,
uint32_t hartid_base, uint32_t num_harts,
uint32_t timecmp_base, uint32_t time_base, uint32_t timebase_freq,
bool provide_rdtime)
{
int i;
DeviceState *dev = qdev_new(TYPE_RISCV_ACLINT_MTIMER);
assert(num_harts <= RISCV_ACLINT_MAX_HARTS);
assert(!(addr & 0x7));
assert(!(timecmp_base & 0x7));
assert(!(time_base & 0x7));
qdev_prop_set_uint32(dev, "hartid-base", hartid_base);
qdev_prop_set_uint32(dev, "num-harts", num_harts);
qdev_prop_set_uint32(dev, "timecmp-base", timecmp_base);
qdev_prop_set_uint32(dev, "time-base", time_base);
qdev_prop_set_uint32(dev, "aperture-size", size);
qdev_prop_set_uint32(dev, "timebase-freq", timebase_freq);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
for (i = 0; i < num_harts; i++) {
CPUState *cpu = qemu_get_cpu(hartid_base + i);
RISCVCPU *rvcpu = RISCV_CPU(cpu);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
riscv_aclint_mtimer_callback *cb =
g_malloc0(sizeof(riscv_aclint_mtimer_callback));
if (!env) {
g_free(cb);
continue;
}
if (provide_rdtime) {
riscv_cpu_set_rdtime_fn(env, cpu_riscv_read_rtc, timebase_freq);
}
cb->s = RISCV_ACLINT_MTIMER(dev);
cb->num = i;
env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
&riscv_aclint_mtimer_cb, cb);
env->timecmp = 0;
qdev_connect_gpio_out(dev, i,
qdev_get_gpio_in(DEVICE(rvcpu), IRQ_M_TIMER));
}
return dev;
}
/* CPU read [M|S]SWI register */
static uint64_t riscv_aclint_swi_read(void *opaque, hwaddr addr,
unsigned size)
{
RISCVAclintSwiState *swi = opaque;
if (addr < (swi->num_harts << 2)) {
size_t hartid = swi->hartid_base + (addr >> 2);
CPUState *cpu = qemu_get_cpu(hartid);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
if (!env) {
qemu_log_mask(LOG_GUEST_ERROR,
"aclint-swi: invalid hartid: %zu", hartid);
} else if ((addr & 0x3) == 0) {
return (swi->sswi) ? 0 : ((env->mip & MIP_MSIP) > 0);
}
}
qemu_log_mask(LOG_UNIMP,
"aclint-swi: invalid read: %08x", (uint32_t)addr);
return 0;
}
/* CPU write [M|S]SWI register */
static void riscv_aclint_swi_write(void *opaque, hwaddr addr, uint64_t value,
unsigned size)
{
RISCVAclintSwiState *swi = opaque;
if (addr < (swi->num_harts << 2)) {
size_t hartid = swi->hartid_base + (addr >> 2);
CPUState *cpu = qemu_get_cpu(hartid);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
if (!env) {
qemu_log_mask(LOG_GUEST_ERROR,
"aclint-swi: invalid hartid: %zu", hartid);
} else if ((addr & 0x3) == 0) {
if (value & 0x1) {
qemu_irq_raise(swi->soft_irqs[hartid - swi->hartid_base]);
} else {
if (!swi->sswi) {
qemu_irq_lower(swi->soft_irqs[hartid - swi->hartid_base]);
}
}
return;
}
}
qemu_log_mask(LOG_UNIMP,
"aclint-swi: invalid write: %08x", (uint32_t)addr);
}
static const MemoryRegionOps riscv_aclint_swi_ops = {
.read = riscv_aclint_swi_read,
.write = riscv_aclint_swi_write,
.endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 4
}
};
static Property riscv_aclint_swi_properties[] = {
DEFINE_PROP_UINT32("hartid-base", RISCVAclintSwiState, hartid_base, 0),
DEFINE_PROP_UINT32("num-harts", RISCVAclintSwiState, num_harts, 1),
DEFINE_PROP_UINT32("sswi", RISCVAclintSwiState, sswi, false),
DEFINE_PROP_END_OF_LIST(),
};
static void riscv_aclint_swi_realize(DeviceState *dev, Error **errp)
{
RISCVAclintSwiState *swi = RISCV_ACLINT_SWI(dev);
int i;
memory_region_init_io(&swi->mmio, OBJECT(dev), &riscv_aclint_swi_ops, swi,
TYPE_RISCV_ACLINT_SWI, RISCV_ACLINT_SWI_SIZE);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &swi->mmio);
swi->soft_irqs = g_malloc(sizeof(qemu_irq) * swi->num_harts);
qdev_init_gpio_out(dev, swi->soft_irqs, swi->num_harts);
/* Claim software interrupt bits */
for (i = 0; i < swi->num_harts; i++) {
RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(swi->hartid_base + i));
/* We don't claim mip.SSIP because it is writeable by software */
if (riscv_cpu_claim_interrupts(cpu, swi->sswi ? 0 : MIP_MSIP) < 0) {
error_report("MSIP already claimed");
exit(1);
}
}
}
static void riscv_aclint_swi_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = riscv_aclint_swi_realize;
device_class_set_props(dc, riscv_aclint_swi_properties);
}
static const TypeInfo riscv_aclint_swi_info = {
.name = TYPE_RISCV_ACLINT_SWI,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(RISCVAclintSwiState),
.class_init = riscv_aclint_swi_class_init,
};
/*
* Create ACLINT [M|S]SWI device.
*/
DeviceState *riscv_aclint_swi_create(hwaddr addr, uint32_t hartid_base,
uint32_t num_harts, bool sswi)
{
int i;
DeviceState *dev = qdev_new(TYPE_RISCV_ACLINT_SWI);
assert(num_harts <= RISCV_ACLINT_MAX_HARTS);
assert(!(addr & 0x3));
qdev_prop_set_uint32(dev, "hartid-base", hartid_base);
qdev_prop_set_uint32(dev, "num-harts", num_harts);
qdev_prop_set_uint32(dev, "sswi", sswi ? true : false);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
for (i = 0; i < num_harts; i++) {
CPUState *cpu = qemu_get_cpu(hartid_base + i);
RISCVCPU *rvcpu = RISCV_CPU(cpu);
qdev_connect_gpio_out(dev, i,
qdev_get_gpio_in(DEVICE(rvcpu),
(sswi) ? IRQ_S_SOFT : IRQ_M_SOFT));
}
return dev;
}
static void riscv_aclint_register_types(void)
{
type_register_static(&riscv_aclint_mtimer_info);
type_register_static(&riscv_aclint_swi_info);
}
type_init(riscv_aclint_register_types)
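A hedged usage sketch, not from this patch, of how a board model might instantiate the two devices above in place of a SiFive CLINT; the base address and timebase frequency are placeholder assumptions.
#define BOARD_ACLINT_BASE    0x2000000    /* assumption */
#define BOARD_TIMEBASE_FREQ  10000000     /* assumption: 10 MHz */
static void board_create_aclint(uint32_t num_harts)
{
    /* MSWI at the aperture base, MTIMER immediately after it */
    riscv_aclint_swi_create(BOARD_ACLINT_BASE, 0, num_harts, false);
    riscv_aclint_mtimer_create(BOARD_ACLINT_BASE + RISCV_ACLINT_SWI_SIZE,
                               RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
                               0, num_harts,
                               RISCV_ACLINT_DEFAULT_MTIMECMP,
                               RISCV_ACLINT_DEFAULT_MTIME,
                               BOARD_TIMEBASE_FREQ, true);
}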

View File

@ -1,287 +0,0 @@
/*
* SiFive CLINT (Core Local Interruptor)
*
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
* Copyright (c) 2017 SiFive, Inc.
*
* This provides real-time clock, timer and interprocessor interrupts.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/sysbus.h"
#include "target/riscv/cpu.h"
#include "hw/qdev-properties.h"
#include "hw/intc/sifive_clint.h"
#include "qemu/timer.h"
static uint64_t cpu_riscv_read_rtc(uint32_t timebase_freq)
{
return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
timebase_freq, NANOSECONDS_PER_SECOND);
}
/*
* Called when timecmp is written to update the QEMU timer or immediately
* trigger timer interrupt if mtimecmp <= current timer value.
*/
static void sifive_clint_write_timecmp(RISCVCPU *cpu, uint64_t value,
uint32_t timebase_freq)
{
uint64_t next;
uint64_t diff;
uint64_t rtc_r = cpu_riscv_read_rtc(timebase_freq);
cpu->env.timecmp = value;
if (cpu->env.timecmp <= rtc_r) {
/* if we're setting an MTIMECMP value in the "past",
immediately raise the timer interrupt */
riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(1));
return;
}
/* otherwise, set up the future timer interrupt */
riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(0));
diff = cpu->env.timecmp - rtc_r;
/* back to ns (note args switched in muldiv64) */
uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq);
/*
* check if ns_diff overflowed and check if the addition would potentially
* overflow
*/
if ((NANOSECONDS_PER_SECOND > timebase_freq && ns_diff < diff) ||
ns_diff > INT64_MAX) {
next = INT64_MAX;
} else {
/*
* as it is very unlikely qemu_clock_get_ns will return a value
* greater than INT64_MAX, no additional check is needed for an
* unsigned integer overflow.
*/
next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns_diff;
/*
* if ns_diff is INT64_MAX next may still be outside the range
* of a signed integer.
*/
next = MIN(next, INT64_MAX);
}
timer_mod(cpu->env.timer, next);
}
/*
* Callback used when the timer set using timer_mod expires.
* Should raise the timer interrupt line
*/
static void sifive_clint_timer_cb(void *opaque)
{
RISCVCPU *cpu = opaque;
riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(1));
}
/* CPU wants to read rtc or timecmp register */
static uint64_t sifive_clint_read(void *opaque, hwaddr addr, unsigned size)
{
SiFiveCLINTState *clint = opaque;
if (addr >= clint->sip_base &&
addr < clint->sip_base + (clint->num_harts << 2)) {
size_t hartid = clint->hartid_base + ((addr - clint->sip_base) >> 2);
CPUState *cpu = qemu_get_cpu(hartid);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
if (!env) {
error_report("clint: invalid timecmp hartid: %zu", hartid);
} else if ((addr & 0x3) == 0) {
return (env->mip & MIP_MSIP) > 0;
} else {
error_report("clint: invalid read: %08x", (uint32_t)addr);
return 0;
}
} else if (addr >= clint->timecmp_base &&
addr < clint->timecmp_base + (clint->num_harts << 3)) {
size_t hartid = clint->hartid_base +
((addr - clint->timecmp_base) >> 3);
CPUState *cpu = qemu_get_cpu(hartid);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
if (!env) {
error_report("clint: invalid timecmp hartid: %zu", hartid);
} else if ((addr & 0x7) == 0) {
/* timecmp_lo */
uint64_t timecmp = env->timecmp;
return timecmp & 0xFFFFFFFF;
} else if ((addr & 0x7) == 4) {
/* timecmp_hi */
uint64_t timecmp = env->timecmp;
return (timecmp >> 32) & 0xFFFFFFFF;
} else {
error_report("clint: invalid read: %08x", (uint32_t)addr);
return 0;
}
} else if (addr == clint->time_base) {
/* time_lo */
return cpu_riscv_read_rtc(clint->timebase_freq) & 0xFFFFFFFF;
} else if (addr == clint->time_base + 4) {
/* time_hi */
return (cpu_riscv_read_rtc(clint->timebase_freq) >> 32) & 0xFFFFFFFF;
}
error_report("clint: invalid read: %08x", (uint32_t)addr);
return 0;
}
/* CPU wrote to rtc or timecmp register */
static void sifive_clint_write(void *opaque, hwaddr addr, uint64_t value,
unsigned size)
{
SiFiveCLINTState *clint = opaque;
if (addr >= clint->sip_base &&
addr < clint->sip_base + (clint->num_harts << 2)) {
size_t hartid = clint->hartid_base + ((addr - clint->sip_base) >> 2);
CPUState *cpu = qemu_get_cpu(hartid);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
if (!env) {
error_report("clint: invalid timecmp hartid: %zu", hartid);
} else if ((addr & 0x3) == 0) {
riscv_cpu_update_mip(RISCV_CPU(cpu), MIP_MSIP, BOOL_TO_MASK(value));
} else {
error_report("clint: invalid sip write: %08x", (uint32_t)addr);
}
return;
} else if (addr >= clint->timecmp_base &&
addr < clint->timecmp_base + (clint->num_harts << 3)) {
size_t hartid = clint->hartid_base +
((addr - clint->timecmp_base) >> 3);
CPUState *cpu = qemu_get_cpu(hartid);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
if (!env) {
error_report("clint: invalid timecmp hartid: %zu", hartid);
} else if ((addr & 0x7) == 0) {
/* timecmp_lo */
uint64_t timecmp_hi = env->timecmp >> 32;
sifive_clint_write_timecmp(RISCV_CPU(cpu),
timecmp_hi << 32 | (value & 0xFFFFFFFF), clint->timebase_freq);
return;
} else if ((addr & 0x7) == 4) {
/* timecmp_hi */
uint64_t timecmp_lo = env->timecmp;
sifive_clint_write_timecmp(RISCV_CPU(cpu),
value << 32 | (timecmp_lo & 0xFFFFFFFF), clint->timebase_freq);
} else {
error_report("clint: invalid timecmp write: %08x", (uint32_t)addr);
}
return;
} else if (addr == clint->time_base) {
/* time_lo */
error_report("clint: time_lo write not implemented");
return;
} else if (addr == clint->time_base + 4) {
/* time_hi */
error_report("clint: time_hi write not implemented");
return;
}
error_report("clint: invalid write: %08x", (uint32_t)addr);
}
static const MemoryRegionOps sifive_clint_ops = {
.read = sifive_clint_read,
.write = sifive_clint_write,
.endianness = DEVICE_LITTLE_ENDIAN,
.valid = {
.min_access_size = 4,
.max_access_size = 8
}
};
static Property sifive_clint_properties[] = {
DEFINE_PROP_UINT32("hartid-base", SiFiveCLINTState, hartid_base, 0),
DEFINE_PROP_UINT32("num-harts", SiFiveCLINTState, num_harts, 0),
DEFINE_PROP_UINT32("sip-base", SiFiveCLINTState, sip_base, 0),
DEFINE_PROP_UINT32("timecmp-base", SiFiveCLINTState, timecmp_base, 0),
DEFINE_PROP_UINT32("time-base", SiFiveCLINTState, time_base, 0),
DEFINE_PROP_UINT32("aperture-size", SiFiveCLINTState, aperture_size, 0),
DEFINE_PROP_UINT32("timebase-freq", SiFiveCLINTState, timebase_freq, 0),
DEFINE_PROP_END_OF_LIST(),
};
static void sifive_clint_realize(DeviceState *dev, Error **errp)
{
SiFiveCLINTState *s = SIFIVE_CLINT(dev);
memory_region_init_io(&s->mmio, OBJECT(dev), &sifive_clint_ops, s,
TYPE_SIFIVE_CLINT, s->aperture_size);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio);
}
static void sifive_clint_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = sifive_clint_realize;
device_class_set_props(dc, sifive_clint_properties);
}
static const TypeInfo sifive_clint_info = {
.name = TYPE_SIFIVE_CLINT,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(SiFiveCLINTState),
.class_init = sifive_clint_class_init,
};
static void sifive_clint_register_types(void)
{
type_register_static(&sifive_clint_info);
}
type_init(sifive_clint_register_types)
/*
* Create CLINT device.
*/
DeviceState *sifive_clint_create(hwaddr addr, hwaddr size,
uint32_t hartid_base, uint32_t num_harts, uint32_t sip_base,
uint32_t timecmp_base, uint32_t time_base, uint32_t timebase_freq,
bool provide_rdtime)
{
int i;
for (i = 0; i < num_harts; i++) {
CPUState *cpu = qemu_get_cpu(hartid_base + i);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
if (!env) {
continue;
}
if (provide_rdtime) {
riscv_cpu_set_rdtime_fn(env, cpu_riscv_read_rtc, timebase_freq);
}
env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
&sifive_clint_timer_cb, cpu);
env->timecmp = 0;
}
DeviceState *dev = qdev_new(TYPE_SIFIVE_CLINT);
qdev_prop_set_uint32(dev, "hartid-base", hartid_base);
qdev_prop_set_uint32(dev, "num-harts", num_harts);
qdev_prop_set_uint32(dev, "sip-base", sip_base);
qdev_prop_set_uint32(dev, "timecmp-base", timecmp_base);
qdev_prop_set_uint32(dev, "time-base", time_base);
qdev_prop_set_uint32(dev, "aperture-size", size);
qdev_prop_set_uint32(dev, "timebase-freq", timebase_freq);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
return dev;
}

View File

@ -29,6 +29,7 @@
#include "hw/intc/sifive_plic.h"
#include "target/riscv/cpu.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#define RISCV_DEBUG_PLIC 0
@ -139,18 +140,14 @@ static void sifive_plic_update(SiFivePLICState *plic)
for (addrid = 0; addrid < plic->num_addrs; addrid++) {
uint32_t hartid = plic->addr_config[addrid].hartid;
PLICMode mode = plic->addr_config[addrid].mode;
CPUState *cpu = qemu_get_cpu(hartid);
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
if (!env) {
continue;
}
int level = sifive_plic_irqs_pending(plic, addrid);
switch (mode) {
case PLICMode_M:
riscv_cpu_update_mip(RISCV_CPU(cpu), MIP_MEIP, BOOL_TO_MASK(level));
qemu_set_irq(plic->m_external_irqs[hartid - plic->hartid_base], level);
break;
case PLICMode_S:
riscv_cpu_update_mip(RISCV_CPU(cpu), MIP_SEIP, BOOL_TO_MASK(level));
qemu_set_irq(plic->s_external_irqs[hartid - plic->hartid_base], level);
break;
default:
break;
@ -456,6 +453,12 @@ static void sifive_plic_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &plic->mmio);
qdev_init_gpio_in(dev, sifive_plic_irq_request, plic->num_sources);
plic->s_external_irqs = g_malloc(sizeof(qemu_irq) * plic->num_harts);
qdev_init_gpio_out(dev, plic->s_external_irqs, plic->num_harts);
plic->m_external_irqs = g_malloc(sizeof(qemu_irq) * plic->num_harts);
qdev_init_gpio_out(dev, plic->m_external_irqs, plic->num_harts);
/* We can't allow the supervisor to control SEIP as this would allow the
* supervisor to clear a pending external interrupt which will result in
* a lost interrupt in the case a PLIC is attached. The SEIP bit must be
@ -520,6 +523,7 @@ type_init(sifive_plic_register_types)
* Create PLIC device.
*/
DeviceState *sifive_plic_create(hwaddr addr, char *hart_config,
uint32_t num_harts,
uint32_t hartid_base, uint32_t num_sources,
uint32_t num_priorities, uint32_t priority_base,
uint32_t pending_base, uint32_t enable_base,
@ -527,6 +531,8 @@ DeviceState *sifive_plic_create(hwaddr addr, char *hart_config,
uint32_t context_stride, uint32_t aperture_size)
{
DeviceState *dev = qdev_new(TYPE_SIFIVE_PLIC);
int i;
assert(enable_stride == (enable_stride & -enable_stride));
assert(context_stride == (context_stride & -context_stride));
qdev_prop_set_string(dev, "hart-config", hart_config);
@ -542,5 +548,15 @@ DeviceState *sifive_plic_create(hwaddr addr, char *hart_config,
qdev_prop_set_uint32(dev, "aperture-size", aperture_size);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
for (i = 0; i < num_harts; i++) {
CPUState *cpu = qemu_get_cpu(hartid_base + i);
qdev_connect_gpio_out(dev, i,
qdev_get_gpio_in(DEVICE(cpu), IRQ_S_EXT));
qdev_connect_gpio_out(dev, num_harts + i,
qdev_get_gpio_in(DEVICE(cpu), IRQ_M_EXT));
}
return dev;
}
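With the extra num_harts argument the helper can wire the PLIC's new GPIO outputs directly to the CPUs: output i drives hart i's IRQ_S_EXT line and output num_harts + i its IRQ_M_EXT line, which is what lets sifive_plic_update() use qemu_set_irq() instead of poking mip through riscv_cpu_update_mip().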

View File

@ -101,14 +101,26 @@
#define AST2600_CLK_STOP_CTRL_CLR TO_REG(0x84)
#define AST2600_CLK_STOP_CTRL2 TO_REG(0x90)
#define AST2600_CLK_STOP_CTRL2_CLR TO_REG(0x94)
#define AST2600_DEBUG_CTRL TO_REG(0xC8)
#define AST2600_DEBUG_CTRL2 TO_REG(0xD8)
#define AST2600_SDRAM_HANDSHAKE TO_REG(0x100)
#define AST2600_HPLL_PARAM TO_REG(0x200)
#define AST2600_HPLL_EXT TO_REG(0x204)
#define AST2600_APLL_PARAM TO_REG(0x210)
#define AST2600_APLL_EXT TO_REG(0x214)
#define AST2600_MPLL_PARAM TO_REG(0x220)
#define AST2600_MPLL_EXT TO_REG(0x224)
#define AST2600_EPLL_PARAM TO_REG(0x240)
#define AST2600_EPLL_EXT TO_REG(0x244)
#define AST2600_DPLL_PARAM TO_REG(0x260)
#define AST2600_DPLL_EXT TO_REG(0x264)
#define AST2600_CLK_SEL TO_REG(0x300)
#define AST2600_CLK_SEL2 TO_REG(0x304)
#define AST2600_CLK_SEL3 TO_REG(0x310)
#define AST2600_CLK_SEL3 TO_REG(0x308)
#define AST2600_CLK_SEL4 TO_REG(0x310)
#define AST2600_CLK_SEL5 TO_REG(0x314)
#define AST2600_UARTCLK TO_REG(0x338)
#define AST2600_HUARTCLK TO_REG(0x33C)
#define AST2600_HW_STRAP1 TO_REG(0x500)
#define AST2600_HW_STRAP1_CLR TO_REG(0x504)
#define AST2600_HW_STRAP1_PROT TO_REG(0x508)
@ -433,6 +445,8 @@ static uint32_t aspeed_silicon_revs[] = {
AST2500_A1_SILICON_REV,
AST2600_A0_SILICON_REV,
AST2600_A1_SILICON_REV,
AST2600_A2_SILICON_REV,
AST2600_A3_SILICON_REV,
};
bool is_supported_silicon_rev(uint32_t silicon_rev)
@ -651,16 +665,28 @@ static const MemoryRegionOps aspeed_ast2600_scu_ops = {
.valid.unaligned = false,
};
static const uint32_t ast2600_a1_resets[ASPEED_AST2600_SCU_NR_REGS] = {
static const uint32_t ast2600_a3_resets[ASPEED_AST2600_SCU_NR_REGS] = {
[AST2600_SYS_RST_CTRL] = 0xF7C3FED8,
[AST2600_SYS_RST_CTRL2] = 0xFFFFFFFC,
[AST2600_SYS_RST_CTRL2] = 0x0DFFFFFC,
[AST2600_CLK_STOP_CTRL] = 0xFFFF7F8A,
[AST2600_CLK_STOP_CTRL2] = 0xFFF0FFF0,
[AST2600_DEBUG_CTRL] = 0x00000FFF,
[AST2600_DEBUG_CTRL2] = 0x000000FF,
[AST2600_SDRAM_HANDSHAKE] = 0x00000000,
[AST2600_HPLL_PARAM] = 0x1000405F,
[AST2600_HPLL_PARAM] = 0x1000408F,
[AST2600_APLL_PARAM] = 0x1000405F,
[AST2600_MPLL_PARAM] = 0x1008405F,
[AST2600_EPLL_PARAM] = 0x1004077F,
[AST2600_DPLL_PARAM] = 0x1078405F,
[AST2600_CLK_SEL] = 0xF3940000,
[AST2600_CLK_SEL2] = 0x00700000,
[AST2600_CLK_SEL3] = 0x00000000,
[AST2600_CLK_SEL4] = 0xF3F40000,
[AST2600_CLK_SEL5] = 0x30000000,
[AST2600_UARTCLK] = 0x00014506,
[AST2600_HUARTCLK] = 0x000145C0,
[AST2600_CHIP_ID0] = 0x1234ABCD,
[AST2600_CHIP_ID1] = 0x88884444,
};
static void aspeed_ast2600_scu_reset(DeviceState *dev)
@ -675,7 +701,7 @@ static void aspeed_ast2600_scu_reset(DeviceState *dev)
* of actual revision. QEMU and Linux only support A1 onwards so this is
* sufficient.
*/
s->regs[AST2600_SILICON_REV] = AST2600_A1_SILICON_REV;
s->regs[AST2600_SILICON_REV] = AST2600_A3_SILICON_REV;
s->regs[AST2600_SILICON_REV2] = s->silicon_rev;
s->regs[AST2600_HW_STRAP1] = s->hw_strap1;
s->regs[AST2600_HW_STRAP2] = s->hw_strap2;
@ -689,7 +715,7 @@ static void aspeed_2600_scu_class_init(ObjectClass *klass, void *data)
dc->desc = "ASPEED 2600 System Control Unit";
dc->reset = aspeed_ast2600_scu_reset;
asc->resets = ast2600_a1_resets;
asc->resets = ast2600_a3_resets;
asc->calc_hpll = aspeed_2500_scu_calc_hpll; /* No change since AST2500 */
asc->apb_divider = 4;
asc->nr_regs = ASPEED_AST2600_SCU_NR_REGS;

View File

@ -272,7 +272,7 @@ static void pca955x_get_led(Object *obj, Visitor *v, const char *name,
* reading the INPUTx reg
*/
reg = PCA9552_LS0 + led / 4;
state = (pca955x_read(s, reg) >> (led % 8)) & 0x3;
state = (pca955x_read(s, reg) >> ((led % 4) * 2)) & 0x3;
visit_type_str(v, name, (char **)&led_state[state], errp);
}
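Worked example: each LSn register packs four LEDs at two bits apiece, so LED 6 lives in LS1 (PCA9552_LS0 + 6 / 4) at bit offset (6 % 4) * 2 = 4; the previous expression shifted by 6 % 8 = 6 and therefore read the neighbouring LED's field.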

View File

@ -269,6 +269,21 @@ static uint64_t zynq_slcr_compute_clock(const uint64_t periods[],
zynq_slcr_compute_clock((plls), (state)->regs[reg], \
reg ## _ ## enable_field ## _SHIFT)
static void zynq_slcr_compute_clocks_internal(ZynqSLCRState *s, uint64_t ps_clk)
{
uint64_t io_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_IO_PLL_CTRL]);
uint64_t arm_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_ARM_PLL_CTRL]);
uint64_t ddr_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_DDR_PLL_CTRL]);
uint64_t uart_mux[4] = {io_pll, io_pll, arm_pll, ddr_pll};
/* compute uartX reference clocks */
clock_set(s->uart0_ref_clk,
ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT0));
clock_set(s->uart1_ref_clk,
ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT1));
}
/**
* Compute and set the output clock periods.
* But do not propagate them further. Connected clocks
@ -283,17 +298,7 @@ static void zynq_slcr_compute_clocks(ZynqSLCRState *s)
ps_clk = 0;
}
uint64_t io_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_IO_PLL_CTRL]);
uint64_t arm_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_ARM_PLL_CTRL]);
uint64_t ddr_pll = zynq_slcr_compute_pll(ps_clk, s->regs[R_DDR_PLL_CTRL]);
uint64_t uart_mux[4] = {io_pll, io_pll, arm_pll, ddr_pll};
/* compute uartX reference clocks */
clock_set(s->uart0_ref_clk,
ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT0));
clock_set(s->uart1_ref_clk,
ZYNQ_COMPUTE_CLK(s, uart_mux, R_UART_CLK_CTRL, CLKACT1));
zynq_slcr_compute_clocks_internal(s, ps_clk);
}
/**
@ -416,7 +421,7 @@ static void zynq_slcr_reset_hold(Object *obj)
ZynqSLCRState *s = ZYNQ_SLCR(obj);
/* will disable all output clocks */
zynq_slcr_compute_clocks(s);
zynq_slcr_compute_clocks_internal(s, 0);
zynq_slcr_propagate_clocks(s);
}
@ -425,7 +430,7 @@ static void zynq_slcr_reset_exit(Object *obj)
ZynqSLCRState *s = ZYNQ_SLCR(obj);
/* will compute output clocks according to ps_clk and registers */
zynq_slcr_compute_clocks(s);
zynq_slcr_compute_clocks_internal(s, clock_get(s->ps_clk));
zynq_slcr_propagate_clocks(s);
}
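The refactor above extracts the PLL/mux computation into zynq_slcr_compute_clocks_internal() so that the reset hold phase can force the input period to zero (disabling every derived clock) and the reset exit phase can recompute from the current ps_clk value. A minimal sketch of that split, using hypothetical names rather than QEMU's clock API; a period of 0 stands for a disabled clock, as in the model above.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified model of the reset-hold/reset-exit split:
     * the same computation runs in both phases, only the input differs. */
    struct slcr {
        uint64_t ps_clk_period;   /* input clock period, forced to 0 in reset */
        uint64_t uart_ref_period; /* derived output */
    };

    static void compute_clocks_internal(struct slcr *s, uint64_t ps_clk_period)
    {
        /* Stand-in for the PLL/mux/divider maths: a zero (disabled) input
         * yields disabled outputs. */
        s->uart_ref_period = ps_clk_period ? ps_clk_period * 8 : 0;
    }

    static void reset_hold(struct slcr *s)
    {
        compute_clocks_internal(s, 0);                /* force outputs off */
    }

    static void reset_exit(struct slcr *s)
    {
        compute_clocks_internal(s, s->ps_clk_period); /* recompute from input */
    }

    int main(void)
    {
        struct slcr s = { .ps_clk_period = 30 };      /* ~33 MHz, period in ns */
        reset_hold(&s);
        printf("held: uart_ref=%llu\n", (unsigned long long)s.uart_ref_period);
        reset_exit(&s);
        printf("exit: uart_ref=%llu\n", (unsigned long long)s.uart_ref_period);
        return 0;
    }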

View File

@ -1746,10 +1746,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
VirtIONet *n = qemu_get_nic_opaque(nc);
VirtIONetQueue *q = virtio_net_get_subqueue(nc);
VirtIODevice *vdev = VIRTIO_DEVICE(n);
VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
size_t lens[VIRTQUEUE_MAX_SIZE];
struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
struct virtio_net_hdr_mrg_rxbuf mhdr;
unsigned mhdr_cnt = 0;
size_t offset, i, guest_offset;
size_t offset, i, guest_offset, j;
ssize_t err;
if (!virtio_net_can_receive(nc)) {
return -1;
@ -1780,6 +1783,12 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
total = 0;
if (i == VIRTQUEUE_MAX_SIZE) {
virtio_error(vdev, "virtio-net unexpected long buffer chain");
err = size;
goto err;
}
elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
if (!elem) {
if (i) {
@ -1791,7 +1800,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
n->guest_hdr_len, n->host_hdr_len,
vdev->guest_features);
}
return -1;
err = -1;
goto err;
}
if (elem->in_num < 1) {
@ -1799,7 +1809,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
"virtio-net receive queue contains no in buffers");
virtqueue_detach_element(q->rx_vq, elem, 0);
g_free(elem);
return -1;
err = -1;
goto err;
}
sg = elem->in_sg;
@ -1836,12 +1847,13 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
if (!n->mergeable_rx_bufs && offset < size) {
virtqueue_unpop(q->rx_vq, elem, total);
g_free(elem);
return size;
err = size;
goto err;
}
/* signal other side */
virtqueue_fill(q->rx_vq, elem, total, i++);
g_free(elem);
elems[i] = elem;
lens[i] = total;
i++;
}
if (mhdr_cnt) {
@ -1851,10 +1863,23 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
&mhdr.num_buffers, sizeof mhdr.num_buffers);
}
for (j = 0; j < i; j++) {
/* signal other side */
virtqueue_fill(q->rx_vq, elems[j], lens[j], j);
g_free(elems[j]);
}
virtqueue_flush(q->rx_vq, i);
virtio_notify(vdev, q->rx_vq);
return size;
err:
for (j = 0; j < i; j++) {
g_free(elems[j]);
}
return err;
}
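The rework above stops publishing receive buffers one by one inside the copy loop: each popped element and its filled length are stashed in elems[]/lens[], and only after the whole frame has been copied are the elements filled, the queue flushed and the guest notified; the new err: label frees everything popped so far without exposing a half-written chain. A stand-alone sketch of that collect-then-publish pattern, with hypothetical helpers instead of the real virtqueue API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    #define MAX_ELEMS 16

    /* Hypothetical element type standing in for VirtQueueElement. */
    struct elem { size_t len; };

    static struct elem *pop_elem(int fail)
    {
        return fail ? NULL : calloc(1, sizeof(struct elem));
    }

    /* Copy a buffer into queue elements, but only publish ("fill") them once
     * the whole buffer fits; on error, free what was popped and publish
     * nothing, so no partially written chain becomes visible. */
    static ssize_t receive(size_t size, int inject_failure)
    {
        struct elem *elems[MAX_ELEMS];
        size_t lens[MAX_ELEMS];
        size_t i = 0, j, offset = 0;
        ssize_t err;

        while (offset < size) {
            struct elem *e = pop_elem(inject_failure && i == 1);
            if (!e || i == MAX_ELEMS) {
                free(e);
                err = -1;
                goto err;
            }
            e->len = size - offset > 64 ? 64 : size - offset;  /* chunk copy */
            offset += e->len;
            elems[i] = e;
            lens[i] = e->len;
            i++;
        }

        for (j = 0; j < i; j++) {        /* publish all elements at once */
            printf("fill elem %zu len %zu\n", j, lens[j]);
            free(elems[j]);
        }
        return (ssize_t)size;

    err:
        for (j = 0; j < i; j++) {        /* roll back: nothing was published */
            free(elems[j]);
        }
        return err;
    }

    int main(void)
    {
        printf("ok path  -> %zd\n", receive(150, 0));
        printf("err path -> %zd\n", receive(150, 1));
        return 0;
    }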
static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,

View File

@ -3893,6 +3893,10 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
return ns->status;
}
if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) {
return NVME_INVALID_FIELD;
}
req->ns = ns;
switch (req->cmd.opcode) {
@ -5191,7 +5195,7 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
uint32_t nsid = le32_to_cpu(req->cmd.nsid);
uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
bool attach = !(dw10 & 0xf);
uint8_t sel = dw10 & 0xf;
uint16_t *nr_ids = &list[0];
uint16_t *ids = &list[1];
uint16_t ret;
@ -5224,7 +5228,8 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
}
if (attach) {
switch (sel) {
case NVME_NS_ATTACHMENT_ATTACH:
if (nvme_ns(ctrl, nsid)) {
return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
}
@ -5235,7 +5240,10 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
nvme_attach_ns(ctrl, ns);
nvme_select_iocs_ns(ctrl, ns);
} else {
break;
case NVME_NS_ATTACHMENT_DETACH:
if (!nvme_ns(ctrl, nsid)) {
return NVME_NS_NOT_ATTACHED | NVME_DNR;
}
@ -5244,6 +5252,11 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
ns->attached--;
nvme_update_dmrsl(ctrl);
break;
default:
return NVME_INVALID_FIELD | NVME_DNR;
}
/*
@ -5466,6 +5479,10 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
return NVME_INVALID_FIELD | NVME_DNR;
}
if (NVME_CMD_FLAGS_FUSE(req->cmd.flags)) {
return NVME_INVALID_FIELD;
}
switch (req->cmd.opcode) {
case NVME_ADM_CMD_DELETE_SQ:
return nvme_del_sq(n, req);
@ -5623,14 +5640,6 @@ static int nvme_start_ctrl(NvmeCtrl *n)
trace_pci_nvme_err_startfail_sq();
return -1;
}
if (unlikely(!asq)) {
trace_pci_nvme_err_startfail_nbarasq();
return -1;
}
if (unlikely(!acq)) {
trace_pci_nvme_err_startfail_nbaracq();
return -1;
}
if (unlikely(asq & (page_size - 1))) {
trace_pci_nvme_err_startfail_asq_misaligned(asq);
return -1;
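Among the hw/nvme changes above, the Namespace Attachment handler now decodes the select field of CDW10 explicitly instead of treating any non-zero value as a detach, so reserved select values are rejected with Invalid Field. A small sketch of that decode; the enum values are assumptions based on the NVMe specification's SEL encoding (0 = attach, 1 = detach), and the function is illustrative, not the controller code.

    #include <stdint.h>
    #include <stdio.h>

    /* Enum names mirror the hunk above; the values are assumed from the
     * NVMe spec's Namespace Attachment SEL encoding. */
    enum {
        NVME_NS_ATTACHMENT_ATTACH = 0x0,
        NVME_NS_ATTACHMENT_DETACH = 0x1,
    };

    /* Decode the select field from CDW10 and dispatch, rejecting reserved
     * values instead of lumping them in with "detach". */
    static const char *ns_attachment_sel(uint32_t cdw10)
    {
        uint8_t sel = cdw10 & 0xf;

        switch (sel) {
        case NVME_NS_ATTACHMENT_ATTACH:
            return "attach";
        case NVME_NS_ATTACHMENT_DETACH:
            return "detach";
        default:
            return "invalid field";   /* previously fell through to detach */
        }
    }

    int main(void)
    {
        printf("sel=0 -> %s\n", ns_attachment_sel(0x0));
        printf("sel=1 -> %s\n", ns_attachment_sel(0x1));
        printf("sel=7 -> %s\n", ns_attachment_sel(0x7));
        return 0;
    }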

View File

@ -159,8 +159,6 @@ pci_nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx
pci_nvme_err_invalid_log_page(uint16_t cid, uint16_t lid) "cid %"PRIu16" lid 0x%"PRIx16""
pci_nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues"
pci_nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues"
pci_nvme_err_startfail_nbarasq(void) "nvme_start_ctrl failed because the admin submission queue address is null"
pci_nvme_err_startfail_nbaracq(void) "nvme_start_ctrl failed because the admin completion queue address is null"
pci_nvme_err_startfail_asq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin submission queue address is misaligned: 0x%"PRIx64""
pci_nvme_err_startfail_acq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin completion queue address is misaligned: 0x%"PRIx64""
pci_nvme_err_startfail_page_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too small: log2size=%u, min=%u"

Some files were not shown because too many files have changed in this diff.