Merge remote-tracking branch 'upstream/master' into main

commit f14fb258f3
@@ -2,8 +2,6 @@
#include "hw/core/cpu.h"
#include "exec/replay-core.h"

bool enable_cpu_pm = false;

void cpu_resume(CPUState *cpu)
{
}
@@ -1706,7 +1706,7 @@ static AudioState *audio_init(Audiodev *dev, const char *name)
size_t i;
int done = 0;
const char *drvname = NULL;
VMChangeStateEntry *e;
VMChangeStateEntry *vmse;
AudioState *s;
struct audio_driver *driver;
/* silence gcc warning about uninitialized variable */

@@ -1824,8 +1824,8 @@ static AudioState *audio_init(Audiodev *dev, const char *name)
s->period_ticks = dev->timer_period * (int64_t)SCALE_US;
}

e = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);
if (!e) {
vmse = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);
if (!vmse) {
dolog ("warning: Could not register change state handler\n"
"(Audio can continue looping even after stopping the VM)\n");
}

@@ -1900,10 +1900,8 @@ CaptureVoiceOut *AUD_add_capture(
cap = audio_pcm_capture_find_specific(s, as);
if (cap) {
QLIST_INSERT_HEAD (&cap->cb_head, cb, entries);
return cap;
} else {
HWVoiceOut *hw;
CaptureVoiceOut *cap;

cap = g_malloc0(sizeof(*cap));

@@ -1937,8 +1935,9 @@ CaptureVoiceOut *AUD_add_capture(
QLIST_FOREACH(hw, &s->hw_head_out, entries) {
audio_attach_capture (hw);
}
return cap;
}

return cap;
}

void AUD_del_capture (CaptureVoiceOut *cap, void *cb_opaque)
@@ -70,6 +70,9 @@ typedef struct QJackClient {
int buffersize;
jack_port_t **port;
QJackBuffer fifo;

/* Used as workspace by qjack_process() */
float **process_buffers;
}
QJackClient;

@@ -267,22 +270,21 @@ static int qjack_process(jack_nframes_t nframes, void *arg)
}

/* get the buffers for the ports */
float *buffers[c->nchannels];
for (int i = 0; i < c->nchannels; ++i) {
buffers[i] = jack_port_get_buffer(c->port[i], nframes);
c->process_buffers[i] = jack_port_get_buffer(c->port[i], nframes);
}

if (c->out) {
if (likely(c->enabled)) {
qjack_buffer_read_l(&c->fifo, buffers, nframes);
qjack_buffer_read_l(&c->fifo, c->process_buffers, nframes);
} else {
for (int i = 0; i < c->nchannels; ++i) {
memset(buffers[i], 0, nframes * sizeof(float));
memset(c->process_buffers[i], 0, nframes * sizeof(float));
}
}
} else {
if (likely(c->enabled)) {
qjack_buffer_write_l(&c->fifo, buffers, nframes);
qjack_buffer_write_l(&c->fifo, c->process_buffers, nframes);
}
}

@@ -400,7 +402,8 @@ static void qjack_client_connect_ports(QJackClient *c)
static int qjack_client_init(QJackClient *c)
{
jack_status_t status;
char client_name[jack_client_name_size()];
int client_name_len = jack_client_name_size(); /* includes NUL */
g_autofree char *client_name = g_new(char, client_name_len);
jack_options_t options = JackNullOption;

if (c->state == QJACK_STATE_RUNNING) {

@@ -409,7 +412,7 @@ static int qjack_client_init(QJackClient *c)

c->connect_ports = true;

snprintf(client_name, sizeof(client_name), "%s-%s",
snprintf(client_name, client_name_len, "%s-%s",
c->out ? "out" : "in",
c->opt->client_name ? c->opt->client_name : audio_application_name());

@@ -447,6 +450,9 @@ static int qjack_client_init(QJackClient *c)
jack_get_client_name(c->client));
}

/* Allocate working buffer for process callback */
c->process_buffers = g_new(float *, c->nchannels);

jack_set_process_callback(c->client, qjack_process, c);
jack_set_port_registration_callback(c->client, qjack_port_registration, c);
jack_set_xrun_callback(c->client, qjack_xrun, c);

@@ -578,6 +584,7 @@ static void qjack_client_fini_locked(QJackClient *c)

qjack_buffer_free(&c->fifo);
g_free(c->port);
g_free(c->process_buffers);

c->state = QJACK_STATE_DISCONNECTED;
/* fallthrough */
@@ -387,7 +387,8 @@ void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
bdrv_do_drained_begin(bs, parent, false);
}

void bdrv_drained_begin(BlockDriverState *bs)
void coroutine_mixed_fn
bdrv_drained_begin(BlockDriverState *bs)
{
IO_OR_GS_CODE();
bdrv_do_drained_begin(bs, NULL, true);

@@ -506,7 +507,7 @@ void bdrv_drain_all_begin_nopoll(void)
}
}

void bdrv_drain_all_begin(void)
void coroutine_mixed_fn bdrv_drain_all_begin(void)
{
BlockDriverState *bs = NULL;
block/nbd.c
@@ -339,7 +339,7 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
* We have connected, but must fail for other reasons.
* Send NBD_CMD_DISC as a courtesy to the server.
*/
NBDRequest request = { .type = NBD_CMD_DISC };
NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };

nbd_send_request(s->ioc, &request);

@@ -463,7 +463,8 @@ static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t cookie)
nbd_channel_error(s, ret);
return ret;
}
if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) {
if (nbd_reply_is_structured(&s->reply) &&
s->info.mode < NBD_MODE_STRUCTURED) {
nbd_channel_error(s, -EINVAL);
return -EINVAL;
}

@@ -519,6 +520,7 @@ nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,

qemu_co_mutex_lock(&s->send_mutex);
request->cookie = INDEX_TO_COOKIE(i);
request->mode = s->info.mode;

assert(s->ioc);

@@ -608,7 +610,7 @@ static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
NBDStructuredReplyChunk *chunk,
uint8_t *payload, uint64_t orig_length,
NBDExtent *extent, Error **errp)
NBDExtent32 *extent, Error **errp)
{
uint32_t context_id;

@@ -866,7 +868,7 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
}

/* handle structured reply chunk */
assert(s->info.structured_reply);
assert(s->info.mode >= NBD_MODE_STRUCTURED);
chunk = &s->reply.structured;

if (chunk->type == NBD_REPLY_TYPE_NONE) {

@@ -1070,7 +1072,8 @@ nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,
void *payload = NULL;
Error *local_err = NULL;

NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, s->info.structured_reply,
NBD_FOREACH_REPLY_CHUNK(s, iter, cookie,
s->info.mode >= NBD_MODE_STRUCTURED,
qiov, &reply, &payload)
{
int ret;

@@ -1115,7 +1118,7 @@ nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,

static int coroutine_fn
nbd_co_receive_blockstatus_reply(BDRVNBDState *s, uint64_t cookie,
uint64_t length, NBDExtent *extent,
uint64_t length, NBDExtent32 *extent,
int *request_ret, Error **errp)
{
NBDReplyChunkIter iter;

@@ -1302,10 +1305,11 @@ nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
NBDRequest request = {
.type = NBD_CMD_WRITE_ZEROES,
.from = offset,
.len = bytes, /* .len is uint32_t actually */
.len = bytes,
};

assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */
/* rely on max_pwrite_zeroes */
assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);

assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {

@@ -1352,10 +1356,11 @@ nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
NBDRequest request = {
.type = NBD_CMD_TRIM,
.from = offset,
.len = bytes, /* len is uint32_t */
.len = bytes,
};

assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */
/* rely on max_pdiscard */
assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);

assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {

@@ -1370,15 +1375,14 @@ static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
int64_t *pnum, int64_t *map, BlockDriverState **file)
{
int ret, request_ret;
NBDExtent extent = { 0 };
NBDExtent32 extent = { 0 };
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
Error *local_err = NULL;

NBDRequest request = {
.type = NBD_CMD_BLOCK_STATUS,
.from = offset,
.len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
MIN(bytes, s->info.size - offset)),
.len = MIN(bytes, s->info.size - offset),
.flags = NBD_CMD_FLAG_REQ_ONE,
};

@@ -1388,6 +1392,10 @@ static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
*file = bs;
return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
}
if (s->info.mode < NBD_MODE_EXTENDED) {
request.len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
request.len);
}

/*
* Work around the fact that the block layer doesn't do

@@ -1463,7 +1471,7 @@ static void nbd_yank(void *opaque)
static void nbd_client_close(BlockDriverState *bs)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = { .type = NBD_CMD_DISC };
NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };

if (s->ioc) {
nbd_send_request(s->ioc, &request);

@@ -1952,6 +1960,14 @@ static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.max_pwrite_zeroes = max;
bs->bl.max_transfer = max;

/*
* Assume that if the server supports extended headers, it also
* supports unlimited size zero and trim commands.
*/
if (s->info.mode >= NBD_MODE_EXTENDED) {
bs->bl.max_pdiscard = bs->bl.max_pwrite_zeroes = 0;
}

if (s->info.opt_block &&
s->info.opt_block > bs->bl.opt_transfer) {
bs->bl.opt_transfer = s->info.opt_block;
@@ -5288,7 +5288,7 @@ static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs,
return spec_info;
}

static int qcow2_has_zero_init(BlockDriverState *bs)
static int coroutine_mixed_fn qcow2_has_zero_init(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
bool preallocated;
@@ -570,8 +570,8 @@ static void coroutine_fn bdrv_qed_open_entry(void *opaque)
qemu_co_mutex_unlock(&s->table_lock);
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
static int coroutine_mixed_fn bdrv_qed_open(BlockDriverState *bs, QDict *options,
int flags, Error **errp)
{
QEDOpenCo qoc = {
.bs = bs,
@@ -317,8 +317,8 @@ static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tg
* @tgm: the current ThrottleGroupMember
* @direction: the ThrottleDirection
*/
static void schedule_next_request(ThrottleGroupMember *tgm,
ThrottleDirection direction)
static void coroutine_mixed_fn schedule_next_request(ThrottleGroupMember *tgm,
ThrottleDirection direction)
{
ThrottleState *ts = tgm->throttle_state;
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
@@ -167,7 +167,7 @@ iscsi_xcopy(void *src_lun, uint64_t src_off, void *dst_lun, uint64_t dst_off, ui
nbd_parse_blockstatus_compliance(const char *err) "ignoring extra data from non-compliant server: %s"
nbd_structured_read_compliance(const char *type) "server sent non-compliant unaligned read %s chunk"
nbd_read_reply_entry_fail(int ret, const char *err) "ret = %d, err: %s"
nbd_co_request_fail(uint64_t from, uint32_t len, uint64_t handle, uint16_t flags, uint16_t type, const char *name, int ret, const char *err) "Request failed { .from = %" PRIu64", .len = %" PRIu32 ", .handle = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) } ret = %d, err: %s"
nbd_co_request_fail(uint64_t from, uint64_t len, uint64_t handle, uint16_t flags, uint16_t type, const char *name, int ret, const char *err) "Request failed { .from = %" PRIu64", .len = %" PRIu64 ", .handle = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) } ret = %d, err: %s"
nbd_client_handshake(const char *export_name) "export '%s'"
nbd_client_handshake_success(const char *export_name) "export '%s'"
nbd_reconnect_attempt(unsigned in_flight) "in_flight %u"
@@ -14,7 +14,7 @@ static struct pa_block *pa_space_find_block(struct pa_space *ps, uint64_t pa)

for (i = 0; i < ps->block_nr; i++) {
if (ps->block[i].paddr <= pa &&
pa <= ps->block[i].paddr + ps->block[i].size) {
pa < ps->block[i].paddr + ps->block[i].size) {
return ps->block + i;
}
}

@@ -33,6 +33,30 @@ static uint8_t *pa_space_resolve(struct pa_space *ps, uint64_t pa)
return block->addr + (pa - block->paddr);
}

static void pa_block_align(struct pa_block *b)
{
uint64_t low_align = ((b->paddr - 1) | ELF2DMP_PAGE_MASK) + 1 - b->paddr;
uint64_t high_align = (b->paddr + b->size) & ELF2DMP_PAGE_MASK;

if (low_align == 0 && high_align == 0) {
return;
}

if (low_align + high_align < b->size) {
printf("Block 0x%"PRIx64"+:0x%"PRIx64" will be aligned to "
"0x%"PRIx64"+:0x%"PRIx64"\n", b->paddr, b->size,
b->paddr + low_align, b->size - low_align - high_align);
b->size -= low_align + high_align;
} else {
printf("Block 0x%"PRIx64"+:0x%"PRIx64" is too small to align\n",
b->paddr, b->size);
b->size = 0;
}

b->addr += low_align;
b->paddr += low_align;
}

int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf)
{
Elf64_Half phdr_nr = elf_getphdrnum(qemu_elf->map);

@@ -60,10 +84,13 @@ int pa_space_create(struct pa_space *ps, QEMU_Elf *qemu_elf)
.paddr = phdr[i].p_paddr,
.size = phdr[i].p_filesz,
};
block_i++;
pa_block_align(&ps->block[block_i]);
block_i = ps->block[block_i].size ? (block_i + 1) : block_i;
}
}

ps->block_nr = block_i;

return 0;
}
@@ -12,6 +12,7 @@

#define ELF2DMP_PAGE_BITS 12
#define ELF2DMP_PAGE_SIZE (1ULL << ELF2DMP_PAGE_BITS)
#define ELF2DMP_PAGE_MASK (ELF2DMP_PAGE_SIZE - 1)
#define ELF2DMP_PFN_MASK (~(ELF2DMP_PAGE_SIZE - 1))

#define INVALID_PA UINT64_MAX
@@ -20,6 +20,7 @@
#define PE_NAME "ntoskrnl.exe"

#define INITIAL_MXCSR 0x1f80
#define MAX_NUMBER_OF_RUNS 42

typedef struct idt_desc {
uint16_t offset1; /* offset bits 0..15 */

@@ -234,6 +235,42 @@ static int fix_dtb(struct va_space *vs, QEMU_Elf *qe)
return 1;
}

static void try_merge_runs(struct pa_space *ps,
WinDumpPhyMemDesc64 *PhysicalMemoryBlock)
{
unsigned int merge_cnt = 0, run_idx = 0;

PhysicalMemoryBlock->NumberOfRuns = 0;

for (size_t idx = 0; idx < ps->block_nr; idx++) {
struct pa_block *blk = ps->block + idx;
struct pa_block *next = blk + 1;

PhysicalMemoryBlock->NumberOfPages += blk->size / ELF2DMP_PAGE_SIZE;

if (idx + 1 != ps->block_nr && blk->paddr + blk->size == next->paddr) {
printf("Block #%zu 0x%"PRIx64"+:0x%"PRIx64" and %u previous will be"
" merged\n", idx, blk->paddr, blk->size, merge_cnt);
merge_cnt++;
} else {
struct pa_block *first_merged = blk - merge_cnt;

printf("Block #%zu 0x%"PRIx64"+:0x%"PRIx64" and %u previous will be"
" merged to 0x%"PRIx64"+:0x%"PRIx64" (run #%u)\n",
idx, blk->paddr, blk->size, merge_cnt, first_merged->paddr,
blk->paddr + blk->size - first_merged->paddr, run_idx);
PhysicalMemoryBlock->Run[run_idx] = (WinDumpPhyMemRun64) {
.BasePage = first_merged->paddr / ELF2DMP_PAGE_SIZE,
.PageCount = (blk->paddr + blk->size - first_merged->paddr) /
ELF2DMP_PAGE_SIZE,
};
PhysicalMemoryBlock->NumberOfRuns++;
run_idx++;
merge_cnt = 0;
}
}
}

static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps,
struct va_space *vs, uint64_t KdDebuggerDataBlock,
KDDEBUGGER_DATA64 *kdbg, uint64_t KdVersionBlock, int nr_cpus)

@@ -244,7 +281,6 @@ static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps,
KUSD_OFFSET_PRODUCT_TYPE);
DBGKD_GET_VERSION64 kvb;
WinDumpHeader64 h;
size_t i;

QEMU_BUILD_BUG_ON(KUSD_OFFSET_SUITE_MASK >= ELF2DMP_PAGE_SIZE);
QEMU_BUILD_BUG_ON(KUSD_OFFSET_PRODUCT_TYPE >= ELF2DMP_PAGE_SIZE);

@@ -282,13 +318,17 @@ static int fill_header(WinDumpHeader64 *hdr, struct pa_space *ps,
.RequiredDumpSpace = sizeof(h),
};

for (i = 0; i < ps->block_nr; i++) {
h.PhysicalMemoryBlock.NumberOfPages +=
ps->block[i].size / ELF2DMP_PAGE_SIZE;
h.PhysicalMemoryBlock.Run[i] = (WinDumpPhyMemRun64) {
.BasePage = ps->block[i].paddr / ELF2DMP_PAGE_SIZE,
.PageCount = ps->block[i].size / ELF2DMP_PAGE_SIZE,
};
if (h.PhysicalMemoryBlock.NumberOfRuns <= MAX_NUMBER_OF_RUNS) {
for (size_t idx = 0; idx < ps->block_nr; idx++) {
h.PhysicalMemoryBlock.NumberOfPages +=
ps->block[idx].size / ELF2DMP_PAGE_SIZE;
h.PhysicalMemoryBlock.Run[idx] = (WinDumpPhyMemRun64) {
.BasePage = ps->block[idx].paddr / ELF2DMP_PAGE_SIZE,
.PageCount = ps->block[idx].size / ELF2DMP_PAGE_SIZE,
};
}
} else {
try_merge_runs(ps, &h.PhysicalMemoryBlock);
}

h.RequiredDumpSpace +=

@@ -400,9 +440,10 @@ static int write_dump(struct pa_space *ps,
for (i = 0; i < ps->block_nr; i++) {
struct pa_block *b = &ps->block[i];

printf("Writing block #%zu/%zu to file...\n", i, ps->block_nr);
printf("Writing block #%zu/%zu of %"PRIu64" bytes to file...\n", i,
ps->block_nr, b->size);
if (fwrite(b->addr, b->size, 1, dmp_file) != 1) {
eprintf("Failed to write dump header\n");
eprintf("Failed to write block\n");
fclose(dmp_file);
return 1;
}

@@ -411,89 +452,64 @@ static int write_dump(struct pa_space *ps,
return fclose(dmp_file);
}

static bool pe_check_export_name(uint64_t base, void *start_addr,
struct va_space *vs)
{
IMAGE_EXPORT_DIRECTORY export_dir;
const char *pe_name;

if (pe_get_data_dir_entry(base, start_addr, IMAGE_FILE_EXPORT_DIRECTORY,
&export_dir, sizeof(export_dir), vs)) {
return false;
}

pe_name = va_space_resolve(vs, base + export_dir.Name);
if (!pe_name) {
return false;
}

return !strcmp(pe_name, PE_NAME);
}

static int pe_get_pdb_symstore_hash(uint64_t base, void *start_addr,
char *hash, struct va_space *vs)
static bool pe_check_pdb_name(uint64_t base, void *start_addr,
struct va_space *vs, OMFSignatureRSDS *rsds)
{
const char sign_rsds[4] = "RSDS";
IMAGE_DEBUG_DIRECTORY debug_dir;
OMFSignatureRSDS rsds;
char *pdb_name;
size_t pdb_name_sz;
size_t i;
char pdb_name[sizeof(PDB_NAME)];

if (pe_get_data_dir_entry(base, start_addr, IMAGE_FILE_DEBUG_DIRECTORY,
&debug_dir, sizeof(debug_dir), vs)) {
eprintf("Failed to get Debug Directory\n");
return 1;
return false;
}

if (debug_dir.Type != IMAGE_DEBUG_TYPE_CODEVIEW) {
return 1;
eprintf("Debug Directory type is not CodeView\n");
return false;
}

if (va_space_rw(vs,
base + debug_dir.AddressOfRawData,
&rsds, sizeof(rsds), 0)) {
return 1;
rsds, sizeof(*rsds), 0)) {
eprintf("Failed to resolve OMFSignatureRSDS\n");
return false;
}

printf("CodeView signature is \'%.4s\'\n", rsds.Signature);

if (memcmp(&rsds.Signature, sign_rsds, sizeof(sign_rsds))) {
return 1;
if (memcmp(&rsds->Signature, sign_rsds, sizeof(sign_rsds))) {
eprintf("CodeView signature is \'%.4s\', \'%s\' expected\n",
rsds->Signature, sign_rsds);
return false;
}

pdb_name_sz = debug_dir.SizeOfData - sizeof(rsds);
pdb_name = malloc(pdb_name_sz);
if (!pdb_name) {
return 1;
if (debug_dir.SizeOfData - sizeof(*rsds) != sizeof(PDB_NAME)) {
eprintf("PDB name size doesn't match\n");
return false;
}

if (va_space_rw(vs, base + debug_dir.AddressOfRawData +
offsetof(OMFSignatureRSDS, name), pdb_name, pdb_name_sz, 0)) {
free(pdb_name);
return 1;
offsetof(OMFSignatureRSDS, name), pdb_name, sizeof(PDB_NAME),
0)) {
eprintf("Failed to resolve PDB name\n");
return false;
}

printf("PDB name is \'%s\', \'%s\' expected\n", pdb_name, PDB_NAME);

if (strcmp(pdb_name, PDB_NAME)) {
eprintf("Unexpected PDB name, it seems the kernel isn't found\n");
free(pdb_name);
return 1;
}
return !strcmp(pdb_name, PDB_NAME);
}

free(pdb_name);

sprintf(hash, "%.08x%.04x%.04x%.02x%.02x", rsds.guid.a, rsds.guid.b,
rsds.guid.c, rsds.guid.d[0], rsds.guid.d[1]);
static void pe_get_pdb_symstore_hash(OMFSignatureRSDS *rsds, char *hash)
{
sprintf(hash, "%.08x%.04x%.04x%.02x%.02x", rsds->guid.a, rsds->guid.b,
rsds->guid.c, rsds->guid.d[0], rsds->guid.d[1]);
hash += 20;
for (i = 0; i < 6; i++, hash += 2) {
sprintf(hash, "%.02x", rsds.guid.e[i]);
for (unsigned int i = 0; i < 6; i++, hash += 2) {
sprintf(hash, "%.02x", rsds->guid.e[i]);
}

sprintf(hash, "%.01x", rsds.age);

return 0;
sprintf(hash, "%.01x", rsds->age);
}

int main(int argc, char *argv[])

@@ -515,6 +531,7 @@ int main(int argc, char *argv[])
KDDEBUGGER_DATA64 *kdbg;
uint64_t KdVersionBlock;
bool kernel_found = false;
OMFSignatureRSDS rsds;

if (argc != 3) {
eprintf("usage:\n\t%s elf_file dmp_file\n", argv[0]);

@@ -562,7 +579,8 @@ int main(int argc, char *argv[])
}

if (*(uint16_t *)nt_start_addr == 0x5a4d) { /* MZ */
if (pe_check_export_name(KernBase, nt_start_addr, &vs)) {
printf("Checking candidate KernBase = 0x%016"PRIx64"\n", KernBase);
if (pe_check_pdb_name(KernBase, nt_start_addr, &vs, &rsds)) {
kernel_found = true;
break;
}

@@ -578,11 +596,7 @@ int main(int argc, char *argv[])
printf("KernBase = 0x%016"PRIx64", signature is \'%.2s\'\n", KernBase,
(char *)nt_start_addr);

if (pe_get_pdb_symstore_hash(KernBase, nt_start_addr, pdb_hash, &vs)) {
eprintf("Failed to get PDB symbol store hash\n");
err = 1;
goto out_ps;
}
pe_get_pdb_symstore_hash(&rsds, pdb_hash);

sprintf(pdb_url, "%s%s/%s/%s", SYM_URL_BASE, PDB_NAME, pdb_hash, PDB_NAME);
printf("PDB URL is %s\n", pdb_url);
@@ -160,7 +160,7 @@ static void *pdb_ds_read_file(struct pdb_reader* r, uint32_t file_number)
static int pdb_init_segments(struct pdb_reader *r)
{
char *segs;
unsigned stream_idx = r->sidx.segments;
unsigned stream_idx = r->segments;

segs = pdb_ds_read_file(r, stream_idx);
if (!segs) {

@@ -177,9 +177,6 @@ static int pdb_init_symbols(struct pdb_reader *r)
{
int err = 0;
PDB_SYMBOLS *symbols;
PDB_STREAM_INDEXES *sidx = &r->sidx;

memset(sidx, -1, sizeof(*sidx));

symbols = pdb_ds_read_file(r, 3);
if (!symbols) {

@@ -188,15 +185,11 @@ static int pdb_init_symbols(struct pdb_reader *r)

r->symbols = symbols;

if (symbols->stream_index_size != sizeof(PDB_STREAM_INDEXES)) {
err = 1;
goto out_symbols;
}

memcpy(sidx, (const char *)symbols + sizeof(PDB_SYMBOLS) +
r->segments = *(uint16_t *)((const char *)symbols + sizeof(PDB_SYMBOLS) +
symbols->module_size + symbols->offset_size +
symbols->hash_size + symbols->srcmodule_size +
symbols->pdbimport_size + symbols->unknown2_size, sizeof(*sidx));
symbols->pdbimport_size + symbols->unknown2_size +
offsetof(PDB_STREAM_INDEXES, segments));

/* Read global symbol table */
r->modimage = pdb_ds_read_file(r, symbols->gsym_file);
@@ -227,7 +227,7 @@ struct pdb_reader {
} ds;
uint32_t file_used[1024];
PDB_SYMBOLS *symbols;
PDB_STREAM_INDEXES sidx;
uint16_t segments;
uint8_t *modimage;
char *segs;
size_t segs_size;
@@ -165,10 +165,40 @@ static bool check_ehdr(QEMU_Elf *qe)
return true;
}

int QEMU_Elf_init(QEMU_Elf *qe, const char *filename)
static int QEMU_Elf_map(QEMU_Elf *qe, const char *filename)
{
#ifdef CONFIG_LINUX
struct stat st;
int fd;

printf("Using Linux mmap\n");

fd = open(filename, O_RDONLY, 0);
if (fd == -1) {
eprintf("Failed to open ELF dump file \'%s\'\n", filename);
return 1;
}

if (fstat(fd, &st)) {
eprintf("Failed to get size of ELF dump file\n");
close(fd);
return 1;
}
qe->size = st.st_size;

qe->map = mmap(NULL, qe->size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_NORESERVE, fd, 0);
if (qe->map == MAP_FAILED) {
eprintf("Failed to map ELF file\n");
close(fd);
return 1;
}

close(fd);
#else
GError *gerr = NULL;
int err = 0;

printf("Using GLib mmap\n");

qe->gmf = g_mapped_file_new(filename, TRUE, &gerr);
if (gerr) {

@@ -179,29 +209,43 @@ int QEMU_Elf_init(QEMU_Elf *qe, const char *filename)

qe->map = g_mapped_file_get_contents(qe->gmf);
qe->size = g_mapped_file_get_length(qe->gmf);
#endif

return 0;
}

static void QEMU_Elf_unmap(QEMU_Elf *qe)
{
#ifdef CONFIG_LINUX
munmap(qe->map, qe->size);
#else
g_mapped_file_unref(qe->gmf);
#endif
}

int QEMU_Elf_init(QEMU_Elf *qe, const char *filename)
{
if (QEMU_Elf_map(qe, filename)) {
return 1;
}

if (!check_ehdr(qe)) {
eprintf("Input file has the wrong format\n");
err = 1;
goto out_unmap;
QEMU_Elf_unmap(qe);
return 1;
}

if (init_states(qe)) {
eprintf("Failed to extract QEMU CPU states\n");
err = 1;
goto out_unmap;
QEMU_Elf_unmap(qe);
return 1;
}

return 0;

out_unmap:
g_mapped_file_unref(qe->gmf);

return err;
}

void QEMU_Elf_exit(QEMU_Elf *qe)
{
exit_states(qe);
g_mapped_file_unref(qe->gmf);
QEMU_Elf_unmap(qe);
}
@@ -32,7 +32,9 @@ typedef struct QEMUCPUState {
int is_system(QEMUCPUState *s);

typedef struct QEMU_Elf {
#ifndef CONFIG_LINUX
GMappedFile *gmf;
#endif
size_t size;
void *map;
QEMUCPUState **state;
@@ -277,14 +277,6 @@ deprecated; use the new name ``dtb-randomness`` instead. The new name
better reflects the way this property affects all random data within
the device tree blob, not just the ``kaslr-seed`` node.

``pc-i440fx-1.4`` up to ``pc-i440fx-1.7`` (since 7.0)
'''''''''''''''''''''''''''''''''''''''''''''''''''''

These old machine types are quite neglected nowadays and thus might have
various pitfalls with regards to live migration. Use a newer machine type
instead.


Backend options
---------------
@@ -715,8 +715,8 @@ mips ``fulong2e`` machine alias (removed in 6.0)

This machine has been renamed ``fuloong2e``.

``pc-0.10`` up to ``pc-1.3`` (removed in 4.0 up to 6.0)
'''''''''''''''''''''''''''''''''''''''''''''''''''''''
``pc-0.10`` up to ``pc-i440fx-1.7`` (removed in 4.0 up to 8.2)
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

These machine types were very old and likely could not be used for live
migration from old QEMU versions anymore. Use a newer machine type instead.
@@ -288,3 +288,7 @@
driver = "hda-duplex"
bus = "ich9-hda-audio.0"
cad = "0"
audiodev = "audiodev0"

[audiodev "audiodev0"]
driver = "none" # CHANGE ME

@@ -248,3 +248,7 @@
driver = "hda-duplex"
bus = "sound.0"
cad = "0"
audiodev = "audiodev0"

[audiodev "audiodev0"]
driver = "none" # CHANGE ME
@@ -63,12 +63,12 @@ which stores ``val`` to ``ptr`` as an ``{endian}`` order value
of size ``sz`` bytes.

Regexes for git grep
Regexes for git grep:
- ``\<ld[us]\?[bwlq]\(_[hbl]e\)\?_p\>``
- ``\<st[bwlq]\(_[hbl]e\)\?_p\>``
- ``\<st24\(_[hbl]e\)\?_p\>``
- ``\<ldn_\([hbl]e\)?_p\>``
- ``\<stn_\([hbl]e\)?_p\>``
- ``\<ldn_\([hbl]e\)\?_p\>``
- ``\<stn_\([hbl]e\)\?_p\>``

``cpu_{ld,st}*_mmu``
~~~~~~~~~~~~~~~~~~~~

@@ -121,8 +121,8 @@ store: ``cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)``
- ``_le`` : little endian

Regexes for git grep:
- ``\<cpu_ld[bwlq](_[bl]e)\?_mmu\>``
- ``\<cpu_st[bwlq](_[bl]e)\?_mmu\>``
- ``\<cpu_ld[bwlq]\(_[bl]e\)\?_mmu\>``
- ``\<cpu_st[bwlq]\(_[bl]e\)\?_mmu\>``


``cpu_{ld,st}*_mmuidx_ra``

@@ -155,8 +155,8 @@ store: ``cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmuidx, retaddr)``
- ``_le`` : little endian

Regexes for git grep:
- ``\<cpu_ld[us]\?[bwlq](_[bl]e)\?_mmuidx_ra\>``
- ``\<cpu_st[bwlq](_[bl]e)\?_mmuidx_ra\>``
- ``\<cpu_ld[us]\?[bwlq]\(_[bl]e\)\?_mmuidx_ra\>``
- ``\<cpu_st[bwlq]\(_[bl]e\)\?_mmuidx_ra\>``

``cpu_{ld,st}*_data_ra``
~~~~~~~~~~~~~~~~~~~~~~~~

@@ -193,8 +193,8 @@ store: ``cpu_st{size}{end}_data_ra(env, ptr, val, ra)``
- ``_le`` : little endian

Regexes for git grep:
- ``\<cpu_ld[us]\?[bwlq](_[bl]e)\?_data_ra\>``
- ``\<cpu_st[bwlq](_[bl]e)\?_data_ra\>``
- ``\<cpu_ld[us]\?[bwlq]\(_[bl]e\)\?_data_ra\>``
- ``\<cpu_st[bwlq]\(_[bl]e\)\?_data_ra\>``

``cpu_{ld,st}*_data``
~~~~~~~~~~~~~~~~~~~~~

@@ -231,9 +231,9 @@ store: ``cpu_st{size}{end}_data(env, ptr, val)``
- ``_be`` : big endian
- ``_le`` : little endian

Regexes for git grep
- ``\<cpu_ld[us]\?[bwlq](_[bl]e)\?_data\>``
- ``\<cpu_st[bwlq](_[bl]e)\?_data\+\>``
Regexes for git grep:
- ``\<cpu_ld[us]\?[bwlq]\(_[bl]e\)\?_data\>``
- ``\<cpu_st[bwlq]\(_[bl]e\)\?_data\+\>``

``cpu_ld*_code``
~~~~~~~~~~~~~~~~

@@ -296,7 +296,7 @@ swap: ``translator_ld{sign}{size}_swap(env, ptr, swap)``
- ``l`` : 32 bits
- ``q`` : 64 bits

Regexes for git grep
Regexes for git grep:
- ``\<translator_ld[us]\?[bwlq]\(_swap\)\?\>``

``helper_{ld,st}*_mmu``

@@ -325,7 +325,7 @@ store: ``helper_{size}_mmu(env, addr, val, opindex, retaddr)``
- ``l`` : 32 bits
- ``q`` : 64 bits

Regexes for git grep
Regexes for git grep:
- ``\<helper_ld[us]\?[bwlq]_mmu\>``
- ``\<helper_st[bwlq]_mmu\>``

@@ -382,7 +382,7 @@ succeeded using a MemTxResult return code.

The ``_{endian}`` suffix is omitted for byte accesses.

Regexes for git grep
Regexes for git grep:
- ``\<address_space_\(read\|write\|rw\)\>``
- ``\<address_space_ldu\?[bwql]\(_[lb]e\)\?\>``
- ``\<address_space_st[bwql]\(_[lb]e\)\?\>``

@@ -400,7 +400,7 @@ Note that portions of the write which attempt to write data to a
device will be silently ignored -- only real RAM and ROM will
be written to.

Regexes for git grep
Regexes for git grep:
- ``address_space_write_rom``

``{ld,st}*_phys``

@@ -438,7 +438,7 @@ device doing the access has no way to report such an error.

The ``_{endian}_`` infix is omitted for byte accesses.

Regexes for git grep
Regexes for git grep:
- ``\<ldu\?[bwlq]\(_[bl]e\)\?_phys\>``
- ``\<st[bwlq]\(_[bl]e\)\?_phys\>``

@@ -462,7 +462,7 @@ For new code they are better avoided:

``cpu_physical_memory_rw``

Regexes for git grep
Regexes for git grep:
- ``\<cpu_physical_memory_\(read\|write\|rw\)\>``

``cpu_memory_rw_debug``

@@ -497,7 +497,7 @@ make sure our existing code is doing things correctly.

``dma_memory_rw``

Regexes for git grep
Regexes for git grep:
- ``\<dma_memory_\(read\|write\|rw\)\>``
- ``\<ldu\?[bwlq]\(_[bl]e\)\?_dma\>``
- ``\<st[bwlq]\(_[bl]e\)\?_dma\>``

@@ -538,7 +538,7 @@ correct address space for that device.

The ``_{endian}_`` infix is omitted for byte accesses.

Regexes for git grep
Regexes for git grep:
- ``\<pci_dma_\(read\|write\|rw\)\>``
- ``\<ldu\?[bwlq]\(_[bl]e\)\?_pci_dma\>``
- ``\<st[bwlq]\(_[bl]e\)\?_pci_dma\>``
@@ -42,6 +42,7 @@ the following architecture extensions:
- FEAT_FlagM2 (Enhancements to flag manipulation instructions)
- FEAT_GTG (Guest translation granule size)
- FEAT_HAFDBS (Hardware management of the access flag and dirty bit state)
- FEAT_HBC (Hinted conditional branches)
- FEAT_HCX (Support for the HCRX_EL2 register)
- FEAT_HPDS (Hierarchical permission disables)
- FEAT_HPDS2 (Translation table page-based hardware attributes)

@@ -57,6 +58,7 @@ the following architecture extensions:
- FEAT_LSE (Large System Extensions)
- FEAT_LSE2 (Large System Extensions v2)
- FEAT_LVA (Large Virtual Address space)
- FEAT_MOPS (Standardization of memory operations)
- FEAT_MTE (Memory Tagging Extension)
- FEAT_MTE2 (Memory Tagging Extension)
- FEAT_MTE3 (MTE Asymmetric Fault Handling)
@@ -761,6 +761,10 @@ static void do_cpu_reset(void *opaque)
if (cpu_isar_feature(aa64_hcx, cpu)) {
env->cp15.scr_el3 |= SCR_HXEN;
}
if (cpu_isar_feature(aa64_fgt, cpu)) {
env->cp15.scr_el3 |= SCR_FGTEN;
}

/* AArch64 kernels never boot in secure mode */
assert(!info->secure_boot);
/* This hook is only supported for AArch32 currently:
@@ -61,6 +61,7 @@
#define ARCH_TIMER_S_EL1_IRQ 13
#define ARCH_TIMER_NS_EL1_IRQ 14
#define ARCH_TIMER_NS_EL2_IRQ 10
#define ARCH_TIMER_NS_EL2_VIRT_IRQ 12

enum {
SBSA_FLASH,

@@ -489,6 +490,7 @@ static void create_gic(SBSAMachineState *sms, MemoryRegion *mem)
[GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ,
[GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ,
[GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ,
[GTIMER_HYPVIRT] = ARCH_TIMER_NS_EL2_VIRT_IRQ,
};

for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
@@ -675,7 +675,9 @@ static void hda_audio_stream(HDACodecDevice *hda, uint32_t stnr, bool running, b
}
}

static int hda_audio_init(HDACodecDevice *hda, const struct desc_codec *desc)
static void hda_audio_init(HDACodecDevice *hda,
const struct desc_codec *desc,
Error **errp)
{
HDAAudioState *a = HDA_AUDIO(hda);
HDAAudioStream *st;

@@ -718,7 +720,6 @@ static int hda_audio_init(HDACodecDevice *hda, const struct desc_codec *desc)
break;
}
}
return 0;
}

static void hda_audio_exit(HDACodecDevice *hda)

@@ -848,37 +849,40 @@ static Property hda_audio_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};

static int hda_audio_init_output(HDACodecDevice *hda)
static void hda_audio_init_output(HDACodecDevice *hda, Error **errp)
{
HDAAudioState *a = HDA_AUDIO(hda);
const struct desc_codec *desc = &output_nomixemu;

if (!a->mixer) {
return hda_audio_init(hda, &output_nomixemu);
} else {
return hda_audio_init(hda, &output_mixemu);
desc = &output_mixemu;
}

hda_audio_init(hda, desc, errp);
}

static int hda_audio_init_duplex(HDACodecDevice *hda)
static void hda_audio_init_duplex(HDACodecDevice *hda, Error **errp)
{
HDAAudioState *a = HDA_AUDIO(hda);
const struct desc_codec *desc = &duplex_nomixemu;

if (!a->mixer) {
return hda_audio_init(hda, &duplex_nomixemu);
} else {
return hda_audio_init(hda, &duplex_mixemu);
desc = &duplex_mixemu;
}

hda_audio_init(hda, desc, errp);
}

static int hda_audio_init_micro(HDACodecDevice *hda)
static void hda_audio_init_micro(HDACodecDevice *hda, Error **errp)
{
HDAAudioState *a = HDA_AUDIO(hda);
const struct desc_codec *desc = &micro_nomixemu;

if (!a->mixer) {
return hda_audio_init(hda, &micro_nomixemu);
} else {
return hda_audio_init(hda, &micro_mixemu);
desc = &micro_mixemu;
}

hda_audio_init(hda, desc, errp);
}

static void hda_audio_base_class_init(ObjectClass *klass, void *data)
@@ -71,9 +71,7 @@ static void hda_codec_dev_realize(DeviceState *qdev, Error **errp)
return;
}
bus->next_cad = dev->cad + 1;
if (cdc->init(dev) != 0) {
error_setg(errp, "HDA audio init failed");
}
cdc->init(dev, errp);
}

static void hda_codec_dev_unrealize(DeviceState *qdev)
@@ -31,7 +31,7 @@ struct HDACodecBus {
struct HDACodecDeviceClass {
DeviceClass parent_class;

int (*init)(HDACodecDevice *dev);
void (*init)(HDACodecDevice *dev, Error **errp);
void (*exit)(HDACodecDevice *dev);
void (*command)(HDACodecDevice *dev, uint32_t nid, uint32_t data);
void (*stream)(HDACodecDevice *dev, uint32_t stnr, bool running, bool output);
@@ -276,7 +276,8 @@ static int lm4549_post_load(void *opaque, int version_id)
return 0;
}

void lm4549_init(lm4549_state *s, lm4549_callback data_req_cb, void* opaque)
void lm4549_init(lm4549_state *s, lm4549_callback data_req_cb, void* opaque,
Error **errp)
{
struct audsettings as;
@@ -36,7 +36,8 @@ typedef struct {
extern const VMStateDescription vmstate_lm4549_state;


void lm4549_init(lm4549_state *s, lm4549_callback data_req, void *opaque);
void lm4549_init(lm4549_state *s, lm4549_callback data_req, void *opaque,
Error **errp);
uint32_t lm4549_read(lm4549_state *s, hwaddr offset);
void lm4549_write(lm4549_state *s, hwaddr offset, uint32_t value);
uint32_t lm4549_write_samples(lm4549_state *s, uint32_t left, uint32_t right);
@@ -564,7 +564,7 @@ static void pl041_realize(DeviceState *dev, Error **errp)
}

/* Init the codec */
lm4549_init(&s->codec, &pl041_request_data, (void *)s);
lm4549_init(&s->codec, &pl041_request_data, (void *)s, errp);
}

static const VMStateDescription vmstate_pl041_regfile = {
@@ -1385,6 +1385,11 @@ static void xlnx_dp_reset(DeviceState *dev)
xlnx_dp_update_irq(s);
}

static Property xlnx_dp_device_properties[] = {
DEFINE_AUDIO_PROPERTIES(XlnxDPState, aud_card),
DEFINE_PROP_END_OF_LIST(),
};

static void xlnx_dp_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);

@@ -1392,6 +1397,7 @@ static void xlnx_dp_class_init(ObjectClass *oc, void *data)
dc->realize = xlnx_dp_realize;
dc->vmsd = &vmstate_dp;
dc->reset = xlnx_dp_reset;
device_class_set_props(dc, xlnx_dp_device_properties);
}

static const TypeInfo xlnx_dp_info = {
@@ -279,7 +279,7 @@ static void smb_ioport_writeb(void *opaque, hwaddr addr, uint64_t val,
if (!read && s->smb_index == s->smb_data0) {
uint8_t prot = (s->smb_ctl >> 2) & 0x07;
uint8_t cmd = s->smb_cmd;
uint8_t addr = s->smb_addr >> 1;
uint8_t smb_addr = s->smb_addr >> 1;
int ret;

if (prot == PROT_I2C_BLOCK_READ) {

@@ -287,7 +287,7 @@ static void smb_ioport_writeb(void *opaque, hwaddr addr, uint64_t val,
goto out;
}

ret = smbus_write_block(s->smbus, addr, cmd, s->smb_data,
ret = smbus_write_block(s->smbus, smb_addr, cmd, s->smb_data,
s->smb_data0, !s->i2c_enable);
if (ret < 0) {
s->smb_stat |= STS_DEV_ERR;
hw/i386/pc.c
@@ -359,60 +359,6 @@ GlobalProperty pc_compat_2_0[] = {
};
const size_t pc_compat_2_0_len = G_N_ELEMENTS(pc_compat_2_0);

GlobalProperty pc_compat_1_7[] = {
PC_CPU_MODEL_IDS("1.7.0")
{ TYPE_USB_DEVICE, "msos-desc", "no" },
{ "PIIX4_PM", ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, "off" },
{ "hpet", HPET_INTCAP, "4" },
};
const size_t pc_compat_1_7_len = G_N_ELEMENTS(pc_compat_1_7);

GlobalProperty pc_compat_1_6[] = {
PC_CPU_MODEL_IDS("1.6.0")
{ "e1000", "mitigation", "off" },
{ "qemu64-" TYPE_X86_CPU, "model", "2" },
{ "qemu32-" TYPE_X86_CPU, "model", "3" },
{ "i440FX-pcihost", "short_root_bus", "1" },
{ "q35-pcihost", "short_root_bus", "1" },
};
const size_t pc_compat_1_6_len = G_N_ELEMENTS(pc_compat_1_6);

GlobalProperty pc_compat_1_5[] = {
PC_CPU_MODEL_IDS("1.5.0")
{ "Conroe-" TYPE_X86_CPU, "model", "2" },
{ "Conroe-" TYPE_X86_CPU, "min-level", "2" },
{ "Penryn-" TYPE_X86_CPU, "model", "2" },
{ "Penryn-" TYPE_X86_CPU, "min-level", "2" },
{ "Nehalem-" TYPE_X86_CPU, "model", "2" },
{ "Nehalem-" TYPE_X86_CPU, "min-level", "2" },
{ "virtio-net-pci", "any_layout", "off" },
{ TYPE_X86_CPU, "pmu", "on" },
{ "i440FX-pcihost", "short_root_bus", "0" },
{ "q35-pcihost", "short_root_bus", "0" },
};
const size_t pc_compat_1_5_len = G_N_ELEMENTS(pc_compat_1_5);

GlobalProperty pc_compat_1_4[] = {
PC_CPU_MODEL_IDS("1.4.0")
{ "scsi-hd", "discard_granularity", "0" },
{ "scsi-cd", "discard_granularity", "0" },
{ "ide-hd", "discard_granularity", "0" },
{ "ide-cd", "discard_granularity", "0" },
{ "virtio-blk-pci", "discard_granularity", "0" },
/* DEV_NVECTORS_UNSPECIFIED as a uint32_t string: */
{ "virtio-serial-pci", "vectors", "0xFFFFFFFF" },
{ "virtio-net-pci", "ctrl_guest_offloads", "off" },
{ "e1000", "romfile", "pxe-e1000.rom" },
{ "ne2k_pci", "romfile", "pxe-ne2k_pci.rom" },
{ "pcnet", "romfile", "pxe-pcnet.rom" },
{ "rtl8139", "romfile", "pxe-rtl8139.rom" },
{ "virtio-net-pci", "romfile", "pxe-virtio.rom" },
{ "486-" TYPE_X86_CPU, "model", "0" },
{ "n270" "-" TYPE_X86_CPU, "movbe", "off" },
{ "Westmere" "-" TYPE_X86_CPU, "pclmulqdq", "off" },
};
const size_t pc_compat_1_4_len = G_N_ELEMENTS(pc_compat_1_4);

GSIState *pc_gsi_create(qemu_irq **irqs, bool pci_enabled)
{
GSIState *s;

@@ -1293,9 +1239,9 @@ void pc_basic_device_init(struct PCMachineState *pcms,
exit(1);
}
/*
* For pc-piix-*, hpet's intcap is always IRQ2. For pc-q35-1.7 and
* earlier, use IRQ2 for compat. Otherwise, use IRQ16~23, IRQ8 and
* IRQ2.
* For pc-piix-*, hpet's intcap is always IRQ2. For pc-q35-*,
* use IRQ16~23, IRQ8 and IRQ2. If the user has already set
* the property, use whatever mask they specified.
*/
uint8_t compat = object_property_get_uint(OBJECT(hpet),
HPET_INTCAP, NULL);
@@ -423,27 +423,6 @@ static void pc_compat_2_0_fn(MachineState *machine)
pc_compat_2_1_fn(machine);
}

static void pc_compat_1_7_fn(MachineState *machine)
{
pc_compat_2_0_fn(machine);
x86_cpu_change_kvm_default("x2apic", NULL);
}

static void pc_compat_1_6_fn(MachineState *machine)
{
pc_compat_1_7_fn(machine);
}

static void pc_compat_1_5_fn(MachineState *machine)
{
pc_compat_1_6_fn(machine);
}

static void pc_compat_1_4_fn(MachineState *machine)
{
pc_compat_1_5_fn(machine);
}

#ifdef CONFIG_ISAPC
static void pc_init_isa(MachineState *machine)
{

@@ -876,58 +855,6 @@ static void pc_i440fx_2_0_machine_options(MachineClass *m)
DEFINE_I440FX_MACHINE(v2_0, "pc-i440fx-2.0", pc_compat_2_0_fn,
pc_i440fx_2_0_machine_options);

static void pc_i440fx_1_7_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);

pc_i440fx_2_0_machine_options(m);
m->hw_version = "1.7.0";
m->default_machine_opts = NULL;
m->option_rom_has_mr = true;
m->deprecation_reason = "old and unattended - use a newer version instead";
compat_props_add(m->compat_props, pc_compat_1_7, pc_compat_1_7_len);
pcmc->smbios_defaults = false;
pcmc->gigabyte_align = false;
pcmc->legacy_acpi_table_size = 6414;
}

DEFINE_I440FX_MACHINE(v1_7, "pc-i440fx-1.7", pc_compat_1_7_fn,
pc_i440fx_1_7_machine_options);

static void pc_i440fx_1_6_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);

pc_i440fx_1_7_machine_options(m);
m->hw_version = "1.6.0";
m->rom_file_has_mr = false;
compat_props_add(m->compat_props, pc_compat_1_6, pc_compat_1_6_len);
pcmc->has_acpi_build = false;
}

DEFINE_I440FX_MACHINE(v1_6, "pc-i440fx-1.6", pc_compat_1_6_fn,
pc_i440fx_1_6_machine_options);

static void pc_i440fx_1_5_machine_options(MachineClass *m)
{
pc_i440fx_1_6_machine_options(m);
m->hw_version = "1.5.0";
compat_props_add(m->compat_props, pc_compat_1_5, pc_compat_1_5_len);
}

DEFINE_I440FX_MACHINE(v1_5, "pc-i440fx-1.5", pc_compat_1_5_fn,
pc_i440fx_1_5_machine_options);

static void pc_i440fx_1_4_machine_options(MachineClass *m)
{
pc_i440fx_1_5_machine_options(m);
m->hw_version = "1.4.0";
compat_props_add(m->compat_props, pc_compat_1_4, pc_compat_1_4_len);
}

DEFINE_I440FX_MACHINE(v1_4, "pc-i440fx-1.4", pc_compat_1_4_fn,
pc_i440fx_1_4_machine_options);

#ifdef CONFIG_ISAPC
static void isapc_machine_options(MachineClass *m)
{
@@ -30,6 +30,7 @@
#include "hw/input/tsc2xxx.h"
#include "hw/irq.h"
#include "migration/vmstate.h"
#include "qapi/error.h"

#define TSC_DATA_REGISTERS_PAGE 0x0
#define TSC_CONTROL_REGISTERS_PAGE 0x1

@@ -1069,20 +1070,10 @@ static const VMStateDescription vmstate_tsc2301 = {
.fields = vmstatefields_tsc210x,
};

uWireSlave *tsc2102_init(qemu_irq pint)
static void tsc210x_init(TSC210xState *s,
const char *name,
const VMStateDescription *vmsd)
{
TSC210xState *s;

s = g_new0(TSC210xState, 1);
s->x = 160;
s->y = 160;
s->pressure = 0;
s->precision = s->nextprecision = 0;
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc210x_timer_tick, s);
s->pint = pint;
s->model = 0x2102;
s->name = "tsc2102";

s->tr[0] = 0;
s->tr[1] = 1;
s->tr[2] = 1;

@@ -1104,13 +1095,29 @@ uWireSlave *tsc2102_init(qemu_irq pint)

tsc210x_reset(s);

qemu_add_mouse_event_handler(tsc210x_touchscreen_event, s, 1,
"QEMU TSC2102-driven Touchscreen");
qemu_add_mouse_event_handler(tsc210x_touchscreen_event, s, 1, name);

AUD_register_card(s->name, &s->card);

qemu_register_reset((void *) tsc210x_reset, s);
vmstate_register(NULL, 0, &vmstate_tsc2102, s);
vmstate_register(NULL, 0, vmsd, s);
}

uWireSlave *tsc2102_init(qemu_irq pint)
{
TSC210xState *s;

s = g_new0(TSC210xState, 1);
s->x = 160;
s->y = 160;
s->pressure = 0;
s->precision = s->nextprecision = 0;
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc210x_timer_tick, s);
s->pint = pint;
s->model = 0x2102;
s->name = "tsc2102";

tsc210x_init(s, "QEMU TSC2102-driven Touchscreen", &vmstate_tsc2102);

return &s->chip;
}

@@ -1131,34 +1138,7 @@ uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav)
s->model = 0x2301;
s->name = "tsc2301";

s->tr[0] = 0;
s->tr[1] = 1;
s->tr[2] = 1;
s->tr[3] = 0;
s->tr[4] = 1;
s->tr[5] = 0;
s->tr[6] = 1;
s->tr[7] = 0;

s->chip.opaque = s;
s->chip.send = (void *) tsc210x_write;
s->chip.receive = (void *) tsc210x_read;

s->codec.opaque = s;
s->codec.tx_swallow = (void *) tsc210x_i2s_swallow;
s->codec.set_rate = (void *) tsc210x_i2s_set_rate;
s->codec.in.fifo = s->in_fifo;
s->codec.out.fifo = s->out_fifo;

tsc210x_reset(s);

qemu_add_mouse_event_handler(tsc210x_touchscreen_event, s, 1,
"QEMU TSC2301-driven Touchscreen");

AUD_register_card(s->name, &s->card);

qemu_register_reset((void *) tsc210x_reset, s);
vmstate_register(NULL, 0, &vmstate_tsc2301, s);
tsc210x_init(s, "QEMU TSC2301-driven Touchscreen", &vmstate_tsc2301);

return &s->chip;
}
@@ -114,6 +114,40 @@ static const MemoryRegionOps dma_dummy_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};

static void mips_jazz_init_net(NICInfo *nd, IOMMUMemoryRegion *rc4030_dma_mr,
DeviceState *rc4030, MemoryRegion *dp8393x_prom)
{
DeviceState *dev;
SysBusDevice *sysbus;
int checksum, i;
uint8_t *prom;

qemu_check_nic_model(nd, "dp83932");

dev = qdev_new("dp8393x");
qdev_set_nic_properties(dev, nd);
qdev_prop_set_uint8(dev, "it_shift", 2);
qdev_prop_set_bit(dev, "big_endian", TARGET_BIG_ENDIAN);
object_property_set_link(OBJECT(dev), "dma_mr",
OBJECT(rc4030_dma_mr), &error_abort);
sysbus = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(sysbus, &error_fatal);
sysbus_mmio_map(sysbus, 0, 0x80001000);
sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(rc4030, 4));

/* Add MAC address with valid checksum to PROM */
prom = memory_region_get_ram_ptr(dp8393x_prom);
checksum = 0;
for (i = 0; i < 6; i++) {
prom[i] = nd->macaddr.a[i];
checksum += prom[i];
if (checksum > 0xff) {
checksum = (checksum + 1) & 0xff;
}
}
prom[7] = 0xff - checksum;
}

#define MAGNUM_BIOS_SIZE_MAX 0x7e000
#define MAGNUM_BIOS_SIZE \
(BIOS_SIZE < MAGNUM_BIOS_SIZE_MAX ? BIOS_SIZE : MAGNUM_BIOS_SIZE_MAX)

@@ -138,7 +172,6 @@ static void mips_jazz_init(MachineState *machine,
MemoryRegion *rtc = g_new(MemoryRegion, 1);
MemoryRegion *dma_dummy = g_new(MemoryRegion, 1);
MemoryRegion *dp8393x_prom = g_new(MemoryRegion, 1);
NICInfo *nd;
DeviceState *dev, *rc4030;
MMIOKBDState *i8042;
SysBusDevice *sysbus;

@@ -281,47 +314,11 @@ static void mips_jazz_init(MachineState *machine,
}

/* Network controller */
for (n = 0; n < nb_nics; n++) {
nd = &nd_table[n];
if (!nd->model) {
nd->model = g_strdup("dp83932");
}
if (strcmp(nd->model, "dp83932") == 0) {
int checksum, i;
uint8_t *prom;

qemu_check_nic_model(nd, "dp83932");

dev = qdev_new("dp8393x");
qdev_set_nic_properties(dev, nd);
qdev_prop_set_uint8(dev, "it_shift", 2);
qdev_prop_set_bit(dev, "big_endian", TARGET_BIG_ENDIAN);
object_property_set_link(OBJECT(dev), "dma_mr",
OBJECT(rc4030_dma_mr), &error_abort);
sysbus = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(sysbus, &error_fatal);
sysbus_mmio_map(sysbus, 0, 0x80001000);
sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(rc4030, 4));

/* Add MAC address with valid checksum to PROM */
prom = memory_region_get_ram_ptr(dp8393x_prom);
checksum = 0;
for (i = 0; i < 6; i++) {
prom[i] = nd->macaddr.a[i];
checksum += prom[i];
if (checksum > 0xff) {
checksum = (checksum + 1) & 0xff;
}
}
prom[7] = 0xff - checksum;
break;
} else if (is_help_option(nd->model)) {
error_report("Supported NICs: dp83932");
exit(1);
} else {
error_report("Unsupported NIC: %s", nd->model);
exit(1);
}
if (nb_nics == 1) {
mips_jazz_init_net(&nd_table[0], rc4030_dma_mr, rc4030, dp8393x_prom);
} else if (nb_nics > 1) {
error_report("This machine only supports one NIC");
exit(1);
}

/* SCSI adapter */
@@ -192,7 +192,7 @@ static dma_addr_t mptsas_ld_sg_base(MPTSASState *s, uint32_t flags_and_length,
return addr;
}

static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr)
static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr req_addr)
{
PCIDevice *pci = (PCIDevice *) s;
hwaddr next_chain_addr;

@@ -201,8 +201,8 @@ static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr)
uint32_t chain_offset;

chain_offset = req->scsi_io.ChainOffset;
next_chain_addr = addr + chain_offset * sizeof(uint32_t);
sgaddr = addr + sizeof(MPIMsgSCSIIORequest);
next_chain_addr = req_addr + chain_offset * sizeof(uint32_t);
sgaddr = req_addr + sizeof(MPIMsgSCSIIORequest);
pci_dma_sglist_init(&req->qsg, pci, 4);
left = req->scsi_io.DataLength;
@ -1628,9 +1628,10 @@ static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
|
||||
* Since the existing code only checks/updates bits 8-15 of the block
|
||||
* size, restrict ourselves to the same requirement for now to ensure
|
||||
* that a block size set by a block descriptor and then read back by
|
||||
* a subsequent SCSI command will be the same
|
||||
* a subsequent SCSI command will be the same. Also disallow a block
|
||||
* size of 256 since we cannot handle anything below BDRV_SECTOR_SIZE.
|
||||
*/
|
||||
if (bs && !(bs & ~0xff00) && bs != s->qdev.blocksize) {
|
||||
if (bs && !(bs & ~0xfe00) && bs != s->qdev.blocksize) {
|
||||
s->qdev.blocksize = bs;
|
||||
trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize);
|
||||
}
|
||||
|
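Aside: the mask change above is subtle. `!(bs & ~0xff00)` accepted any multiple of 256 in bits 8-15, while `!(bs & ~0xfe00)` only accepts block sizes whose set bits fall in bits 9-15, i.e. multiples of 512; 256 (0x100) keeps bit 8 set and is now rejected. A tiny standalone check demonstrating that:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool blocksize_acceptable(uint32_t bs)
{
    /* Same predicate as the new code above. */
    return bs && !(bs & ~0xfe00);
}

int main(void)
{
    printf("256: %d, 512: %d, 4096: %d\n",
           blocksize_acceptable(256), blocksize_acceptable(512),
           blocksize_acceptable(4096)); /* prints 256: 0, 512: 1, 4096: 1 */
    return 0;
}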
@ -60,20 +60,22 @@ typedef enum NBDMode {
    NBD_MODE_EXPORT_NAME, /* newstyle but only OPT_EXPORT_NAME safe */
    NBD_MODE_SIMPLE,      /* newstyle but only simple replies */
    NBD_MODE_STRUCTURED,  /* newstyle, structured replies enabled */
    /* TODO add NBD_MODE_EXTENDED */
    NBD_MODE_EXTENDED,    /* newstyle, extended headers enabled */
} NBDMode;

/* Transmission phase structs
 *
 * Note: these are _NOT_ the same as the network representation of an NBD
 * request and reply!
/* Transmission phase structs */

/*
 * Note: NBDRequest is _NOT_ the same as the network representation of an NBD
 * request!
 */
typedef struct NBDRequest {
    uint64_t cookie;
    uint64_t from;
    uint32_t len;
    uint64_t from;  /* Offset touched by the command */
    uint64_t len;   /* Effect length; 32 bit limit without extended headers */
    uint16_t flags; /* NBD_CMD_FLAG_* */
    uint16_t type; /* NBD_CMD_* */
    uint16_t type;  /* NBD_CMD_* */
    NBDMode mode;   /* Determines which network representation to use */
} NBDRequest;

typedef struct NBDSimpleReply {
@ -91,20 +93,36 @@ typedef struct NBDStructuredReplyChunk {
    uint32_t length; /* length of payload */
} QEMU_PACKED NBDStructuredReplyChunk;

typedef struct NBDExtendedReplyChunk {
    uint32_t magic;  /* NBD_EXTENDED_REPLY_MAGIC */
    uint16_t flags;  /* combination of NBD_REPLY_FLAG_* */
    uint16_t type;   /* NBD_REPLY_TYPE_* */
    uint64_t cookie; /* request handle */
    uint64_t offset; /* request offset */
    uint64_t length; /* length of payload */
} QEMU_PACKED NBDExtendedReplyChunk;

typedef union NBDReply {
    NBDSimpleReply simple;
    NBDStructuredReplyChunk structured;
    NBDExtendedReplyChunk extended;
    struct {
        /*
         * @magic and @cookie fields have the same offset and size both in
         * simple reply and structured reply chunk, so let them be accessible
         * without ".simple." or ".structured." specification
         * @magic and @cookie fields have the same offset and size in all
         * forms of replies, so let them be accessible without ".simple.",
         * ".structured.", or ".extended." specifications.
         */
        uint32_t magic;
        uint32_t _skip;
        uint64_t cookie;
    } QEMU_PACKED;
} NBDReply;
QEMU_BUILD_BUG_ON(offsetof(NBDReply, simple.cookie) !=
                  offsetof(NBDReply, cookie));
QEMU_BUILD_BUG_ON(offsetof(NBDReply, structured.cookie) !=
                  offsetof(NBDReply, cookie));
QEMU_BUILD_BUG_ON(offsetof(NBDReply, extended.cookie) !=
                  offsetof(NBDReply, cookie));
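Aside: the anonymous struct in the union above lets a parser read `reply.magic` and `reply.cookie` before it knows which reply flavor arrived, and the QEMU_BUILD_BUG_ON lines prove the overlap at compile time. A minimal standalone sketch of the same trick, with simplified stand-in structs (not the real NBD definitions):

#include <stddef.h>
#include <stdint.h>

struct simple   { uint32_t magic; uint32_t error;  uint64_t cookie; };
struct extended { uint32_t magic; uint16_t f, t;   uint64_t cookie; };

union reply {
    struct simple   simple;
    struct extended extended;
    struct {
        uint32_t magic; /* readable without knowing the flavor */
        uint32_t _skip;
        uint64_t cookie;
    };
};

/* Same compile-time guarantee the QEMU_BUILD_BUG_ON lines express. */
_Static_assert(offsetof(union reply, simple.cookie) ==
               offsetof(union reply, cookie), "cookie must overlap");
_Static_assert(offsetof(union reply, extended.cookie) ==
               offsetof(union reply, cookie), "cookie must overlap");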
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
typedef struct NBDStructuredReadData {
@ -131,14 +149,34 @@ typedef struct NBDStructuredError {
typedef struct NBDStructuredMeta {
    /* header's length >= 12 (at least one extent) */
    uint32_t context_id;
    /* extents follows */
    /* NBDExtent32 extents[] follows, array length implied by header */
} QEMU_PACKED NBDStructuredMeta;

/* Extent chunk for NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDExtent {
/* Extent array element for NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDExtent32 {
    uint32_t length;
    uint32_t flags; /* NBD_STATE_* */
} QEMU_PACKED NBDExtent;
} QEMU_PACKED NBDExtent32;

/* Header of NBD_REPLY_TYPE_BLOCK_STATUS_EXT */
typedef struct NBDExtendedMeta {
    /* header's length >= 24 (at least one extent) */
    uint32_t context_id;
    uint32_t count; /* header length must be count * 16 + 8 */
    /* NBDExtent64 extents[count] follows */
} QEMU_PACKED NBDExtendedMeta;

/* Extent array element for NBD_REPLY_TYPE_BLOCK_STATUS_EXT */
typedef struct NBDExtent64 {
    uint64_t length;
    uint64_t flags; /* NBD_STATE_* */
} QEMU_PACKED NBDExtent64;

/* Client payload for limiting NBD_CMD_BLOCK_STATUS reply */
typedef struct NBDBlockStatusPayload {
    uint64_t effect_length;
    /* uint32_t ids[] follows, array length implied by header */
} QEMU_PACKED NBDBlockStatusPayload;

/* Transmission (export) flags: sent from server to client during handshake,
   but describe what will happen during transmission */
@ -156,20 +194,22 @@ enum {
    NBD_FLAG_SEND_RESIZE_BIT        =  9, /* Send resize */
    NBD_FLAG_SEND_CACHE_BIT         = 10, /* Send CACHE (prefetch) */
    NBD_FLAG_SEND_FAST_ZERO_BIT     = 11, /* FAST_ZERO flag for WRITE_ZEROES */
    NBD_FLAG_BLOCK_STAT_PAYLOAD_BIT = 12, /* PAYLOAD flag for BLOCK_STATUS */
};

#define NBD_FLAG_HAS_FLAGS         (1 << NBD_FLAG_HAS_FLAGS_BIT)
#define NBD_FLAG_READ_ONLY         (1 << NBD_FLAG_READ_ONLY_BIT)
#define NBD_FLAG_SEND_FLUSH        (1 << NBD_FLAG_SEND_FLUSH_BIT)
#define NBD_FLAG_SEND_FUA          (1 << NBD_FLAG_SEND_FUA_BIT)
#define NBD_FLAG_ROTATIONAL        (1 << NBD_FLAG_ROTATIONAL_BIT)
#define NBD_FLAG_SEND_TRIM         (1 << NBD_FLAG_SEND_TRIM_BIT)
#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
#define NBD_FLAG_SEND_DF           (1 << NBD_FLAG_SEND_DF_BIT)
#define NBD_FLAG_CAN_MULTI_CONN    (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
#define NBD_FLAG_SEND_RESIZE       (1 << NBD_FLAG_SEND_RESIZE_BIT)
#define NBD_FLAG_SEND_CACHE        (1 << NBD_FLAG_SEND_CACHE_BIT)
#define NBD_FLAG_SEND_FAST_ZERO    (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
#define NBD_FLAG_HAS_FLAGS          (1 << NBD_FLAG_HAS_FLAGS_BIT)
#define NBD_FLAG_READ_ONLY          (1 << NBD_FLAG_READ_ONLY_BIT)
#define NBD_FLAG_SEND_FLUSH         (1 << NBD_FLAG_SEND_FLUSH_BIT)
#define NBD_FLAG_SEND_FUA           (1 << NBD_FLAG_SEND_FUA_BIT)
#define NBD_FLAG_ROTATIONAL         (1 << NBD_FLAG_ROTATIONAL_BIT)
#define NBD_FLAG_SEND_TRIM          (1 << NBD_FLAG_SEND_TRIM_BIT)
#define NBD_FLAG_SEND_WRITE_ZEROES  (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
#define NBD_FLAG_SEND_DF            (1 << NBD_FLAG_SEND_DF_BIT)
#define NBD_FLAG_CAN_MULTI_CONN     (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
#define NBD_FLAG_SEND_RESIZE        (1 << NBD_FLAG_SEND_RESIZE_BIT)
#define NBD_FLAG_SEND_CACHE         (1 << NBD_FLAG_SEND_CACHE_BIT)
#define NBD_FLAG_SEND_FAST_ZERO     (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
#define NBD_FLAG_BLOCK_STAT_PAYLOAD (1 << NBD_FLAG_BLOCK_STAT_PAYLOAD_BIT)

/* New-style handshake (global) flags, sent from server to client, and
   control what will happen during handshake phase. */
@ -192,6 +232,7 @@ enum {
#define NBD_OPT_STRUCTURED_REPLY  (8)
#define NBD_OPT_LIST_META_CONTEXT (9)
#define NBD_OPT_SET_META_CONTEXT  (10)
#define NBD_OPT_EXTENDED_HEADERS  (11)

/* Option reply types. */
#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value))
@ -209,6 +250,8 @@ enum {
#define NBD_REP_ERR_UNKNOWN         NBD_REP_ERR(6)  /* Export unknown */
#define NBD_REP_ERR_SHUTDOWN        NBD_REP_ERR(7)  /* Server shutting down */
#define NBD_REP_ERR_BLOCK_SIZE_REQD NBD_REP_ERR(8)  /* Need INFO_BLOCK_SIZE */
#define NBD_REP_ERR_TOO_BIG         NBD_REP_ERR(9)  /* Payload size overflow */
#define NBD_REP_ERR_EXT_HEADER_REQD NBD_REP_ERR(10) /* Need extended headers */

/* Info types, used during NBD_REP_INFO */
#define NBD_INFO_EXPORT      0
@ -217,12 +260,14 @@ enum {
#define NBD_INFO_BLOCK_SIZE  3

/* Request flags, sent from client to server during transmission phase */
#define NBD_CMD_FLAG_FUA        (1 << 0) /* 'force unit access' during write */
#define NBD_CMD_FLAG_NO_HOLE    (1 << 1) /* don't punch hole on zero run */
#define NBD_CMD_FLAG_DF         (1 << 2) /* don't fragment structured read */
#define NBD_CMD_FLAG_REQ_ONE    (1 << 3) /* only one extent in BLOCK_STATUS
                                          * reply chunk */
#define NBD_CMD_FLAG_FAST_ZERO  (1 << 4) /* fail if WRITE_ZEROES is not fast */
#define NBD_CMD_FLAG_FUA         (1 << 0) /* 'force unit access' during write */
#define NBD_CMD_FLAG_NO_HOLE     (1 << 1) /* don't punch hole on zero run */
#define NBD_CMD_FLAG_DF          (1 << 2) /* don't fragment structured read */
#define NBD_CMD_FLAG_REQ_ONE     (1 << 3) \
    /* only one extent in BLOCK_STATUS reply chunk */
#define NBD_CMD_FLAG_FAST_ZERO   (1 << 4) /* fail if WRITE_ZEROES is not fast */
#define NBD_CMD_FLAG_PAYLOAD_LEN (1 << 5) \
    /* length describes payload, not effect; only with ext header */

/* Supported request types */
enum {
@ -248,22 +293,31 @@ enum {
 */
#define NBD_MAX_STRING_SIZE 4096

/* Two types of reply structures */
/* Two types of request structures, a given client will only use 1 */
#define NBD_REQUEST_MAGIC           0x25609513
#define NBD_EXTENDED_REQUEST_MAGIC  0x21e41c71

/*
 * Three types of reply structures, but what a client expects depends
 * on NBD_OPT_STRUCTURED_REPLY and NBD_OPT_EXTENDED_HEADERS.
 */
#define NBD_SIMPLE_REPLY_MAGIC      0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC  0x668e33ef
#define NBD_EXTENDED_REPLY_MAGIC    0x6e8a278c

/* Structured reply flags */
/* Chunk reply flags (for structured and extended replies) */
#define NBD_REPLY_FLAG_DONE          (1 << 0) /* This reply-chunk is last */

/* Structured reply types */
/* Chunk reply types */
#define NBD_REPLY_ERR(value)         ((1 << 15) | (value))

#define NBD_REPLY_TYPE_NONE          0
#define NBD_REPLY_TYPE_OFFSET_DATA   1
#define NBD_REPLY_TYPE_OFFSET_HOLE   2
#define NBD_REPLY_TYPE_BLOCK_STATUS  5
#define NBD_REPLY_TYPE_ERROR         NBD_REPLY_ERR(1)
#define NBD_REPLY_TYPE_ERROR_OFFSET  NBD_REPLY_ERR(2)
#define NBD_REPLY_TYPE_NONE              0
#define NBD_REPLY_TYPE_OFFSET_DATA       1
#define NBD_REPLY_TYPE_OFFSET_HOLE       2
#define NBD_REPLY_TYPE_BLOCK_STATUS      5
#define NBD_REPLY_TYPE_BLOCK_STATUS_EXT  6
#define NBD_REPLY_TYPE_ERROR             NBD_REPLY_ERR(1)
#define NBD_REPLY_TYPE_ERROR_OFFSET      NBD_REPLY_ERR(2)

/* Extent flags for base:allocation in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_HOLE (1 << 0)
@ -305,7 +359,7 @@ typedef struct NBDExportInfo {

    /* In-out fields, set by client before nbd_receive_negotiate() and
     * updated by server results during nbd_receive_negotiate() */
    bool structured_reply;
    NBDMode mode; /* input maximum mode tolerated; output actual mode chosen */
    bool base_allocation; /* base:allocation context for NBD_CMD_BLOCK_STATUS */

    /* Set by server results during nbd_receive_negotiate() and

@ -197,4 +197,10 @@
#define BUILTIN_SUBCLL_BROKEN
#endif

#if __has_attribute(annotate)
#define QEMU_ANNOTATE(x) __attribute__((annotate(x)))
#else
#define QEMU_ANNOTATE(x)
#endif

#endif /* COMPILER_H */
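Aside: the new QEMU_ANNOTATE macro degrades to nothing when the compiler lacks the attribute, so annotated declarations stay portable. A small sketch of the same fallback pattern outside QEMU (the macro and function names here are invented for illustration):

#ifndef __has_attribute
#define __has_attribute(x) 0 /* fallback for compilers without the probe */
#endif

#if __has_attribute(annotate)
#define MY_ANNOTATE(x) __attribute__((annotate(x)))
#else
#define MY_ANNOTATE(x)
#endif

/* Static analyzers that understand the annotation can flag misuse;
 * other compilers simply see a plain function declaration. */
int MY_ANNOTATE("coroutine_fn") do_coroutine_work(void);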
@ -185,7 +185,7 @@ extern "C" {
 * }
 */
#ifdef __clang__
#define coroutine_fn __attribute__((__annotate__("coroutine_fn")))
#define coroutine_fn QEMU_ANNOTATE("coroutine_fn")
#else
#define coroutine_fn
#endif
@ -195,7 +195,7 @@ extern "C" {
 * but can handle running in non-coroutine context too.
 */
#ifdef __clang__
#define coroutine_mixed_fn __attribute__((__annotate__("coroutine_mixed_fn")))
#define coroutine_mixed_fn QEMU_ANNOTATE("coroutine_mixed_fn")
#else
#define coroutine_mixed_fn
#endif
@ -224,7 +224,7 @@ extern "C" {
 * }
 */
#ifdef __clang__
#define no_coroutine_fn __attribute__((__annotate__("no_coroutine_fn")))
#define no_coroutine_fn QEMU_ANNOTATE("no_coroutine_fn")
#else
#define no_coroutine_fn
#endif

@ -979,6 +979,28 @@ static inline int64_t cpu_get_host_ticks(void)
    return cur - ofs;
}

#elif defined(__riscv) && __riscv_xlen == 32
static inline int64_t cpu_get_host_ticks(void)
{
    uint32_t lo, hi, tmph;
    do {
        asm volatile("RDTIMEH %0\n\t"
                     "RDTIME %1\n\t"
                     "RDTIMEH %2"
                     : "=r"(hi), "=r"(lo), "=r"(tmph));
    } while (unlikely(tmph != hi));
    return lo | (uint64_t)hi << 32;
}

#elif defined(__riscv) && __riscv_xlen > 32
static inline int64_t cpu_get_host_ticks(void)
{
    int64_t val;

    asm volatile("RDTIME %0" : "=r"(val));
    return val;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value. This will be
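Aside: the rv32 RDTIMEH/RDTIME/RDTIMEH sequence above re-reads the high half to detect a carry that ripples between the two 32-bit reads. The same torn-read guard works for any split 64-bit counter; a portable sketch using volatile variables as hypothetical stand-ins for the CSRs:

#include <stdint.h>

/* Pretend these are updated asynchronously, like the RDTIME CSRs. */
static volatile uint32_t timer_hi, timer_lo;

static uint64_t read_timer64(void)
{
    uint32_t hi, lo, tmph;

    do {
        hi = timer_hi;   /* first high read */
        lo = timer_lo;
        tmph = timer_hi; /* differs from hi only if lo wrapped meanwhile */
    } while (tmph != hi);
    return lo | (uint64_t)hi << 32;
}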
@ -402,6 +402,12 @@ enum
    ARM_HWCAP_ARM_VFPD32    = 1 << 19,
    ARM_HWCAP_ARM_LPAE      = 1 << 20,
    ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
    ARM_HWCAP_ARM_FPHP      = 1 << 22,
    ARM_HWCAP_ARM_ASIMDHP   = 1 << 23,
    ARM_HWCAP_ARM_ASIMDDP   = 1 << 24,
    ARM_HWCAP_ARM_ASIMDFHM  = 1 << 25,
    ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26,
    ARM_HWCAP_ARM_I8MM      = 1 << 27,
};

enum {
@ -410,6 +416,8 @@ enum {
    ARM_HWCAP2_ARM_SHA1  = 1 << 2,
    ARM_HWCAP2_ARM_SHA2  = 1 << 3,
    ARM_HWCAP2_ARM_CRC32 = 1 << 4,
    ARM_HWCAP2_ARM_SB    = 1 << 5,
    ARM_HWCAP2_ARM_SSBS  = 1 << 6,
};

/* The commpage only exists for 32 bit kernels */
@ -498,6 +506,16 @@ uint32_t get_elf_hwcap(void)
        }
    }
    GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
    /*
     * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same
     * isar_feature function for both. The kernel reports them as two hwcaps.
     */
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP);
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP);
    GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP);
    GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM);
    GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16);
    GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM);

    return hwcaps;
}
@ -512,6 +530,8 @@ uint32_t get_elf_hwcap2(void)
    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
    GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB);
    GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS);
    return hwcaps;
}

@ -540,6 +560,12 @@ const char *elf_hwcap_str(uint32_t bit)
        [__builtin_ctz(ARM_HWCAP_ARM_VFPD32   )] = "vfpd32",
        [__builtin_ctz(ARM_HWCAP_ARM_LPAE     )] = "lpae",
        [__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM  )] = "evtstrm",
        [__builtin_ctz(ARM_HWCAP_ARM_FPHP     )] = "fphp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP  )] = "asimdhp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP  )] = "asimddp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16",
        [__builtin_ctz(ARM_HWCAP_ARM_I8MM     )] = "i8mm",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
@ -553,6 +579,8 @@ const char *elf_hwcap2_str(uint32_t bit)
        [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1",
        [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2",
        [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32",
        [__builtin_ctz(ARM_HWCAP2_ARM_SB   )] = "sb",
        [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
@ -696,6 +724,20 @@ enum {
    ARM_HWCAP2_A64_SME_B16F32  = 1 << 28,
    ARM_HWCAP2_A64_SME_F32F32  = 1 << 29,
    ARM_HWCAP2_A64_SME_FA64    = 1 << 30,
    ARM_HWCAP2_A64_WFXT        = 1ULL << 31,
    ARM_HWCAP2_A64_EBF16       = 1ULL << 32,
    ARM_HWCAP2_A64_SVE_EBF16   = 1ULL << 33,
    ARM_HWCAP2_A64_CSSC        = 1ULL << 34,
    ARM_HWCAP2_A64_RPRFM       = 1ULL << 35,
    ARM_HWCAP2_A64_SVE2P1      = 1ULL << 36,
    ARM_HWCAP2_A64_SME2        = 1ULL << 37,
    ARM_HWCAP2_A64_SME2P1      = 1ULL << 38,
    ARM_HWCAP2_A64_SME_I16I32  = 1ULL << 39,
    ARM_HWCAP2_A64_SME_BI32I32 = 1ULL << 40,
    ARM_HWCAP2_A64_SME_B16B16  = 1ULL << 41,
    ARM_HWCAP2_A64_SME_F16F16  = 1ULL << 42,
    ARM_HWCAP2_A64_MOPS        = 1ULL << 43,
    ARM_HWCAP2_A64_HBC         = 1ULL << 44,
};

#define ELF_HWCAP   get_elf_hwcap()
@ -773,6 +815,8 @@ uint32_t get_elf_hwcap2(void)
    GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
    GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
    GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
    GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC);
    GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS);

    return hwcaps;
}
@ -844,13 +888,27 @@ const char *elf_hwcap2_str(uint32_t bit)
        [__builtin_ctz(ARM_HWCAP2_A64_RPRES      )] = "rpres",
        [__builtin_ctz(ARM_HWCAP2_A64_MTE3       )] = "mte3",
        [__builtin_ctz(ARM_HWCAP2_A64_SME        )] = "sme",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "sme_i16i64",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "sme_f64f64",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32  )] = "sme_i8i32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "sme_f16f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "sme_b16f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "sme_f32f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64   )] = "sme_fa64",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "smei16i64",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "smef64f64",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32  )] = "smei8i32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "smef16f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "smeb16f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "smef32f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64   )] = "smefa64",
        [__builtin_ctz(ARM_HWCAP2_A64_WFXT       )] = "wfxt",
        [__builtin_ctzll(ARM_HWCAP2_A64_EBF16      )] = "ebf16",
        [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16  )] = "sveebf16",
        [__builtin_ctzll(ARM_HWCAP2_A64_CSSC       )] = "cssc",
        [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM      )] = "rprfm",
        [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1     )] = "sve2p1",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME2       )] = "sme2",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1     )] = "sme2p1",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16",
        [__builtin_ctzll(ARM_HWCAP2_A64_MOPS       )] = "mops",
        [__builtin_ctzll(ARM_HWCAP2_A64_HBC        )] = "hbc",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
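Aside: the hwcap string tables above rely on __builtin_ctz/__builtin_ctzll folding a single-bit flag to its bit index at compile time, so the designated initializers can be keyed by the flag itself rather than a magic index. A tiny standalone illustration:

#include <stdio.h>

enum { CAP_FOO = 1 << 0, CAP_BAR = 1 << 3 };

static const char *cap_str[] = {
    [__builtin_ctz(CAP_FOO)] = "foo", /* index 0 */
    [__builtin_ctz(CAP_BAR)] = "bar", /* index 3 */
};

int main(void)
{
    printf("%s\n", cap_str[__builtin_ctz(CAP_BAR)]); /* prints "bar" */
    return 0;
}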
@ -1763,8 +1763,9 @@ if gnutls.found()
                       method: 'pkg-config')
endif
keyutils = not_found
if get_option('keyring').enabled()
  keyutils = dependency('libkeyutils', required: false, method: 'pkg-config')
if not get_option('libkeyutils').auto() or have_block
  keyutils = dependency('libkeyutils', required: get_option('libkeyutils'),
                        method: 'pkg-config')
endif

has_gettid = cc.has_function('gettid')
@ -4266,6 +4267,7 @@ endif
summary_info += {'AF_ALG support': have_afalg}
summary_info += {'rng-none': get_option('rng_none')}
summary_info += {'Linux keyring': have_keyring}
summary_info += {'Linux keyutils': keyutils}
summary(summary_info, bool_yn: true, section: 'Crypto')

# UI

@ -121,6 +121,8 @@ option('avx512bw', type: 'feature', value: 'auto',
       description: 'AVX512BW optimizations')
option('keyring', type: 'feature', value: 'auto',
       description: 'Linux keyring support')
option('libkeyutils', type: 'feature', value: 'auto',
       description: 'Linux keyutils support')

option('af_xdp', type : 'feature', value : 'auto',
       description: 'AF_XDP network backend support')

@ -98,6 +98,7 @@ static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
static void migrate_fd_cancel(MigrationState *s);
static int await_return_path_close_on_source(MigrationState *s);

static bool migration_needs_multiple_sockets(void)
{
@ -153,6 +154,7 @@ void migration_object_init(void)
    qemu_sem_init(&current_incoming->postcopy_qemufile_dst_done, 0);

    qemu_mutex_init(&current_incoming->page_request_mutex);
    qemu_cond_init(&current_incoming->page_request_cond);
    current_incoming->page_requested = g_tree_new(page_request_addr_cmp);

    migration_object_check(current_migration, &error_fatal);
@ -367,7 +369,7 @@ int migrate_send_rp_req_pages(MigrationIncomingState *mis,
         * things like g_tree_lookup() will return TRUE (1) when found.
         */
        g_tree_insert(mis->page_requested, aligned, (gpointer)1);
        mis->page_requested_count++;
        qatomic_inc(&mis->page_requested_count);
        trace_postcopy_page_req_add(aligned, mis->page_requested_count);
    }
}
@ -1177,11 +1179,11 @@ static void migrate_fd_cleanup(MigrationState *s)
        qemu_fclose(tmp);
    }

    if (s->postcopy_qemufile_src) {
        migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src);
        qemu_fclose(s->postcopy_qemufile_src);
        s->postcopy_qemufile_src = NULL;
    }
    /*
     * We already cleaned up to_dst_file, so errors from the return
     * path might be due to that, ignore them.
     */
    await_return_path_close_on_source(s);

    assert(!migration_is_active(s));

@ -1245,7 +1247,7 @@ static void migrate_fd_error(MigrationState *s, const Error *error)
static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;

    trace_migrate_fd_cancel();

    WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
@ -1271,11 +1273,13 @@ static void migrate_fd_cancel(MigrationState *s)
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    if (s->state == MIGRATION_STATUS_CANCELLING) {
        WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
            if (s->to_dst_file) {
                qemu_file_shutdown(s->to_dst_file);
            }
        }
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;
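Aside: the cancel path above stops caching the file pointer in a local and instead re-reads it under qemu_file_lock, so a concurrent cleanup cannot free the file between the state check and the shutdown. A simplified standalone sketch of the pattern, with pthread stand-ins rather than QEMU types:

#include <pthread.h>
#include <stddef.h>

struct chan { int fd; };
static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
static struct chan *to_dst; /* may be cleared by another thread */

static void shutdown_chan(struct chan *c) { (void)c; /* e.g. shutdown(c->fd) */ }

static void cancel(void)
{
    pthread_mutex_lock(&chan_lock);
    if (to_dst) {              /* re-check under the lock, not before it */
        shutdown_chan(to_dst); /* safe: cleanup also takes chan_lock */
    }
    pthread_mutex_unlock(&chan_lock);
}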
@ -1535,12 +1539,14 @@ void qmp_migrate_pause(Error **errp)
{
    MigrationState *ms = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    int ret;
    int ret = 0;

    if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        /* Source side, during postcopy */
        qemu_mutex_lock(&ms->qemu_file_lock);
        ret = qemu_file_shutdown(ms->to_dst_file);
        if (ms->to_dst_file) {
            ret = qemu_file_shutdown(ms->to_dst_file);
        }
        qemu_mutex_unlock(&ms->qemu_file_lock);
        if (ret) {
            error_setg(errp, "Failed to pause source migration");
@ -1788,18 +1794,6 @@ static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
        }
    }

/* Return true to retry, false to quit */
static bool postcopy_pause_return_path_thread(MigrationState *s)
{
    trace_postcopy_pause_return_path();

    qemu_sem_wait(&s->postcopy_pause_rp_sem);

    trace_postcopy_pause_return_path_continued();

    return true;
}

static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
@ -1883,7 +1877,6 @@ static void *source_return_path_thread(void *opaque)
    trace_source_return_path_thread_entry();
    rcu_register_thread();

retry:
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
@ -2005,38 +1998,17 @@ retry:
    }

out:
    res = qemu_file_get_error(rp);
    if (res) {
    if (res && migration_in_postcopy()) {
        /*
         * Maybe there is something we can do: it looks like a
         * network down issue, and we pause for a recovery.
         */
        migration_release_dst_files(ms);
        rp = NULL;
        if (postcopy_pause_return_path_thread(ms)) {
            /*
             * Reload rp, reset the rest. Referencing it is safe since
             * it's reset only by us above, or when migration completes
             */
            rp = ms->rp_state.from_dst_file;
            ms->rp_state.error = false;
            goto retry;
        }
    }

    if (qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
    migration_release_dst_files(ms);
    rcu_unregister_thread();
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms,
                                      bool create_thread)
static int open_return_path_on_source(MigrationState *ms)
{
    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
@ -2045,11 +2017,6 @@ static int open_return_path_on_source(MigrationState *ms,

    trace_open_return_path_on_source();

    if (!create_thread) {
        /* We're done */
        return 0;
    }

    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
    ms->rp_state.rp_thread_created = true;
@ -2062,24 +2029,39 @@ static int open_return_path_on_source(MigrationState *ms,
/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    int ret;

    if (!ms->rp_state.rp_thread_created) {
        return 0;
    }

    trace_migration_return_path_end_before();

    /*
     * If this is a normal exit then the destination will send a SHUT
     * and the rp_thread will exit, however if there's an error we
     * need to cause it to exit. shutdown(2), if we have it, will
     * cause it to unblock if it's stuck waiting for the destination.
     */
    WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
        if (ms->to_dst_file && ms->rp_state.from_dst_file &&
            qemu_file_get_error(ms->to_dst_file)) {
            qemu_file_shutdown(ms->rp_state.from_dst_file);
        }
    }

    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    ms->rp_state.rp_thread_created = false;
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;

    ret = ms->rp_state.error;
    ms->rp_state.error = false;

    migration_release_dst_files(ms);

    trace_migration_return_path_end_after(ret);
    return ret;
}

static inline void
@ -2375,20 +2357,8 @@ static void migration_completion(MigrationState *s)
        goto fail;
    }

    /*
     * If rp was opened we must clean up the thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send its status in
     * a SHUT command).
     */
    if (s->rp_state.rp_thread_created) {
        int rp_error;
        trace_migration_return_path_end_before();
        rp_error = await_return_path_close_on_source(s);
        trace_migration_return_path_end_after(rp_error);
        if (rp_error) {
            goto fail;
        }
    if (await_return_path_close_on_source(s)) {
        goto fail;
    }

    if (qemu_file_get_error(s->to_dst_file)) {
@ -2565,6 +2535,13 @@ static MigThrError postcopy_pause(MigrationState *s)
        qemu_file_shutdown(file);
        qemu_fclose(file);

        /*
         * We're already pausing, so ignore any errors on the return
         * path and just wait for the thread to finish. It will be
         * re-created when we resume.
         */
        await_return_path_close_on_source(s);

        migrate_set_state(&s->state, s->state,
                          MIGRATION_STATUS_POSTCOPY_PAUSED);

@ -2582,12 +2559,6 @@ static MigThrError postcopy_pause(MigrationState *s)
        if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
            /* Woken up by a recover procedure. Give it a shot */

            /*
             * Firstly, let's wake up the return path now, with a new
             * return path channel.
             */
            qemu_sem_post(&s->postcopy_pause_rp_sem);

            /* Do the resume logic */
            if (postcopy_do_resume(s) == 0) {
                /* Let's continue! */
@ -3277,7 +3248,7 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
     * QEMU uses the return path.
     */
    if (migrate_postcopy_ram() || migrate_return_path()) {
        if (open_return_path_on_source(s, !resume)) {
        if (open_return_path_on_source(s)) {
            error_setg(&local_err, "Unable to open return-path for postcopy");
            migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
            migrate_set_error(s, local_err);
@ -3341,7 +3312,6 @@ static void migration_instance_finalize(Object *obj)
    qemu_sem_destroy(&ms->rate_limit_sem);
    qemu_sem_destroy(&ms->pause_sem);
    qemu_sem_destroy(&ms->postcopy_pause_sem);
    qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
    qemu_sem_destroy(&ms->rp_state.rp_sem);
    qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
    qemu_sem_destroy(&ms->postcopy_qemufile_src_sem);
@ -3361,7 +3331,6 @@ static void migration_instance_init(Object *obj)
    migrate_params_init(&ms->parameters);

    qemu_sem_init(&ms->postcopy_pause_sem, 0);
    qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
    qemu_sem_init(&ms->rp_state.rp_sem, 0);
    qemu_sem_init(&ms->rp_state.rp_pong_acks, 0);
    qemu_sem_init(&ms->rate_limit_sem, 0);

@ -196,7 +196,10 @@ struct MigrationIncomingState {

    /* A tree of pages that we requested to the source VM */
    GTree *page_requested;
    /* For debugging purpose only, but would be nice to keep */
    /*
     * For postcopy only, count the number of requested page faults that
     * still haven't been resolved.
     */
    int page_requested_count;
    /*
     * The mutex helps to maintain the requested pages that we sent to the
@ -210,6 +213,14 @@ struct MigrationIncomingState {
     * contains valid information.
     */
    QemuMutex page_request_mutex;
    /*
     * If postcopy preempt is enabled, there is a chance that the main
     * thread finished loading its data before the preempt channel has
     * finished loading the urgent pages. If that happens, the two threads
     * will use this condvar to synchronize, so the main thread will always
     * wait until all pages are received.
     */
    QemuCond page_request_cond;

    /*
     * Number of devices that have yet to approve switchover. When this reaches
@ -382,7 +393,6 @@ struct MigrationState {

    /* Needed by postcopy-pause state */
    QemuSemaphore postcopy_pause_sem;
    QemuSemaphore postcopy_pause_rp_sem;
    /*
     * Whether we abort the migration if decompression errors are
     * detected at the destination. It is left at false for qemu

@ -599,6 +599,30 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
    if (mis->preempt_thread_status == PREEMPT_THREAD_CREATED) {
        /* Notify the fast load thread to quit */
        mis->preempt_thread_status = PREEMPT_THREAD_QUIT;
        /*
         * Update preempt_thread_status before reading count. Note: the
         * mutex lock only provides ACQUIRE semantics, and it doesn't stop
         * this write from being reordered after reading the count.
         */
        smp_mb();
        /*
         * It's possible that the preempt thread is still handling the last
         * pages to arrive which were requested by guest page faults.
         * Make sure nothing is left behind by waiting on the condvar if
         * that unlikely case happens.
         */
        WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
            if (qatomic_read(&mis->page_requested_count)) {
                /*
                 * It is guaranteed to receive a signal later, because the
                 * count > 0 now, so it's destined to be decreased to zero
                 * very soon by the preempt thread.
                 */
                qemu_cond_wait(&mis->page_request_cond,
                               &mis->page_request_mutex);
            }
        }
        /* Notify the fast load thread to quit */
        if (mis->postcopy_qemufile_dst) {
            qemu_file_shutdown(mis->postcopy_qemufile_dst);
        }
@ -1277,8 +1301,20 @@ static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
     */
    if (g_tree_lookup(mis->page_requested, host_addr)) {
        g_tree_remove(mis->page_requested, host_addr);
        mis->page_requested_count--;
        int left_pages = qatomic_dec_fetch(&mis->page_requested_count);

        trace_postcopy_page_req_del(host_addr, mis->page_requested_count);
        /* Order the update of count and read of preempt status */
        smp_mb();
        if (mis->preempt_thread_status == PREEMPT_THREAD_QUIT &&
            left_pages == 0) {
            /*
             * This probably means the main thread is waiting for us.
             * Notify that we've finished receiving the last requested
             * page.
             */
            qemu_cond_signal(&mis->page_request_cond);
        }
    }
    qemu_mutex_unlock(&mis->page_request_mutex);
    mark_postcopy_blocktime_end((uintptr_t)host_addr);
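Aside: the status/count handshake above is a classic two-thread pattern. Each side publishes its write, executes a full barrier, then reads the other side's variable, so the interleaving where both sides miss each other's update is impossible. A minimal model using C11 atomics in place of QEMU's qatomic and smp_mb() helpers:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int preempt_status;        /* 0 = created, 1 = quit */
static atomic_int page_requested_count;

/* Main thread: announce shutdown, then check for outstanding pages. */
static bool main_thread_must_wait(void)
{
    atomic_store_explicit(&preempt_status, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);           /* like smp_mb() */
    return atomic_load_explicit(&page_requested_count,
                                memory_order_relaxed) != 0;
}

/* Preempt thread: resolve one page, then decide whether to wake the main
 * thread (a condvar signal in the real code). */
static bool preempt_thread_must_signal(void)
{
    int left = atomic_fetch_sub_explicit(&page_requested_count, 1,
                                         memory_order_relaxed) - 1;
    atomic_thread_fence(memory_order_seq_cst);           /* like smp_mb() */
    return left == 0 &&
           atomic_load_explicit(&preempt_status, memory_order_relaxed) == 1;
}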
@ -1,5 +1,5 @@
/*
 * QEMU Block driver for NBD
 * QEMU Block driver for NBD
 *
 * Copyright (c) 2021 Virtuozzo International GmbH.
 *
@ -93,7 +93,7 @@ NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr,
        .do_negotiation = do_negotiation,

        .initial_info.request_sizes = true,
        .initial_info.structured_reply = true,
        .initial_info.mode = NBD_MODE_STRUCTURED,
        .initial_info.base_allocation = true,
        .initial_info.x_dirty_bitmap = g_strdup(x_dirty_bitmap),
        .initial_info.name = g_strdup(export_name ?: "")
nbd/client.c
@ -879,7 +879,7 @@ static int nbd_list_meta_contexts(QIOChannel *ioc,
 */
static int nbd_start_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                               const char *hostname, QIOChannel **outioc,
                               bool structured_reply, bool *zeroes,
                               NBDMode max_mode, bool *zeroes,
                               Error **errp)
{
    ERRP_GUARD();
@ -953,7 +953,7 @@ static int nbd_start_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
    if (fixedNewStyle) {
        int result = 0;

        if (structured_reply) {
        if (max_mode >= NBD_MODE_STRUCTURED) {
            result = nbd_request_simple_option(ioc,
                                               NBD_OPT_STRUCTURED_REPLY,
                                               false, errp);
@ -1022,20 +1022,19 @@ int nbd_receive_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
    trace_nbd_receive_negotiate_name(info->name);

    result = nbd_start_negotiate(ioc, tlscreds, hostname, outioc,
                                 info->structured_reply, &zeroes, errp);
                                 info->mode, &zeroes, errp);
    if (result < 0) {
        return result;
    }

    info->structured_reply = false;
    info->mode = result;
    info->base_allocation = false;
    if (tlscreds && *outioc) {
        ioc = *outioc;
    }

    switch ((NBDMode)result) {
    switch (info->mode) {
    case NBD_MODE_STRUCTURED:
        info->structured_reply = true;
        if (base_allocation) {
            result = nbd_negotiate_simple_meta_context(ioc, info, errp);
            if (result < 0) {
@ -1144,8 +1143,8 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
    QIOChannel *sioc = NULL;

    *info = NULL;
    result = nbd_start_negotiate(ioc, tlscreds, hostname, &sioc, true,
                                 NULL, errp);
    result = nbd_start_negotiate(ioc, tlscreds, hostname, &sioc,
                                 NBD_MODE_STRUCTURED, NULL, errp);
    if (tlscreds && sioc) {
        ioc = sioc;
    }
@ -1176,7 +1175,7 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
            memset(&array[count - 1], 0, sizeof(*array));
            array[count - 1].name = name;
            array[count - 1].description = desc;
            array[count - 1].structured_reply = result == NBD_MODE_STRUCTURED;
            array[count - 1].mode = result;
        }

        for (i = 0; i < count; i++) {
@ -1209,6 +1208,7 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
        /* Lone export name is implied, but we can parse length and flags */
        array = g_new0(NBDExportInfo, 1);
        array->name = g_strdup("");
        array->mode = NBD_MODE_OLDSTYLE;
        count = 1;

        if (nbd_negotiate_finish_oldstyle(ioc, array, errp) < 0) {
@ -1218,7 +1218,7 @@ int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
    /* Send NBD_CMD_DISC as a courtesy to the server, but ignore all
     * errors now that we have the information we wanted. */
    if (nbd_drop(ioc, 124, NULL) == 0) {
        NBDRequest request = { .type = NBD_CMD_DISC };
        NBDRequest request = { .type = NBD_CMD_DISC, .mode = result };

        nbd_send_request(ioc, &request);
    }
@ -1348,6 +1348,8 @@ int nbd_send_request(QIOChannel *ioc, NBDRequest *request)
{
    uint8_t buf[NBD_REQUEST_SIZE];

    assert(request->mode <= NBD_MODE_STRUCTURED); /* TODO handle extended */
    assert(request->len <= UINT32_MAX);
    trace_nbd_send_request(request->from, request->len, request->cookie,
                           request->flags, request->type,
                           nbd_cmd_lookup(request->type));
nbd/common.c
@ -79,6 +79,8 @@ const char *nbd_opt_lookup(uint32_t opt)
        return "list meta context";
    case NBD_OPT_SET_META_CONTEXT:
        return "set meta context";
    case NBD_OPT_EXTENDED_HEADERS:
        return "extended headers";
    default:
        return "<unknown>";
    }
@ -112,6 +114,10 @@ const char *nbd_rep_lookup(uint32_t rep)
        return "server shutting down";
    case NBD_REP_ERR_BLOCK_SIZE_REQD:
        return "block size required";
    case NBD_REP_ERR_TOO_BIG:
        return "option payload too big";
    case NBD_REP_ERR_EXT_HEADER_REQD:
        return "extended headers required";
    default:
        return "<unknown>";
    }
@ -170,7 +176,9 @@ const char *nbd_reply_type_lookup(uint16_t type)
    case NBD_REPLY_TYPE_OFFSET_HOLE:
        return "hole";
    case NBD_REPLY_TYPE_BLOCK_STATUS:
        return "block status";
        return "block status (32-bit)";
    case NBD_REPLY_TYPE_BLOCK_STATUS_EXT:
        return "block status (64-bit)";
    case NBD_REPLY_TYPE_ERROR:
        return "generic error";
    case NBD_REPLY_TYPE_ERROR_OFFSET:
@ -261,6 +269,8 @@ const char *nbd_mode_lookup(NBDMode mode)
        return "simple headers";
    case NBD_MODE_STRUCTURED:
        return "structured replies";
    case NBD_MODE_EXTENDED:
        return "extended headers";
    default:
        return "<unknown>";
    }

@ -1,7 +1,7 @@
/*
 * NBD Internal Declarations
 *
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright Red Hat
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
@ -44,7 +44,6 @@
#define NBD_OLDSTYLE_NEGOTIATE_SIZE (8 + 8 + 8 + 4 + 124)

#define NBD_INIT_MAGIC    0x4e42444d41474943LL /* ASCII "NBDMAGIC" */
#define NBD_REQUEST_MAGIC 0x25609513
#define NBD_OPTS_MAGIC    0x49484156454F5054LL /* ASCII "IHAVEOPT" */
#define NBD_CLIENT_MAGIC  0x0000420281861253LL
#define NBD_REP_MAGIC     0x0003e889045565a9LL
nbd/server.c
@ -143,7 +143,7 @@ struct NBDClient {

    uint32_t check_align; /* If non-zero, check for aligned client requests */

    bool structured_reply;
    NBDMode mode;
    NBDExportMetaContexts export_meta;

    uint32_t opt; /* Current option being negotiated */
@ -502,7 +502,7 @@ static int nbd_negotiate_handle_export_name(NBDClient *client, bool no_zeroes,
    }

    myflags = client->exp->nbdflags;
    if (client->structured_reply) {
    if (client->mode >= NBD_MODE_STRUCTURED) {
        myflags |= NBD_FLAG_SEND_DF;
    }
    trace_nbd_negotiate_new_style_size_flags(client->exp->size, myflags);
@ -687,7 +687,7 @@ static int nbd_negotiate_handle_info(NBDClient *client, Error **errp)

    /* Send NBD_INFO_EXPORT always */
    myflags = exp->nbdflags;
    if (client->structured_reply) {
    if (client->mode >= NBD_MODE_STRUCTURED) {
        myflags |= NBD_FLAG_SEND_DF;
    }
    trace_nbd_negotiate_new_style_size_flags(exp->size, myflags);
@ -985,7 +985,8 @@ static int nbd_negotiate_meta_queries(NBDClient *client,
    size_t i;
    size_t count = 0;

    if (client->opt == NBD_OPT_SET_META_CONTEXT && !client->structured_reply) {
    if (client->opt == NBD_OPT_SET_META_CONTEXT &&
        client->mode < NBD_MODE_STRUCTURED) {
        return nbd_opt_invalid(client, errp,
                               "request option '%s' when structured reply "
                               "is not negotiated",
@ -1122,10 +1123,12 @@ static int nbd_negotiate_options(NBDClient *client, Error **errp)
    if (nbd_read32(client->ioc, &flags, "flags", errp) < 0) {
        return -EIO;
    }
    client->mode = NBD_MODE_EXPORT_NAME;
    trace_nbd_negotiate_options_flags(flags);
    if (flags & NBD_FLAG_C_FIXED_NEWSTYLE) {
        fixedNewstyle = true;
        flags &= ~NBD_FLAG_C_FIXED_NEWSTYLE;
        client->mode = NBD_MODE_SIMPLE;
    }
    if (flags & NBD_FLAG_C_NO_ZEROES) {
        no_zeroes = true;
@ -1162,7 +1165,7 @@ static int nbd_negotiate_options(NBDClient *client, Error **errp)
        client->optlen = length;

        if (length > NBD_MAX_BUFFER_SIZE) {
            error_setg(errp, "len (%" PRIu32" ) is larger than max len (%u)",
            error_setg(errp, "len (%" PRIu32 ") is larger than max len (%u)",
                       length, NBD_MAX_BUFFER_SIZE);
            return -EINVAL;
        }
@ -1261,13 +1264,13 @@ static int nbd_negotiate_options(NBDClient *client, Error **errp)
        case NBD_OPT_STRUCTURED_REPLY:
            if (length) {
                ret = nbd_reject_length(client, false, errp);
            } else if (client->structured_reply) {
            } else if (client->mode >= NBD_MODE_STRUCTURED) {
                ret = nbd_negotiate_send_rep_err(
                    client, NBD_REP_ERR_INVALID, errp,
                    "structured reply already negotiated");
            } else {
                ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
                client->structured_reply = true;
                client->mode = NBD_MODE_STRUCTURED;
            }
            break;
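Aside: the repeated `client->mode >= NBD_MODE_STRUCTURED` tests above work because the negotiation levels form a strict ladder, so replacing one bool per feature with a single ordered enum turns every capability check into a comparison. A minimal sketch of the design choice, with illustrative names rather than the QEMU ones:

typedef enum Mode {
    MODE_OLDSTYLE,
    MODE_EXPORT_NAME,
    MODE_SIMPLE,
    MODE_STRUCTURED,
    MODE_EXTENDED,
} Mode;

static int df_flag_supported(Mode mode)
{
    /* Structured replies and anything newer can fragment reads. */
    return mode >= MODE_STRUCTURED;
}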
@ -1434,7 +1437,7 @@ static int coroutine_fn nbd_receive_request(NBDClient *client, NBDRequest *request,
    request->type   = lduw_be_p(buf + 6);
    request->cookie = ldq_be_p(buf + 8);
    request->from   = ldq_be_p(buf + 16);
    request->len = ldl_be_p(buf + 24);
    request->len = (uint32_t)ldl_be_p(buf + 24); /* widen 32 to 64 bits */

    trace_nbd_receive_request(magic, request->flags, request->type,
                              request->from, request->len);
@ -1884,7 +1887,7 @@ static int coroutine_fn nbd_co_send_simple_reply(NBDClient *client,
                                                 NBDRequest *request,
                                                 uint32_t error,
                                                 void *data,
                                                 size_t len,
                                                 uint64_t len,
                                                 Error **errp)
{
    NBDSimpleReply reply;
@ -1895,7 +1898,10 @@ static int coroutine_fn nbd_co_send_simple_reply(NBDClient *client,
    };

    assert(!len || !nbd_err);
    assert(!client->structured_reply || request->type != NBD_CMD_READ);
    assert(len <= NBD_MAX_BUFFER_SIZE);
    assert(client->mode < NBD_MODE_STRUCTURED ||
           (client->mode == NBD_MODE_STRUCTURED &&
            request->type != NBD_CMD_READ));
    trace_nbd_co_send_simple_reply(request->cookie, nbd_err,
                                   nbd_err_lookup(nbd_err), len);
    set_be_simple_reply(&reply, nbd_err, request->cookie);
@ -1951,7 +1957,7 @@ static int coroutine_fn nbd_co_send_chunk_read(NBDClient *client,
                                               NBDRequest *request,
                                               uint64_t offset,
                                               void *data,
                                               size_t size,
                                               uint64_t size,
                                               bool final,
                                               Error **errp)
{
@ -1963,7 +1969,7 @@ static int coroutine_fn nbd_co_send_chunk_read(NBDClient *client,
        {.iov_base = data, .iov_len = size}
    };

    assert(size);
    assert(size && size <= NBD_MAX_BUFFER_SIZE);
    trace_nbd_co_send_chunk_read(request->cookie, offset, data, size);
    set_be_chunk(client, iov, 3, final ? NBD_REPLY_FLAG_DONE : 0,
                 NBD_REPLY_TYPE_OFFSET_DATA, request);
@ -1971,7 +1977,7 @@ static int coroutine_fn nbd_co_send_chunk_read(NBDClient *client,

    return nbd_co_send_iov(client, iov, 3, errp);
}

static int coroutine_fn nbd_co_send_chunk_error(NBDClient *client,
                                                NBDRequest *request,
                                                uint32_t error,
@ -2006,13 +2012,14 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
                                                NBDRequest *request,
                                                uint64_t offset,
                                                uint8_t *data,
                                                size_t size,
                                                uint64_t size,
                                                Error **errp)
{
    int ret = 0;
    NBDExport *exp = client->exp;
    size_t progress = 0;

    assert(size <= NBD_MAX_BUFFER_SIZE);
    while (progress < size) {
        int64_t pnum;
        int status = blk_co_block_status_above(exp->common.blk, NULL,
@ -2067,7 +2074,7 @@ static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
}

typedef struct NBDExtentArray {
    NBDExtent *extents;
    NBDExtent32 *extents;
    unsigned int nb_alloc;
    unsigned int count;
    uint64_t total_length;
@ -2080,7 +2087,7 @@ static NBDExtentArray *nbd_extent_array_new(unsigned int nb_alloc)
    NBDExtentArray *ea = g_new0(NBDExtentArray, 1);

    ea->nb_alloc = nb_alloc;
    ea->extents = g_new(NBDExtent, nb_alloc);
    ea->extents = g_new(NBDExtent32, nb_alloc);
    ea->can_add = true;

    return ea;
@ -2143,7 +2150,7 @@ static int nbd_extent_array_add(NBDExtentArray *ea,
    }

    ea->total_length += length;
    ea->extents[ea->count] = (NBDExtent) {.length = length, .flags = flags};
    ea->extents[ea->count] = (NBDExtent32) {.length = length, .flags = flags};
    ea->count++;

    return 0;
@ -2310,11 +2317,16 @@ static int coroutine_fn nbd_co_send_bitmap(NBDClient *client,
 * to the client (although the caller may still need to disconnect after
 * reporting the error).
 */
static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,
static int coroutine_fn nbd_co_receive_request(NBDRequestData *req,
                                               NBDRequest *request,
                                               Error **errp)
{
    NBDClient *client = req->client;
    int valid_flags;
    bool check_length = false;
    bool check_rofs = false;
    bool allocate_buffer = false;
    unsigned payload_len = 0;
    int valid_flags = NBD_CMD_FLAG_FUA;
    int ret;

    g_assert(qemu_in_coroutine());
@ -2326,60 +2338,94 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,

    trace_nbd_co_receive_request_decode_type(request->cookie, request->type,
                                             nbd_cmd_lookup(request->type));

    if (request->type != NBD_CMD_WRITE) {
        /* No payload, we are ready to read the next request. */
        req->complete = true;
    }

    if (request->type == NBD_CMD_DISC) {
    switch (request->type) {
    case NBD_CMD_DISC:
        /* Special case: we're going to disconnect without a reply,
         * whether or not flags, from, or len are bogus */
        req->complete = true;
        return -EIO;

    case NBD_CMD_READ:
        if (client->mode >= NBD_MODE_STRUCTURED) {
            valid_flags |= NBD_CMD_FLAG_DF;
        }
        check_length = true;
        allocate_buffer = true;
        break;

    case NBD_CMD_WRITE:
        payload_len = request->len;
        check_length = true;
        allocate_buffer = true;
        check_rofs = true;
        break;

    case NBD_CMD_FLUSH:
        break;

    case NBD_CMD_TRIM:
        check_rofs = true;
        break;

    case NBD_CMD_CACHE:
        check_length = true;
        break;

    case NBD_CMD_WRITE_ZEROES:
        valid_flags |= NBD_CMD_FLAG_NO_HOLE | NBD_CMD_FLAG_FAST_ZERO;
        check_rofs = true;
        break;

    case NBD_CMD_BLOCK_STATUS:
        valid_flags |= NBD_CMD_FLAG_REQ_ONE;
        break;

    default:
        /* Unrecognized, will fail later */
        ;
    }

    if (request->type == NBD_CMD_READ || request->type == NBD_CMD_WRITE ||
        request->type == NBD_CMD_CACHE)
    {
        if (request->len > NBD_MAX_BUFFER_SIZE) {
            error_setg(errp, "len (%" PRIu32" ) is larger than max len (%u)",
                       request->len, NBD_MAX_BUFFER_SIZE);
            return -EINVAL;
        }

        if (request->type != NBD_CMD_CACHE) {
            req->data = blk_try_blockalign(client->exp->common.blk,
                                           request->len);
            if (req->data == NULL) {
                error_setg(errp, "No memory");
                return -ENOMEM;
            }
    /* Payload and buffer handling. */
    if (!payload_len) {
        req->complete = true;
    }
    if (check_length && request->len > NBD_MAX_BUFFER_SIZE) {
        /* READ, WRITE, CACHE */
        error_setg(errp, "len (%" PRIu64 ") is larger than max len (%u)",
                   request->len, NBD_MAX_BUFFER_SIZE);
        return -EINVAL;
    }
    if (allocate_buffer) {
        /* READ, WRITE */
        req->data = blk_try_blockalign(client->exp->common.blk,
                                       request->len);
        if (req->data == NULL) {
            error_setg(errp, "No memory");
            return -ENOMEM;
        }
    }

    if (request->type == NBD_CMD_WRITE) {
        if (nbd_read(client->ioc, req->data, request->len, "CMD_WRITE data",
                     errp) < 0)
        {
    if (payload_len) {
        /* WRITE */
        assert(req->data);
        ret = nbd_read(client->ioc, req->data, payload_len,
                       "CMD_WRITE data", errp);
        if (ret < 0) {
            return -EIO;
        }
        req->complete = true;

        trace_nbd_co_receive_request_payload_received(request->cookie,
                                                      request->len);
                                                      payload_len);
    }

    /* Sanity checks. */
    if (client->exp->nbdflags & NBD_FLAG_READ_ONLY &&
        (request->type == NBD_CMD_WRITE ||
         request->type == NBD_CMD_WRITE_ZEROES ||
         request->type == NBD_CMD_TRIM)) {
    if (client->exp->nbdflags & NBD_FLAG_READ_ONLY && check_rofs) {
        /* WRITE, TRIM, WRITE_ZEROES */
        error_setg(errp, "Export is read-only");
        return -EROFS;
    }
    if (request->from > client->exp->size ||
        request->len > client->exp->size - request->from) {
        error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu32
        error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu64
                   ", Size: %" PRIu64, request->from, request->len,
                   client->exp->size);
        return (request->type == NBD_CMD_WRITE ||
@ -2396,14 +2442,6 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,
                                           request->len,
                                           client->check_align);
    }
    valid_flags = NBD_CMD_FLAG_FUA;
    if (request->type == NBD_CMD_READ && client->structured_reply) {
        valid_flags |= NBD_CMD_FLAG_DF;
    } else if (request->type == NBD_CMD_WRITE_ZEROES) {
        valid_flags |= NBD_CMD_FLAG_NO_HOLE | NBD_CMD_FLAG_FAST_ZERO;
    } else if (request->type == NBD_CMD_BLOCK_STATUS) {
        valid_flags |= NBD_CMD_FLAG_REQ_ONE;
    }
    if (request->flags & ~valid_flags) {
        error_setg(errp, "unsupported flags for command %s (got 0x%x)",
                   nbd_cmd_lookup(request->type), request->flags);
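Aside: the refactor above replaces scattered per-command if-chains with one switch that only records policy (length check, buffer allocation, read-only check, payload length), followed by shared validation code. A compact standalone illustration of the same shape, with stand-in names:

#include <stdbool.h>
#include <stdint.h>

enum cmd { CMD_READ, CMD_WRITE, CMD_FLUSH };

static int validate(enum cmd type, uint64_t len, bool read_only)
{
    bool check_length = false, check_rofs = false;

    /* Classify once; the switch sets flags instead of acting directly. */
    switch (type) {
    case CMD_READ:
        check_length = true;
        break;
    case CMD_WRITE:
        check_length = true;
        check_rofs = true;
        break;
    case CMD_FLUSH:
        break;
    }

    /* Shared checks run in one place for every command. */
    if (check_length && len > (32 * 1024 * 1024)) {
        return -1; /* request too long */
    }
    if (check_rofs && read_only) {
        return -2; /* export is read-only */
    }
    return 0;
}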
@ -2423,7 +2461,7 @@ static coroutine_fn int nbd_send_generic_reply(NBDClient *client,
                                               const char *error_msg,
                                               Error **errp)
{
    if (client->structured_reply && ret < 0) {
    if (client->mode >= NBD_MODE_STRUCTURED && ret < 0) {
        return nbd_co_send_chunk_error(client, request, -ret, error_msg, errp);
    } else {
        return nbd_co_send_simple_reply(client, request, ret < 0 ? -ret : 0,
@ -2441,6 +2479,7 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
    NBDExport *exp = client->exp;

    assert(request->type == NBD_CMD_READ);
    assert(request->len <= NBD_MAX_BUFFER_SIZE);

    /* XXX: NBD Protocol only documents use of FUA with WRITE */
    if (request->flags & NBD_CMD_FLAG_FUA) {
@ -2451,8 +2490,8 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
        }
    }

    if (client->structured_reply && !(request->flags & NBD_CMD_FLAG_DF) &&
        request->len)
    if (client->mode >= NBD_MODE_STRUCTURED &&
        !(request->flags & NBD_CMD_FLAG_DF) && request->len)
    {
        return nbd_co_send_sparse_read(client, request, request->from,
                                       data, request->len, errp);
@ -2464,7 +2503,7 @@ static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
                                      "reading from file failed", errp);
    }

    if (client->structured_reply) {
    if (client->mode >= NBD_MODE_STRUCTURED) {
        if (request->len) {
            return nbd_co_send_chunk_read(client, request, request->from, data,
                                          request->len, true, errp);
@ -2491,6 +2530,7 @@ static coroutine_fn int nbd_do_cmd_cache(NBDClient *client, NBDRequest *request,
    NBDExport *exp = client->exp;

    assert(request->type == NBD_CMD_CACHE);
    assert(request->len <= NBD_MAX_BUFFER_SIZE);

    ret = blk_co_preadv(exp->common.blk, request->from, request->len,
                        NULL, BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH);
@ -2524,6 +2564,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
        if (request->flags & NBD_CMD_FLAG_FUA) {
            flags |= BDRV_REQ_FUA;
        }
        assert(request->len <= NBD_MAX_BUFFER_SIZE);
        ret = blk_co_pwrite(exp->common.blk, request->from, request->len, data,
                            flags);
        return nbd_send_generic_reply(client, request, ret,
@ -2567,6 +2608,7 @@ static coroutine_fn int nbd_handle_request(NBDClient *client,
            return nbd_send_generic_reply(client, request, -EINVAL,
                                          "need non-zero length", errp);
        }
        assert(request->len <= UINT32_MAX);
        if (client->export_meta.count) {
            bool dont_fragment = request->flags & NBD_CMD_FLAG_REQ_ONE;
            int contexts_remaining = client->export_meta.count;
@ -31,7 +31,7 @@ nbd_client_loop(void) "Doing NBD loop"
|
||||
nbd_client_loop_ret(int ret, const char *error) "NBD loop returned %d: %s"
|
||||
nbd_client_clear_queue(void) "Clearing NBD queue"
|
||||
nbd_client_clear_socket(void) "Clearing NBD socket"
|
||||
nbd_send_request(uint64_t from, uint32_t len, uint64_t cookie, uint16_t flags, uint16_t type, const char *name) "Sending request to server: { .from = %" PRIu64", .len = %" PRIu32 ", .cookie = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) }"
|
||||
nbd_send_request(uint64_t from, uint64_t len, uint64_t cookie, uint16_t flags, uint16_t type, const char *name) "Sending request to server: { .from = %" PRIu64", .len = %" PRIu64 ", .cookie = %" PRIu64 ", .flags = 0x%" PRIx16 ", .type = %" PRIu16 " (%s) }"
|
||||
nbd_receive_simple_reply(int32_t error, const char *errname, uint64_t cookie) "Got simple reply: { .error = %" PRId32 " (%s), cookie = %" PRIu64" }"
|
||||
nbd_receive_structured_reply_chunk(uint16_t flags, uint16_t type, const char *name, uint64_t cookie, uint32_t length) "Got structured reply chunk: { flags = 0x%" PRIx16 ", type = %d (%s), cookie = %" PRIu64 ", length = %" PRIu32 " }"
|
||||
|
||||
@ -60,18 +60,18 @@ nbd_negotiate_options_check_option(uint32_t option, const char *name) "Checking
|
||||
nbd_negotiate_begin(void) "Beginning negotiation"
|
||||
nbd_negotiate_new_style_size_flags(uint64_t size, unsigned flags) "advertising size %" PRIu64 " and flags 0x%x"
|
||||
nbd_negotiate_success(void) "Negotiation succeeded"
|
||||
nbd_receive_request(uint32_t magic, uint16_t flags, uint16_t type, uint64_t from, uint32_t len) "Got request: { magic = 0x%" PRIx32 ", .flags = 0x%" PRIx16 ", .type = 0x%" PRIx16 ", from = %" PRIu64 ", len = %" PRIu32 " }"
|
||||
nbd_receive_request(uint32_t magic, uint16_t flags, uint16_t type, uint64_t from, uint64_t len) "Got request: { magic = 0x%" PRIx32 ", .flags = 0x%" PRIx16 ", .type = 0x%" PRIx16 ", from = %" PRIu64 ", len = %" PRIu64 " }"
|
||||
nbd_blk_aio_attached(const char *name, void *ctx) "Export %s: Attaching clients to AIO context %p"
|
||||
nbd_blk_aio_detach(const char *name, void *ctx) "Export %s: Detaching clients from AIO context %p"
|
||||
nbd_co_send_simple_reply(uint64_t cookie, uint32_t error, const char *errname, int len) "Send simple reply: cookie = %" PRIu64 ", error = %" PRIu32 " (%s), len = %d"
|
||||
nbd_co_send_simple_reply(uint64_t cookie, uint32_t error, const char *errname, uint64_t len) "Send simple reply: cookie = %" PRIu64 ", error = %" PRIu32 " (%s), len = %" PRIu64
|
||||
nbd_co_send_chunk_done(uint64_t cookie) "Send structured reply done: cookie = %" PRIu64
|
||||
nbd_co_send_chunk_read(uint64_t cookie, uint64_t offset, void *data, size_t size) "Send structured read data reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", data = %p, len = %zu"
|
||||
nbd_co_send_chunk_read_hole(uint64_t cookie, uint64_t offset, size_t size) "Send structured read hole reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", len = %zu"
|
||||
nbd_co_send_chunk_read(uint64_t cookie, uint64_t offset, void *data, uint64_t size) "Send structured read data reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", data = %p, len = %" PRIu64
|
||||
nbd_co_send_chunk_read_hole(uint64_t cookie, uint64_t offset, uint64_t size) "Send structured read hole reply: cookie = %" PRIu64 ", offset = %" PRIu64 ", len = %" PRIu64
|
||||
nbd_co_send_extents(uint64_t cookie, unsigned int extents, uint32_t id, uint64_t length, int last) "Send block status reply: cookie = %" PRIu64 ", extents = %u, context = %d (extents cover %" PRIu64 " bytes, last chunk = %d)"
|
||||
nbd_co_send_chunk_error(uint64_t cookie, int err, const char *errname, const char *msg) "Send structured error reply: cookie = %" PRIu64 ", error = %d (%s), msg = '%s'"
|
||||
nbd_co_receive_request_decode_type(uint64_t cookie, uint16_t type, const char *name) "Decoding type: cookie = %" PRIu64 ", type = %" PRIu16 " (%s)"
|
||||
nbd_co_receive_request_payload_received(uint64_t cookie, uint32_t len) "Payload received: cookie = %" PRIu64 ", len = %" PRIu32
|
||||
nbd_co_receive_align_compliance(const char *op, uint64_t from, uint32_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx32 ", align=0x%" PRIx32
|
||||
nbd_co_receive_request_payload_received(uint64_t cookie, uint64_t len) "Payload received: cookie = %" PRIu64 ", len = %" PRIu64
|
||||
nbd_co_receive_align_compliance(const char *op, uint64_t from, uint64_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx64 ", align=0x%" PRIx32
|
||||
nbd_trip(void) "Reading request"
|
||||
|
||||
# client-connection.c
|
||||
|
BIN pc-bios/bios.bin
Binary file not shown.
@ -295,7 +295,9 @@ static void *show_parts(void *arg)
static void *nbd_client_thread(void *arg)
{
struct NbdClientOpts *opts = arg;
NBDExportInfo info = { .request_sizes = false, .name = g_strdup("") };
/* TODO: Revisit this if nbd.ko ever gains support for structured reply */
NBDExportInfo info = { .request_sizes = false, .name = g_strdup(""),
.mode = NBD_MODE_SIMPLE };
QIOChannelSocket *sioc;
int fd = -1;
int ret = EXIT_FAILURE;

@ -1,21 +1,30 @@
# for qemu machine types 1.7 + older
# need to turn off features (xhci,uas) to make it fit into 128k
# SeaBIOS Configuration for -M isapc

CONFIG_QEMU=y
CONFIG_ROM_SIZE=128
CONFIG_ATA_DMA=n
CONFIG_BOOTSPLASH=n
CONFIG_XEN=n
CONFIG_USB_OHCI=n
CONFIG_USB_XHCI=n
CONFIG_USB_UAS=n
CONFIG_ATA_PIO32=n
CONFIG_AHCI=n
CONFIG_SDCARD=n
CONFIG_TCGBIOS=n
CONFIG_MPT_SCSI=n
CONFIG_ESP_SCSI=n
CONFIG_MEGASAS=n
CONFIG_VIRTIO_BLK=n
CONFIG_VIRTIO_SCSI=n
CONFIG_PVSCSI=n
CONFIG_ESP_SCSI=n
CONFIG_LSI_SCSI=n
CONFIG_MEGASAS=n
CONFIG_MPT_SCSI=n
CONFIG_NVME=n
CONFIG_USE_SMM=n
CONFIG_VGAHOOKS=n
CONFIG_HOST_BIOS_GEOMETRY=n
CONFIG_USB=n
CONFIG_PMTIMER=n
CONFIG_PCIBIOS=n
CONFIG_DISABLE_A20=n
CONFIG_WRITABLE_UPPERMEMORY=n
CONFIG_TCGBIOS=n
CONFIG_ACPI=n
CONFIG_ACPI_PARSE=n
CONFIG_DEBUG_SERIAL=n
CONFIG_DEBUG_SERIAL_MMIO=n

@ -115,6 +115,7 @@ controls = [
(50, 53): 'VMCS memory type',
54: 'INS/OUTS instruction information',
55: 'IA32_VMX_TRUE_*_CTLS support',
56: 'Skip checks on event error code',
},
msr = MSR_IA32_VMX_BASIC,
),

@ -122,6 +122,7 @@ meson_options_help() {
printf "%s\n" ' libdaxctl libdaxctl support'
printf "%s\n" ' libdw debuginfo support'
printf "%s\n" ' libiscsi libiscsi userspace initiator'
printf "%s\n" ' libkeyutils Linux keyutils support'
printf "%s\n" ' libnfs libnfs block device driver'
printf "%s\n" ' libpmem libpmem support'
printf "%s\n" ' libssh ssh block device support'
@ -345,6 +346,8 @@ _meson_option_parse() {
--libexecdir=*) quote_sh "-Dlibexecdir=$2" ;;
--enable-libiscsi) printf "%s" -Dlibiscsi=enabled ;;
--disable-libiscsi) printf "%s" -Dlibiscsi=disabled ;;
--enable-libkeyutils) printf "%s" -Dlibkeyutils=enabled ;;
--disable-libkeyutils) printf "%s" -Dlibkeyutils=disabled ;;
--enable-libnfs) printf "%s" -Dlibnfs=enabled ;;
--disable-libnfs) printf "%s" -Dlibnfs=disabled ;;
--enable-libpmem) printf "%s" -Dlibpmem=enabled ;;

19 softmmu/vl.c
@ -2125,6 +2125,7 @@ static int global_init_func(void *opaque, QemuOpts *opts, Error **errp)
static bool is_qemuopts_group(const char *group)
{
if (g_str_equal(group, "object") ||
g_str_equal(group, "audiodev") ||
g_str_equal(group, "machine") ||
g_str_equal(group, "smp-opts") ||
g_str_equal(group, "boot-opts")) {
@ -2140,6 +2141,15 @@ static void qemu_record_config_group(const char *group, QDict *dict,
Visitor *v = qobject_input_visitor_new_keyval(QOBJECT(dict));
object_option_add_visitor(v);
visit_free(v);

} else if (g_str_equal(group, "audiodev")) {
Audiodev *dev = NULL;
Visitor *v = qobject_input_visitor_new_keyval(QOBJECT(dict));
if (visit_type_Audiodev(v, NULL, &dev, errp)) {
audio_define(dev);
}
visit_free(v);

} else if (g_str_equal(group, "machine")) {
/*
* Cannot merge string-valued and type-safe dictionaries, so JSON
@ -3204,7 +3214,6 @@ void qemu_init(int argc, char **argv)
}
break;
case QEMU_OPTION_watchdog_action: {
QemuOpts *opts;
opts = qemu_opts_create(qemu_find_opts("action"), NULL, 0, &error_abort);
qemu_opt_set(opts, "watchdog", optarg, &error_abort);
break;
@ -3515,16 +3524,16 @@ void qemu_init(int argc, char **argv)
break;
case QEMU_OPTION_compat:
{
CompatPolicy *opts;
CompatPolicy *opts_policy;
Visitor *v;

v = qobject_input_visitor_new_str(optarg, NULL,
&error_fatal);

visit_type_CompatPolicy(v, NULL, &opts, &error_fatal);
QAPI_CLONE_MEMBERS(CompatPolicy, &compat_policy, opts);
visit_type_CompatPolicy(v, NULL, &opts_policy, &error_fatal);
QAPI_CLONE_MEMBERS(CompatPolicy, &compat_policy, opts_policy);

qapi_free_CompatPolicy(opts);
qapi_free_CompatPolicy(opts_policy);
visit_free(v);
break;
}

@ -1315,6 +1315,7 @@ void pmu_init(ARMCPU *cpu);
#define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */
#define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */
#define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */
#define SCTLR_MSCEN (1ULL << 33) /* FEAT_MOPS */
#define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */
#define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */
#define SCTLR_ITFSB (1ULL << 37) /* v8.5-MemTag */
@ -2166,6 +2167,7 @@ FIELD(ID_AA64ISAR0, SHA1, 8, 4)
FIELD(ID_AA64ISAR0, SHA2, 12, 4)
FIELD(ID_AA64ISAR0, CRC32, 16, 4)
FIELD(ID_AA64ISAR0, ATOMIC, 20, 4)
FIELD(ID_AA64ISAR0, TME, 24, 4)
FIELD(ID_AA64ISAR0, RDM, 28, 4)
FIELD(ID_AA64ISAR0, SHA3, 32, 4)
FIELD(ID_AA64ISAR0, SM3, 36, 4)
@ -2200,6 +2202,13 @@ FIELD(ID_AA64ISAR2, APA3, 12, 4)
FIELD(ID_AA64ISAR2, MOPS, 16, 4)
FIELD(ID_AA64ISAR2, BC, 20, 4)
FIELD(ID_AA64ISAR2, PAC_FRAC, 24, 4)
FIELD(ID_AA64ISAR2, CLRBHB, 28, 4)
FIELD(ID_AA64ISAR2, SYSREG_128, 32, 4)
FIELD(ID_AA64ISAR2, SYSINSTR_128, 36, 4)
FIELD(ID_AA64ISAR2, PRFMSLC, 40, 4)
FIELD(ID_AA64ISAR2, RPRFM, 48, 4)
FIELD(ID_AA64ISAR2, CSSC, 52, 4)
FIELD(ID_AA64ISAR2, ATS1A, 60, 4)

FIELD(ID_AA64PFR0, EL0, 0, 4)
FIELD(ID_AA64PFR0, EL1, 4, 4)
@ -2227,6 +2236,12 @@ FIELD(ID_AA64PFR1, SME, 24, 4)
FIELD(ID_AA64PFR1, RNDR_TRAP, 28, 4)
FIELD(ID_AA64PFR1, CSV2_FRAC, 32, 4)
FIELD(ID_AA64PFR1, NMI, 36, 4)
FIELD(ID_AA64PFR1, MTE_FRAC, 40, 4)
FIELD(ID_AA64PFR1, GCS, 44, 4)
FIELD(ID_AA64PFR1, THE, 48, 4)
FIELD(ID_AA64PFR1, MTEX, 52, 4)
FIELD(ID_AA64PFR1, DF2, 56, 4)
FIELD(ID_AA64PFR1, PFAR, 60, 4)

FIELD(ID_AA64MMFR0, PARANGE, 0, 4)
FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4)
@ -2258,6 +2273,7 @@ FIELD(ID_AA64MMFR1, AFP, 44, 4)
FIELD(ID_AA64MMFR1, NTLBPA, 48, 4)
FIELD(ID_AA64MMFR1, TIDCP1, 52, 4)
FIELD(ID_AA64MMFR1, CMOW, 56, 4)
FIELD(ID_AA64MMFR1, ECBHB, 60, 4)

FIELD(ID_AA64MMFR2, CNP, 0, 4)
FIELD(ID_AA64MMFR2, UAO, 4, 4)
@ -2279,7 +2295,9 @@ FIELD(ID_AA64DFR0, DEBUGVER, 0, 4)
FIELD(ID_AA64DFR0, TRACEVER, 4, 4)
FIELD(ID_AA64DFR0, PMUVER, 8, 4)
FIELD(ID_AA64DFR0, BRPS, 12, 4)
FIELD(ID_AA64DFR0, PMSS, 16, 4)
FIELD(ID_AA64DFR0, WRPS, 20, 4)
FIELD(ID_AA64DFR0, SEBEP, 24, 4)
FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4)
FIELD(ID_AA64DFR0, PMSVER, 32, 4)
FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4)
@ -2287,12 +2305,14 @@ FIELD(ID_AA64DFR0, TRACEFILT, 40, 4)
FIELD(ID_AA64DFR0, TRACEBUFFER, 44, 4)
FIELD(ID_AA64DFR0, MTPMU, 48, 4)
FIELD(ID_AA64DFR0, BRBE, 52, 4)
FIELD(ID_AA64DFR0, EXTTRCBUFF, 56, 4)
FIELD(ID_AA64DFR0, HPMN0, 60, 4)

FIELD(ID_AA64ZFR0, SVEVER, 0, 4)
FIELD(ID_AA64ZFR0, AES, 4, 4)
FIELD(ID_AA64ZFR0, BITPERM, 16, 4)
FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4)
FIELD(ID_AA64ZFR0, B16B16, 24, 4)
FIELD(ID_AA64ZFR0, SHA3, 32, 4)
FIELD(ID_AA64ZFR0, SM4, 40, 4)
FIELD(ID_AA64ZFR0, I8MM, 44, 4)
@ -2300,9 +2320,13 @@ FIELD(ID_AA64ZFR0, F32MM, 52, 4)
FIELD(ID_AA64ZFR0, F64MM, 56, 4)

FIELD(ID_AA64SMFR0, F32F32, 32, 1)
FIELD(ID_AA64SMFR0, BI32I32, 33, 1)
FIELD(ID_AA64SMFR0, B16F32, 34, 1)
FIELD(ID_AA64SMFR0, F16F32, 35, 1)
FIELD(ID_AA64SMFR0, I8I32, 36, 4)
FIELD(ID_AA64SMFR0, F16F16, 42, 1)
FIELD(ID_AA64SMFR0, B16B16, 43, 1)
FIELD(ID_AA64SMFR0, I16I32, 44, 4)
FIELD(ID_AA64SMFR0, F64F64, 48, 1)
FIELD(ID_AA64SMFR0, I16I64, 52, 4)
FIELD(ID_AA64SMFR0, SMEVER, 56, 4)
@ -3147,6 +3171,7 @@ FIELD(TBFLAG_A64, SVL, 24, 4)
FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1)
FIELD(TBFLAG_A64, FGT_ERET, 29, 1)
FIELD(TBFLAG_A64, NAA, 30, 1)
FIELD(TBFLAG_A64, ATA0, 31, 1)

/*
* Helpers for using the above.
@ -4065,6 +4090,11 @@ static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0;
}

static inline bool isar_feature_aa64_hbc(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, BC) != 0;
}

static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id)
{
return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 1;
@ -4253,6 +4283,11 @@ static inline bool isar_feature_aa64_doublelock(const ARMISARegisters *id)
return FIELD_SEX64(id->id_aa64dfr0, ID_AA64DFR0, DOUBLELOCK) >= 0;
}

static inline bool isar_feature_aa64_mops(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, MOPS);
}

/*
* Feature tests for "does this exist in either 32-bit or 64-bit?"
*/

@ -5980,7 +5980,10 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
uint64_t valid_mask = 0;

/* No features adding bits to HCRX are implemented. */
/* FEAT_MOPS adds MSCEn and MCE2 */
if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
valid_mask |= HCRX_MSCEN | HCRX_MCE2;
}

/* Clear RES0 bits. */
env->cp15.hcrx_el2 = value & valid_mask;
@ -6009,13 +6012,24 @@ uint64_t arm_hcrx_el2_eff(CPUARMState *env)
{
/*
* The bits in this register behave as 0 for all purposes other than
* direct reads of the register if:
* - EL2 is not enabled in the current security state,
* - SCR_EL3.HXEn is 0.
* direct reads of the register if SCR_EL3.HXEn is 0.
* If EL2 is not enabled in the current security state, then the
* bit may behave as if 0, or as if 1, depending on the bit.
* For the moment, we treat the EL2-disabled case as taking
* priority over the HXEn-disabled case. This is true for the only
* bit for a feature which we implement where the answer is different
* for the two cases (MSCEn for FEAT_MOPS).
* This may need to be revisited for future bits.
*/
if (!arm_is_el2_enabled(env)
|| (arm_feature(env, ARM_FEATURE_EL3)
&& !(env->cp15.scr_el3 & SCR_HXEN))) {
if (!arm_is_el2_enabled(env)) {
uint64_t hcrx = 0;
if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
/* MSCEn behaves as 1 if EL2 is not enabled */
hcrx |= HCRX_MSCEN;
}
return hcrx;
}
if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
return 0;
}
return env->cp15.hcrx_el2;
@ -8621,11 +8635,16 @@ void register_cp_regs_for_features(ARMCPU *cpu)
R_ID_AA64ZFR0_F64MM_MASK },
{ .name = "ID_AA64SMFR0_EL1",
.exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
R_ID_AA64SMFR0_BI32I32_MASK |
R_ID_AA64SMFR0_B16F32_MASK |
R_ID_AA64SMFR0_F16F32_MASK |
R_ID_AA64SMFR0_I8I32_MASK |
R_ID_AA64SMFR0_F16F16_MASK |
R_ID_AA64SMFR0_B16B16_MASK |
R_ID_AA64SMFR0_I16I32_MASK |
R_ID_AA64SMFR0_F64F64_MASK |
R_ID_AA64SMFR0_I16I64_MASK |
R_ID_AA64SMFR0_SMEVER_MASK |
R_ID_AA64SMFR0_FA64_MASK },
{ .name = "ID_AA64MMFR0_EL1",
.exported_bits = R_ID_AA64MMFR0_ECV_MASK,
@ -8676,7 +8695,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
R_ID_AA64ISAR2_RPRES_MASK |
R_ID_AA64ISAR2_GPA3_MASK |
R_ID_AA64ISAR2_APA3_MASK },
R_ID_AA64ISAR2_APA3_MASK |
R_ID_AA64ISAR2_MOPS_MASK |
R_ID_AA64ISAR2_BC_MASK |
R_ID_AA64ISAR2_RPRFM_MASK |
R_ID_AA64ISAR2_CSSC_MASK },
{ .name = "ID_AA64ISAR*_EL1_RESERVED",
.is_glob = true },
};

@ -1272,6 +1272,61 @@ FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - 12) /* size - 1 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

/**
* mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
* @env: CPU env
* @ptr: start address of memory region (dirty pointer)
* @size: length of region (guaranteed not to cross a page boundary)
* @desc: MTEDESC descriptor word (0 means no MTE checks)
* Returns: the size of the region that can be copied without hitting
* an MTE tag failure
*
* Note that we assume that the caller has already checked the TBI
* and TCMA bits with mte_checks_needed() and an MTE check is definitely
* required.
*/
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
uint32_t desc);

/**
* mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
* operation going in the reverse direction
* @env: CPU env
* @ptr: *end* address of memory region (dirty pointer)
* @size: length of region (guaranteed not to cross a page boundary)
* @desc: MTEDESC descriptor word (0 means no MTE checks)
* Returns: the size of the region that can be copied without hitting
* an MTE tag failure
*
* Note that we assume that the caller has already checked the TBI
* and TCMA bits with mte_checks_needed() and an MTE check is definitely
* required.
*/
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
uint32_t desc);

/**
* mte_check_fail: Record an MTE tag check failure
* @env: CPU env
* @desc: MTEDESC descriptor word
* @dirty_ptr: Failing dirty address
* @ra: TCG retaddr
*
* This may never return (if the MTE tag checks are configured to fault).
*/
void mte_check_fail(CPUARMState *env, uint32_t desc,
uint64_t dirty_ptr, uintptr_t ra);

/**
* mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
* @env: CPU env
* @dirty_ptr: Start address of memory region (dirty pointer)
* @size: length of region (guaranteed not to cross page boundary)
* @desc: MTEDESC descriptor word
*/
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
return extract64(ptr, 56, 4);

@ -58,6 +58,7 @@ enum arm_exception_class {
EC_DATAABORT = 0x24,
EC_DATAABORT_SAME_EL = 0x25,
EC_SPALIGNMENT = 0x26,
EC_MOP = 0x27,
EC_AA32_FPTRAP = 0x28,
EC_AA64_FPTRAP = 0x2c,
EC_SERROR = 0x2f,
@ -334,4 +335,15 @@ static inline uint32_t syn_serror(uint32_t extra)
return (EC_SERROR << ARM_EL_EC_SHIFT) | ARM_EL_IL | extra;
}

static inline uint32_t syn_mop(bool is_set, bool is_setg, int options,
bool epilogue, bool wrong_option, bool option_a,
int destreg, int srcreg, int sizereg)
{
return (EC_MOP << ARM_EL_EC_SHIFT) | ARM_EL_IL |
(is_set << 24) | (is_setg << 23) | (options << 19) |
(epilogue << 18) | (wrong_option << 17) | (option_a << 16) |
(destreg << 10) | (srcreg << 5) | sizereg;
}

#endif /* TARGET_ARM_SYNDROME_H */

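The syn_mop() packing above is decoded again by the FEAT_MOPS helpers later in this pull. A minimal sketch of the reverse mapping, assuming only the shift/width layout shown in syn_mop() (the sketch_* names are illustrative, not part of the patch):

/* Sketch: recover the register numbers that syn_mop() packed into the
 * low bits of the syndrome word. This mirrors the
 * (destreg << 10) | (srcreg << 5) | sizereg layout above, and matches
 * what mops_destreg()/mops_srcreg()/mops_sizereg() in helper-a64.c do
 * with extract32().
 */
static inline int sketch_mop_destreg(uint32_t syn) { return (syn >> 10) & 0x1f; }
static inline int sketch_mop_srcreg(uint32_t syn)  { return (syn >> 5) & 0x1f; }
static inline int sketch_mop_sizereg(uint32_t syn) { return syn & 0x1f; }
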
@ -126,7 +126,8 @@ CBZ sf:1 011010 nz:1 ................... rt:5 &cbz imm=%imm19
|
||||
|
||||
TBZ . 011011 nz:1 ..... .............. rt:5 &tbz imm=%imm14 bitpos=%imm31_19
|
||||
|
||||
B_cond 0101010 0 ................... 0 cond:4 imm=%imm19
|
||||
# B.cond and BC.cond
|
||||
B_cond 0101010 0 ................... c:1 cond:4 imm=%imm19
|
||||
|
||||
BR 1101011 0000 11111 000000 rn:5 00000 &r
|
||||
BLR 1101011 0001 11111 000000 rn:5 00000 &r
|
||||
@ -553,3 +554,38 @@ LDGM 11011001 11 1 ......... 00 ..... ..... @ldst_tag_mult p=0 w=0
|
||||
STZ2G 11011001 11 1 ......... 01 ..... ..... @ldst_tag p=1 w=1
|
||||
STZ2G 11011001 11 1 ......... 10 ..... ..... @ldst_tag p=0 w=0
|
||||
STZ2G 11011001 11 1 ......... 11 ..... ..... @ldst_tag p=0 w=1
|
||||
|
||||
# Memory operations (memset, memcpy, memmove)
|
||||
# Each of these comes in a set of three, eg SETP (prologue), SETM (main),
|
||||
# SETE (epilogue), and each of those has different flavours to
|
||||
# indicate whether memory accesses should be unpriv or non-temporal.
|
||||
# We don't distinguish temporal and non-temporal accesses, but we
|
||||
# do need to report it in syndrome register values.
|
||||
|
||||
# Memset
|
||||
&set rs rn rd unpriv nontemp
|
||||
# op2 bit 1 is nontemporal bit
|
||||
@set .. ......... rs:5 .. nontemp:1 unpriv:1 .. rn:5 rd:5 &set
|
||||
|
||||
SETP 00 011001110 ..... 00 . . 01 ..... ..... @set
|
||||
SETM 00 011001110 ..... 01 . . 01 ..... ..... @set
|
||||
SETE 00 011001110 ..... 10 . . 01 ..... ..... @set
|
||||
|
||||
# Like SET, but also setting MTE tags
|
||||
SETGP 00 011101110 ..... 00 . . 01 ..... ..... @set
|
||||
SETGM 00 011101110 ..... 01 . . 01 ..... ..... @set
|
||||
SETGE 00 011101110 ..... 10 . . 01 ..... ..... @set
|
||||
|
||||
# Memmove/Memcopy: the CPY insns allow overlapping src/dest and
|
||||
# copy in the correct direction; the CPYF insns always copy forwards.
|
||||
#
|
||||
# options has the nontemporal and unpriv bits for src and dest
|
||||
&cpy rs rn rd options
|
||||
@cpy .. ... . ..... rs:5 options:4 .. rn:5 rd:5 &cpy
|
||||
|
||||
CPYFP 00 011 0 01000 ..... .... 01 ..... ..... @cpy
|
||||
CPYFM 00 011 0 01010 ..... .... 01 ..... ..... @cpy
|
||||
CPYFE 00 011 0 01100 ..... .... 01 ..... ..... @cpy
|
||||
CPYP 00 011 1 01000 ..... .... 01 ..... ..... @cpy
|
||||
CPYM 00 011 1 01010 ..... .... 01 ..... ..... @cpy
|
||||
CPYE 00 011 1 01100 ..... .... 01 ..... ..... @cpy
|
||||
|
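For readers unfamiliar with decodetree: fields are listed most-significant-bit first, so the @set format above places rs at bits [20:16], nontemp at bit 13, unpriv at bit 12, rn at bits [9:5] and rd at bits [4:0]. A hedged C sketch of the equivalent manual extraction (sketch_decode_set is illustrative, not generated code):

/* Sketch: manual field extraction matching the @set format above. */
static void sketch_decode_set(uint32_t insn)
{
    int rs      = (insn >> 16) & 0x1f; /* rs:5      at bits [20:16] */
    int nontemp = (insn >> 13) & 1;    /* nontemp:1 at bit  13      */
    int unpriv  = (insn >> 12) & 1;    /* unpriv:1  at bit  12      */
    int rn      = (insn >> 5) & 0x1f;  /* rn:5      at bits [9:5]   */
    int rd      = insn & 0x1f;         /* rd:5      at bits [4:0]   */
    (void)rs; (void)nontemp; (void)unpriv; (void)rn; (void)rd;
}
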
@ -1027,6 +1027,11 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */
cpu->isar.id_aa64isar1 = t;

t = cpu->isar.id_aa64isar2;
t = FIELD_DP64(t, ID_AA64ISAR2, MOPS, 1); /* FEAT_MOPS */
t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1); /* FEAT_HBC */
cpu->isar.id_aa64isar2 = t;

t = cpu->isar.id_aa64pfr0;
t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */
t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */

@ -968,3 +968,881 @@ void HELPER(unaligned_access)(CPUARMState *env, uint64_t addr,
arm_cpu_do_unaligned_access(env_cpu(env), addr, access_type,
mmu_idx, GETPC());
}

/* Memory operations (memset, memmove, memcpy) */

/*
* Return true if the CPY* and SET* insns can execute; compare
* pseudocode CheckMOPSEnabled(), though we refactor it a little.
*/
static bool mops_enabled(CPUARMState *env)
{
int el = arm_current_el(env);

if (el < 2 &&
(arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
!(arm_hcrx_el2_eff(env) & HCRX_MSCEN)) {
return false;
}

if (el == 0) {
if (!el_is_in_host(env, 0)) {
return env->cp15.sctlr_el[1] & SCTLR_MSCEN;
} else {
return env->cp15.sctlr_el[2] & SCTLR_MSCEN;
}
}
return true;
}

static void check_mops_enabled(CPUARMState *env, uintptr_t ra)
{
if (!mops_enabled(env)) {
raise_exception_ra(env, EXCP_UDEF, syn_uncategorized(),
exception_target_el(env), ra);
}
}

/*
* Return the target exception level for an exception due
* to mismatched arguments in a FEAT_MOPS copy or set.
* Compare pseudocode MismatchedCpySetTargetEL()
*/
static int mops_mismatch_exception_target_el(CPUARMState *env)
{
int el = arm_current_el(env);

if (el > 1) {
return el;
}
if (el == 0 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
return 2;
}
if (el == 1 && (arm_hcrx_el2_eff(env) & HCRX_MCE2)) {
return 2;
}
return 1;
}

/*
* Check whether an M or E instruction was executed with a CF value
* indicating the wrong option for this implementation.
* Assumes we are always Option A.
*/
static void check_mops_wrong_option(CPUARMState *env, uint32_t syndrome,
uintptr_t ra)
{
if (env->CF != 0) {
syndrome |= 1 << 17; /* Set the wrong-option bit */
raise_exception_ra(env, EXCP_UDEF, syndrome,
mops_mismatch_exception_target_el(env), ra);
}
}

/*
* Return the maximum number of bytes we can transfer starting at addr
* without crossing a page boundary.
*/
static uint64_t page_limit(uint64_t addr)
{
return TARGET_PAGE_ALIGN(addr + 1) - addr;
}

/*
* Return the number of bytes we can copy starting from addr and working
* backwards without crossing a page boundary.
*/
static uint64_t page_limit_rev(uint64_t addr)
{
return (addr & ~TARGET_PAGE_MASK) + 1;
}

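To make the page-limit arithmetic concrete, a worked example assuming TARGET_PAGE_SIZE == 4096 (the addresses below are illustrative, not from the patch):

/* page_limit(0x1ffd)     == 0x2000 - 0x1ffd      == 3      (3 bytes to the boundary)
 * page_limit(0x2000)     == 0x3000 - 0x2000      == 0x1000 (a whole page)
 * page_limit_rev(0x1ffd) == (0x1ffd & 0xfff) + 1 == 0xffe  (bytes backwards)
 * page_limit_rev(0x2000) == 0 + 1                == 1      (just the first byte)
 */
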
/*
* Perform part of a memory set on an area of guest memory starting at
* toaddr (a dirty address) and extending for setsize bytes.
*
* Returns the number of bytes actually set, which might be less than
* setsize; the caller should loop until the whole set has been done.
* The caller should ensure that the guest registers are correct
* for the possibility that the first byte of the set encounters
* an exception or watchpoint. We guarantee not to take any faults
* for bytes other than the first.
*/
static uint64_t set_step(CPUARMState *env, uint64_t toaddr,
uint64_t setsize, uint32_t data, int memidx,
uint32_t *mtedesc, uintptr_t ra)
{
void *mem;

setsize = MIN(setsize, page_limit(toaddr));
if (*mtedesc) {
uint64_t mtesize = mte_mops_probe(env, toaddr, setsize, *mtedesc);
if (mtesize == 0) {
/* Trap, or not. All CPU state is up to date */
mte_check_fail(env, *mtedesc, toaddr, ra);
/* Continue, with no further MTE checks required */
*mtedesc = 0;
} else {
/* Advance to the end, or to the tag mismatch */
setsize = MIN(setsize, mtesize);
}
}

toaddr = useronly_clean_ptr(toaddr);
/*
* Trapless lookup: returns NULL for invalid page, I/O,
* watchpoints, clean pages, etc.
*/
mem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, memidx);

#ifndef CONFIG_USER_ONLY
if (unlikely(!mem)) {
/*
* Slow-path: just do one byte write. This will handle the
* watchpoint, invalid page, etc handling correctly.
* For clean code pages, the next iteration will see
* the page dirty and will use the fast path.
*/
cpu_stb_mmuidx_ra(env, toaddr, data, memidx, ra);
return 1;
}
#endif
/* Easy case: just memset the host memory */
memset(mem, data, setsize);
return setsize;
}

/*
* Similar, but setting tags. The architecture requires us to do this
* in 16-byte chunks. SETP accesses are not tag checked; they set
* the tags.
*/
static uint64_t set_step_tags(CPUARMState *env, uint64_t toaddr,
uint64_t setsize, uint32_t data, int memidx,
uint32_t *mtedesc, uintptr_t ra)
{
void *mem;
uint64_t cleanaddr;

setsize = MIN(setsize, page_limit(toaddr));

cleanaddr = useronly_clean_ptr(toaddr);
/*
* Trapless lookup: returns NULL for invalid page, I/O,
* watchpoints, clean pages, etc.
*/
mem = tlb_vaddr_to_host(env, cleanaddr, MMU_DATA_STORE, memidx);

#ifndef CONFIG_USER_ONLY
if (unlikely(!mem)) {
/*
* Slow-path: just do one write. This will handle the
* watchpoint, invalid page, etc handling correctly.
* The architecture requires that we do 16 bytes at a time,
* and we know both ptr and size are 16 byte aligned.
* For clean code pages, the next iteration will see
* the page dirty and will use the fast path.
*/
uint64_t repldata = data * 0x0101010101010101ULL;
MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, memidx);
cpu_st16_mmu(env, toaddr, int128_make128(repldata, repldata), oi16, ra);
mte_mops_set_tags(env, toaddr, 16, *mtedesc);
return 16;
}
#endif
/* Easy case: just memset the host memory */
memset(mem, data, setsize);
mte_mops_set_tags(env, toaddr, setsize, *mtedesc);
return setsize;
}

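The slow path of set_step_tags() relies on the classic byte-replication multiply: multiplying an 8-bit value by 0x0101010101010101 copies it into every byte lane of a 64-bit word. A standalone sketch (the example value is assumed, not from the patch):

#include <stdint.h>

/* Sketch: replicate one byte across a 64-bit word, as the slow path
 * above does before issuing a single 16-byte store.
 * replicate_byte(0xab) == 0xabababababababab.
 */
static inline uint64_t replicate_byte(uint8_t b)
{
    return b * 0x0101010101010101ULL;
}
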
typedef uint64_t StepFn(CPUARMState *env, uint64_t toaddr,
uint64_t setsize, uint32_t data,
int memidx, uint32_t *mtedesc, uintptr_t ra);

/* Extract register numbers from a MOPS exception syndrome value */
static int mops_destreg(uint32_t syndrome)
{
return extract32(syndrome, 10, 5);
}

static int mops_srcreg(uint32_t syndrome)
{
return extract32(syndrome, 5, 5);
}

static int mops_sizereg(uint32_t syndrome)
{
return extract32(syndrome, 0, 5);
}

/*
* Return true if TCMA and TBI bits mean we need to do MTE checks.
* We only need to do this once per MOPS insn, not for every page.
*/
static bool mte_checks_needed(uint64_t ptr, uint32_t desc)
{
int bit55 = extract64(ptr, 55, 1);

/*
* Note that tbi_check() returns true for "access checked" but
* tcma_check() returns true for "access unchecked".
*/
if (!tbi_check(desc, bit55)) {
return false;
}
return !tcma_check(desc, bit55, allocation_tag_from_addr(ptr));
}

/* Take an exception if the SETG addr/size are not granule aligned */
static void check_setg_alignment(CPUARMState *env, uint64_t ptr, uint64_t size,
uint32_t memidx, uintptr_t ra)
{
if ((size != 0 && !QEMU_IS_ALIGNED(ptr, TAG_GRANULE)) ||
!QEMU_IS_ALIGNED(size, TAG_GRANULE)) {
arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
memidx, ra);
}
}

/*
* For the Memory Set operation, our implementation chooses
* always to use "option A", where we update Xd to the final
* address in the SETP insn, and set Xn to be -(bytes remaining).
* On SETM and SETE insns we only need update Xn.
*
* @env: CPU
* @syndrome: syndrome value for mismatch exceptions
* (also contains the register numbers we need to use)
* @mtedesc: MTE descriptor word
* @stepfn: function which does a single part of the set operation
* @is_setg: true if this is the tag-setting SETG variant
*/
static void do_setp(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
StepFn *stepfn, bool is_setg, uintptr_t ra)
{
/* Prologue: we choose to do up to the next page boundary */
int rd = mops_destreg(syndrome);
int rs = mops_srcreg(syndrome);
int rn = mops_sizereg(syndrome);
uint8_t data = env->xregs[rs];
uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
uint64_t toaddr = env->xregs[rd];
uint64_t setsize = env->xregs[rn];
uint64_t stagesetsize, step;

check_mops_enabled(env, ra);

if (setsize > INT64_MAX) {
setsize = INT64_MAX;
if (is_setg) {
setsize &= ~0xf;
}
}

if (unlikely(is_setg)) {
check_setg_alignment(env, toaddr, setsize, memidx, ra);
} else if (!mte_checks_needed(toaddr, mtedesc)) {
mtedesc = 0;
}

stagesetsize = MIN(setsize, page_limit(toaddr));
while (stagesetsize) {
env->xregs[rd] = toaddr;
env->xregs[rn] = setsize;
step = stepfn(env, toaddr, stagesetsize, data, memidx, &mtedesc, ra);
toaddr += step;
setsize -= step;
stagesetsize -= step;
}
/* Insn completed, so update registers to the Option A format */
env->xregs[rd] = toaddr + setsize;
env->xregs[rn] = -setsize;

/* Set NZCV = 0000 to indicate we are an Option A implementation */
env->NF = 0;
env->ZF = 1; /* our env->ZF encoding is inverted */
env->CF = 0;
env->VF = 0;
return;
}

void HELPER(setp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
{
do_setp(env, syndrome, mtedesc, set_step, false, GETPC());
}

void HELPER(setgp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
{
do_setp(env, syndrome, mtedesc, set_step_tags, true, GETPC());
}

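A concrete picture of the Option A register convention, with assumed example values (not taken from the patch):

/* Sketch: SETP with Xd = 0x0ff40, Xn = 0x100 (256 bytes), Xs = fill byte,
 * and a 4KiB page boundary at 0x10000. The prologue stops at the
 * boundary after 0xc0 bytes, leaving 0x40 still to do:
 *   Xd = 0x0ff40 + 0x100 = 0x10040   (the final address, fixed up front)
 *   Xn = -0x40                       (minus the bytes remaining)
 *   NZCV = 0000                      (identifies an Option A CPU)
 * SETM/SETE then recompute the working address as Xd + Xn = 0x10000
 * and count Xn back up towards zero.
 */
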
static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
StepFn *stepfn, bool is_setg, uintptr_t ra)
{
/* Main: we choose to do all the full-page chunks */
CPUState *cs = env_cpu(env);
int rd = mops_destreg(syndrome);
int rs = mops_srcreg(syndrome);
int rn = mops_sizereg(syndrome);
uint8_t data = env->xregs[rs];
uint64_t toaddr = env->xregs[rd] + env->xregs[rn];
uint64_t setsize = -env->xregs[rn];
uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
uint64_t step, stagesetsize;

check_mops_enabled(env, ra);

/*
* We're allowed to NOP out "no data to copy" before the consistency
* checks; we choose to do so.
*/
if (env->xregs[rn] == 0) {
return;
}

check_mops_wrong_option(env, syndrome, ra);

/*
* Our implementation will work fine even if we have an unaligned
* destination address, and because we update Xn every time around
* the loop below and the return value from stepfn() may be less
* than requested, we might find toaddr is unaligned. So we don't
* have an IMPDEF check for alignment here.
*/

if (unlikely(is_setg)) {
check_setg_alignment(env, toaddr, setsize, memidx, ra);
} else if (!mte_checks_needed(toaddr, mtedesc)) {
mtedesc = 0;
}

/* Do the actual memset: we leave the last partial page to SETE */
stagesetsize = setsize & TARGET_PAGE_MASK;
while (stagesetsize > 0) {
step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra);
toaddr += step;
setsize -= step;
stagesetsize -= step;
env->xregs[rn] = -setsize;
if (stagesetsize > 0 && unlikely(cpu_loop_exit_requested(cs))) {
cpu_loop_exit_restore(cs, ra);
}
}
}

void HELPER(setm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
{
do_setm(env, syndrome, mtedesc, set_step, false, GETPC());
}

void HELPER(setgm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
{
do_setm(env, syndrome, mtedesc, set_step_tags, true, GETPC());
}

static void do_sete(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
StepFn *stepfn, bool is_setg, uintptr_t ra)
{
/* Epilogue: do the last partial page */
int rd = mops_destreg(syndrome);
int rs = mops_srcreg(syndrome);
int rn = mops_sizereg(syndrome);
uint8_t data = env->xregs[rs];
uint64_t toaddr = env->xregs[rd] + env->xregs[rn];
uint64_t setsize = -env->xregs[rn];
uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
uint64_t step;

check_mops_enabled(env, ra);

/*
* We're allowed to NOP out "no data to copy" before the consistency
* checks; we choose to do so.
*/
if (setsize == 0) {
return;
}

check_mops_wrong_option(env, syndrome, ra);

/*
* Our implementation has no address alignment requirements, but
* we do want to enforce the "less than a page" size requirement,
* so we don't need to have the "check for interrupts" here.
*/
if (setsize >= TARGET_PAGE_SIZE) {
raise_exception_ra(env, EXCP_UDEF, syndrome,
mops_mismatch_exception_target_el(env), ra);
}

if (unlikely(is_setg)) {
check_setg_alignment(env, toaddr, setsize, memidx, ra);
} else if (!mte_checks_needed(toaddr, mtedesc)) {
mtedesc = 0;
}

/* Do the actual memset */
while (setsize > 0) {
step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra);
toaddr += step;
setsize -= step;
env->xregs[rn] = -setsize;
}
}

void HELPER(sete)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
{
do_sete(env, syndrome, mtedesc, set_step, false, GETPC());
}

void HELPER(setge)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
{
do_sete(env, syndrome, mtedesc, set_step_tags, true, GETPC());
}

/*
* Perform part of a memory copy from the guest memory at fromaddr
* and extending for copysize bytes, to the guest memory at
* toaddr. Both addresses are dirty.
*
* Returns the number of bytes actually copied, which might be less than
* copysize; the caller should loop until the whole copy has been done.
* The caller should ensure that the guest registers are correct
* for the possibility that the first byte of the copy encounters
* an exception or watchpoint. We guarantee not to take any faults
* for bytes other than the first.
*/
static uint64_t copy_step(CPUARMState *env, uint64_t toaddr, uint64_t fromaddr,
uint64_t copysize, int wmemidx, int rmemidx,
uint32_t *wdesc, uint32_t *rdesc, uintptr_t ra)
{
void *rmem;
void *wmem;

/* Don't cross a page boundary on either source or destination */
copysize = MIN(copysize, page_limit(toaddr));
copysize = MIN(copysize, page_limit(fromaddr));
/*
* Handle MTE tag checks: either handle the tag mismatch for byte 0,
* or else copy up to but not including the byte with the mismatch.
*/
if (*rdesc) {
uint64_t mtesize = mte_mops_probe(env, fromaddr, copysize, *rdesc);
if (mtesize == 0) {
mte_check_fail(env, *rdesc, fromaddr, ra);
*rdesc = 0;
} else {
copysize = MIN(copysize, mtesize);
}
}
if (*wdesc) {
uint64_t mtesize = mte_mops_probe(env, toaddr, copysize, *wdesc);
if (mtesize == 0) {
mte_check_fail(env, *wdesc, toaddr, ra);
*wdesc = 0;
} else {
copysize = MIN(copysize, mtesize);
}
}

toaddr = useronly_clean_ptr(toaddr);
fromaddr = useronly_clean_ptr(fromaddr);
/* Trapless lookup of whether we can get a host memory pointer */
wmem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, wmemidx);
rmem = tlb_vaddr_to_host(env, fromaddr, MMU_DATA_LOAD, rmemidx);

#ifndef CONFIG_USER_ONLY
/*
* If we don't have host memory for both source and dest then just
* do a single byte copy. This will handle watchpoints, invalid pages,
* etc correctly. For clean code pages, the next iteration will see
* the page dirty and will use the fast path.
*/
if (unlikely(!rmem || !wmem)) {
uint8_t byte;
if (rmem) {
byte = *(uint8_t *)rmem;
} else {
byte = cpu_ldub_mmuidx_ra(env, fromaddr, rmemidx, ra);
}
if (wmem) {
*(uint8_t *)wmem = byte;
} else {
cpu_stb_mmuidx_ra(env, toaddr, byte, wmemidx, ra);
}
return 1;
}
#endif
/* Easy case: just memmove the host memory */
memmove(wmem, rmem, copysize);
return copysize;
}

/*
* Do part of a backwards memory copy. Here toaddr and fromaddr point
* to the *last* byte to be copied.
*/
static uint64_t copy_step_rev(CPUARMState *env, uint64_t toaddr,
uint64_t fromaddr,
uint64_t copysize, int wmemidx, int rmemidx,
uint32_t *wdesc, uint32_t *rdesc, uintptr_t ra)
{
void *rmem;
void *wmem;

/* Don't cross a page boundary on either source or destination */
copysize = MIN(copysize, page_limit_rev(toaddr));
copysize = MIN(copysize, page_limit_rev(fromaddr));

/*
* Handle MTE tag checks: either handle the tag mismatch for byte 0,
* or else copy up to but not including the byte with the mismatch.
*/
if (*rdesc) {
uint64_t mtesize = mte_mops_probe_rev(env, fromaddr, copysize, *rdesc);
if (mtesize == 0) {
mte_check_fail(env, *rdesc, fromaddr, ra);
*rdesc = 0;
} else {
copysize = MIN(copysize, mtesize);
}
}
if (*wdesc) {
uint64_t mtesize = mte_mops_probe_rev(env, toaddr, copysize, *wdesc);
if (mtesize == 0) {
mte_check_fail(env, *wdesc, toaddr, ra);
*wdesc = 0;
} else {
copysize = MIN(copysize, mtesize);
}
}

toaddr = useronly_clean_ptr(toaddr);
fromaddr = useronly_clean_ptr(fromaddr);
/* Trapless lookup of whether we can get a host memory pointer */
wmem = tlb_vaddr_to_host(env, toaddr, MMU_DATA_STORE, wmemidx);
rmem = tlb_vaddr_to_host(env, fromaddr, MMU_DATA_LOAD, rmemidx);

#ifndef CONFIG_USER_ONLY
/*
* If we don't have host memory for both source and dest then just
* do a single byte copy. This will handle watchpoints, invalid pages,
* etc correctly. For clean code pages, the next iteration will see
* the page dirty and will use the fast path.
*/
if (unlikely(!rmem || !wmem)) {
uint8_t byte;
if (rmem) {
byte = *(uint8_t *)rmem;
} else {
byte = cpu_ldub_mmuidx_ra(env, fromaddr, rmemidx, ra);
}
if (wmem) {
*(uint8_t *)wmem = byte;
} else {
cpu_stb_mmuidx_ra(env, toaddr, byte, wmemidx, ra);
}
return 1;
}
#endif
/*
* Easy case: just memmove the host memory. Note that wmem and
* rmem here point to the *last* byte to copy.
*/
memmove(wmem - (copysize - 1), rmem - (copysize - 1), copysize);
return copysize;
}

/*
* For the Memory Copy operation, our implementation chooses always
* to use "option A", where we update Xd and Xs to the final addresses
* in the CPYP insn, and then in CPYM and CPYE only need to update Xn.
*
* @env: CPU
* @syndrome: syndrome value for mismatch exceptions
* (also contains the register numbers we need to use)
* @wdesc: MTE descriptor for the writes (destination)
* @rdesc: MTE descriptor for the reads (source)
* @move: true if this is CPY (memmove), false for CPYF (memcpy forwards)
*/
static void do_cpyp(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
uint32_t rdesc, uint32_t move, uintptr_t ra)
{
int rd = mops_destreg(syndrome);
int rs = mops_srcreg(syndrome);
int rn = mops_sizereg(syndrome);
uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX);
uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX);
bool forwards = true;
uint64_t toaddr = env->xregs[rd];
uint64_t fromaddr = env->xregs[rs];
uint64_t copysize = env->xregs[rn];
uint64_t stagecopysize, step;

check_mops_enabled(env, ra);

if (move) {
/*
* Copy backwards if necessary. The direction for a non-overlapping
* copy is IMPDEF; we choose forwards.
*/
if (copysize > 0x007FFFFFFFFFFFFFULL) {
copysize = 0x007FFFFFFFFFFFFFULL;
}
uint64_t fs = extract64(fromaddr, 0, 56);
uint64_t ts = extract64(toaddr, 0, 56);
uint64_t fe = extract64(fromaddr + copysize, 0, 56);

if (fs < ts && fe > ts) {
forwards = false;
}
} else {
if (copysize > INT64_MAX) {
copysize = INT64_MAX;
}
}

if (!mte_checks_needed(fromaddr, rdesc)) {
rdesc = 0;
}
if (!mte_checks_needed(toaddr, wdesc)) {
wdesc = 0;
}

if (forwards) {
stagecopysize = MIN(copysize, page_limit(toaddr));
stagecopysize = MIN(stagecopysize, page_limit(fromaddr));
while (stagecopysize) {
env->xregs[rd] = toaddr;
env->xregs[rs] = fromaddr;
env->xregs[rn] = copysize;
step = copy_step(env, toaddr, fromaddr, stagecopysize,
wmemidx, rmemidx, &wdesc, &rdesc, ra);
toaddr += step;
fromaddr += step;
copysize -= step;
stagecopysize -= step;
}
/* Insn completed, so update registers to the Option A format */
env->xregs[rd] = toaddr + copysize;
env->xregs[rs] = fromaddr + copysize;
env->xregs[rn] = -copysize;
} else {
/*
* In a reverse copy the to and from addrs in Xs and Xd are the start
* of the range, but it's more convenient for us to work with pointers
* to the last byte being copied.
*/
toaddr += copysize - 1;
fromaddr += copysize - 1;
stagecopysize = MIN(copysize, page_limit_rev(toaddr));
stagecopysize = MIN(stagecopysize, page_limit_rev(fromaddr));
while (stagecopysize) {
env->xregs[rn] = copysize;
step = copy_step_rev(env, toaddr, fromaddr, stagecopysize,
wmemidx, rmemidx, &wdesc, &rdesc, ra);
copysize -= step;
stagecopysize -= step;
toaddr -= step;
fromaddr -= step;
}
/*
* Insn completed, so update registers to the Option A format.
* For a reverse copy this is no different to the CPYP input format.
*/
env->xregs[rn] = copysize;
}

/* Set NZCV = 0000 to indicate we are an Option A implementation */
env->NF = 0;
env->ZF = 1; /* our env->ZF encoding is inverted */
env->CF = 0;
env->VF = 0;
return;
}

void HELPER(cpyp)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
uint32_t rdesc)
{
do_cpyp(env, syndrome, wdesc, rdesc, true, GETPC());
}

void HELPER(cpyfp)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
uint32_t rdesc)
{
do_cpyp(env, syndrome, wdesc, rdesc, false, GETPC());
}

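The backwards-copy decision above compares 56-bit address ranges; a worked sketch with assumed example values (not taken from the patch):

/* Sketch: CPYP with fromaddr = 0x1000, toaddr = 0x1040, Xn = 0x100.
 * fs = 0x1000, ts = 0x1040, fe = 0x1100.
 * fs < ts (source starts below dest) and fe > ts (source end overlaps
 * dest), so a forwards copy would clobber source bytes before reading
 * them: the code picks forwards = false and copies backwards.
 * With toaddr = 0x2000 instead, fe (0x1100) <= ts: the ranges do not
 * overlap and the IMPDEF choice is a forwards copy.
 */
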
static void do_cpym(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
uint32_t rdesc, uint32_t move, uintptr_t ra)
{
/* Main: we choose to copy until less than a page remaining */
CPUState *cs = env_cpu(env);
int rd = mops_destreg(syndrome);
int rs = mops_srcreg(syndrome);
int rn = mops_sizereg(syndrome);
uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX);
uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX);
bool forwards = true;
uint64_t toaddr, fromaddr, copysize, step;

check_mops_enabled(env, ra);

/* We choose to NOP out "no data to copy" before consistency checks */
if (env->xregs[rn] == 0) {
return;
}

check_mops_wrong_option(env, syndrome, ra);

if (move) {
forwards = (int64_t)env->xregs[rn] < 0;
}

if (forwards) {
toaddr = env->xregs[rd] + env->xregs[rn];
fromaddr = env->xregs[rs] + env->xregs[rn];
copysize = -env->xregs[rn];
} else {
copysize = env->xregs[rn];
/* This toaddr and fromaddr point to the *last* byte to copy */
toaddr = env->xregs[rd] + copysize - 1;
fromaddr = env->xregs[rs] + copysize - 1;
}

if (!mte_checks_needed(fromaddr, rdesc)) {
rdesc = 0;
}
if (!mte_checks_needed(toaddr, wdesc)) {
wdesc = 0;
}

/* Our implementation has no particular parameter requirements for CPYM */

/* Do the actual memmove */
if (forwards) {
while (copysize >= TARGET_PAGE_SIZE) {
step = copy_step(env, toaddr, fromaddr, copysize,
wmemidx, rmemidx, &wdesc, &rdesc, ra);
toaddr += step;
fromaddr += step;
copysize -= step;
env->xregs[rn] = -copysize;
if (copysize >= TARGET_PAGE_SIZE &&
unlikely(cpu_loop_exit_requested(cs))) {
cpu_loop_exit_restore(cs, ra);
}
}
} else {
while (copysize >= TARGET_PAGE_SIZE) {
step = copy_step_rev(env, toaddr, fromaddr, copysize,
wmemidx, rmemidx, &wdesc, &rdesc, ra);
toaddr -= step;
fromaddr -= step;
copysize -= step;
env->xregs[rn] = copysize;
if (copysize >= TARGET_PAGE_SIZE &&
unlikely(cpu_loop_exit_requested(cs))) {
cpu_loop_exit_restore(cs, ra);
}
}
}
}

void HELPER(cpym)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
uint32_t rdesc)
{
do_cpym(env, syndrome, wdesc, rdesc, true, GETPC());
}

void HELPER(cpyfm)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
uint32_t rdesc)
{
do_cpym(env, syndrome, wdesc, rdesc, false, GETPC());
}

static void do_cpye(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
uint32_t rdesc, uint32_t move, uintptr_t ra)
{
/* Epilogue: do the last partial page */
int rd = mops_destreg(syndrome);
int rs = mops_srcreg(syndrome);
int rn = mops_sizereg(syndrome);
uint32_t rmemidx = FIELD_EX32(rdesc, MTEDESC, MIDX);
uint32_t wmemidx = FIELD_EX32(wdesc, MTEDESC, MIDX);
bool forwards = true;
uint64_t toaddr, fromaddr, copysize, step;

check_mops_enabled(env, ra);

/* We choose to NOP out "no data to copy" before consistency checks */
if (env->xregs[rn] == 0) {
return;
}

check_mops_wrong_option(env, syndrome, ra);

if (move) {
forwards = (int64_t)env->xregs[rn] < 0;
}

if (forwards) {
toaddr = env->xregs[rd] + env->xregs[rn];
fromaddr = env->xregs[rs] + env->xregs[rn];
copysize = -env->xregs[rn];
} else {
copysize = env->xregs[rn];
/* This toaddr and fromaddr point to the *last* byte to copy */
toaddr = env->xregs[rd] + copysize - 1;
fromaddr = env->xregs[rs] + copysize - 1;
}

if (!mte_checks_needed(fromaddr, rdesc)) {
rdesc = 0;
}
if (!mte_checks_needed(toaddr, wdesc)) {
wdesc = 0;
}

/* Check the size; we don't want to have to do a check-for-interrupts */
if (copysize >= TARGET_PAGE_SIZE) {
raise_exception_ra(env, EXCP_UDEF, syndrome,
mops_mismatch_exception_target_el(env), ra);
}

/* Do the actual memmove */
if (forwards) {
while (copysize > 0) {
step = copy_step(env, toaddr, fromaddr, copysize,
wmemidx, rmemidx, &wdesc, &rdesc, ra);
toaddr += step;
fromaddr += step;
copysize -= step;
env->xregs[rn] = -copysize;
}
} else {
while (copysize > 0) {
step = copy_step_rev(env, toaddr, fromaddr, copysize,
wmemidx, rmemidx, &wdesc, &rdesc, ra);
toaddr -= step;
fromaddr -= step;
copysize -= step;
env->xregs[rn] = copysize;
}
}
}

void HELPER(cpye)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
uint32_t rdesc)
{
do_cpye(env, syndrome, wdesc, rdesc, true, GETPC());
}

void HELPER(cpyfe)(CPUARMState *env, uint32_t syndrome, uint32_t wdesc,
uint32_t rdesc)
{
do_cpye(env, syndrome, wdesc, rdesc, false, GETPC());
}

@ -117,3 +117,17 @@ DEF_HELPER_FLAGS_3(stzgm_tags, TCG_CALL_NO_WG, void, env, i64, i64)

DEF_HELPER_FLAGS_4(unaligned_access, TCG_CALL_NO_WG,
noreturn, env, i64, i32, i32)

DEF_HELPER_3(setp, void, env, i32, i32)
DEF_HELPER_3(setm, void, env, i32, i32)
DEF_HELPER_3(sete, void, env, i32, i32)
DEF_HELPER_3(setgp, void, env, i32, i32)
DEF_HELPER_3(setgm, void, env, i32, i32)
DEF_HELPER_3(setge, void, env, i32, i32)

DEF_HELPER_4(cpyp, void, env, i32, i32, i32)
DEF_HELPER_4(cpym, void, env, i32, i32, i32)
DEF_HELPER_4(cpye, void, env, i32, i32, i32)
DEF_HELPER_4(cpyfp, void, env, i32, i32, i32)
DEF_HELPER_4(cpyfm, void, env, i32, i32, i32)
DEF_HELPER_4(cpyfe, void, env, i32, i32, i32)

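For orientation, a sketch of what one of these lines expands to under QEMU's usual helper machinery (exact macro output elided; this is a paraphrase, not generated code):

/* DEF_HELPER_3(setp, void, env, i32, i32) declares, roughly,
 *   void helper_setp(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc);
 * plus a gen_helper_setp() wrapper that translate-a64.c can call with
 * TCGv_i32 arguments to emit the runtime call from generated code.
 */
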
@ -306,6 +306,15 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
&& !(env->pstate & PSTATE_TCO)
&& (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
if (!EX_TBFLAG_A64(flags, UNPRIV)) {
/*
* In non-unpriv contexts (eg EL0), unpriv load/stores
* act like normal ones; duplicate the MTE info to
* avoid translate-a64.c having to check UNPRIV to see
* whether it is OK to index into MTE_ACTIVE[].
*/
DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
}
}
}
/* And again for unprivileged accesses, if required. */
@ -316,6 +325,18 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
&& allocation_tag_access_enabled(env, 0, sctlr)) {
DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
}
/*
* For unpriv tag-setting accesses we also need ATA0. Again, in
* contexts where unpriv and normal insns are the same we
* duplicate the ATA bit to save effort for translate-a64.c.
*/
if (EX_TBFLAG_A64(flags, UNPRIV)) {
if (allocation_tag_access_enabled(env, 0, sctlr)) {
DP_TBFLAG_A64(flags, ATA0, 1);
}
} else {
DP_TBFLAG_A64(flags, ATA0, EX_TBFLAG_A64(flags, ATA));
}
/* Cache TCMA as well as TBI. */
DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
}

@ -50,14 +50,14 @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
}

/**
 * allocation_tag_mem:
 * allocation_tag_mem_probe:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @probe: true to merely probe, never taking an exception
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
@ -66,18 +66,25 @@ static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte. Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 * a pointer to the corresponding tag byte.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 *
 * If the page is inaccessible for @ptr_access, or has a watchpoint, there are
 * three options:
 * (1) probe = true, ra = 0 : pure probe -- we return NULL if the page is not
 *     accessible, and do not take watchpoint traps. The calling code must
 *     handle those cases in the right priority compared to MTE traps.
 * (2) probe = false, ra = 0 : probe, no fault expected -- the caller guarantees
 *     that the page is going to be accessible. We will take watchpoint traps.
 * (3) probe = false, ra != 0 : non-probe -- we will take both memory access
 *     traps and watchpoint traps.
 * (probe = true, ra != 0 is invalid and will assert.)
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
static uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
                                         uint64_t ptr, MMUAccessType ptr_access,
                                         int ptr_size, MMUAccessType tag_access,
                                         bool probe, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
@ -85,6 +92,8 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
    uint8_t *tags;
    uintptr_t index;

    assert(!(probe && ra));

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
@ -115,12 +124,16 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe. The page is expected to be
     * valid. Indicate to probe_access_flags no-fault, then assert that
     * we received a valid page.
     * When RA == 0, this is either a pure probe or a no-fault-expected probe.
     * Indicate to probe_access_flags no-fault, then either return NULL
     * for the pure probe, or assert that we received a valid page for the
     * no-fault-expected probe.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    if (probe && (flags & TLB_INVALID_MASK)) {
        return NULL;
    }
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
@ -161,7 +174,7 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
    if (!probe && unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
@ -203,6 +216,15 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
#endif
}

static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   uintptr_t ra)
{
    return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access,
                                    ptr_size, tag_access, false, ra);
}
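
The doc comment above says one tag byte covers two 16-byte granules, low nibble for the lower address. A minimal standalone sketch of that addressing rule (the helper name and example values are made up; TAG_GRANULE = 16 matches the code above):

#include <assert.h>
#include <stdint.h>

#define TAG_GRANULE 16

/* Select the nibble of a tag byte that covers the granule holding addr:
 * bit 4 of the address distinguishes the two granules one byte covers. */
static int tag_from_byte(uint8_t tag_byte, uint64_t addr)
{
    return (addr & TAG_GRANULE) ? (tag_byte >> 4) & 0xf : tag_byte & 0xf;
}

int main(void)
{
    uint8_t b = 0x53;            /* low granule tag 3, high granule tag 5 */
    assert(tag_from_byte(b, 0x1000) == 3);
    assert(tag_from_byte(b, 0x1010) == 5);
    return 0;
}
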
uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
@ -275,7 +297,7 @@ uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());
                             MMU_DATA_LOAD, GETPC());

    /* Load if page supports tags. */
    if (mem) {
@ -329,7 +351,7 @@ static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);
                             MMU_DATA_STORE, ra);

    /* Store if page supports tags. */
    if (mem) {
@ -372,10 +394,10 @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
                                  TAG_GRANULE, MMU_DATA_STORE, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);
                                  MMU_DATA_STORE, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
@ -387,7 +409,7 @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
                                  2 * TAG_GRANULE, MMU_DATA_STORE, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
@ -435,8 +457,7 @@ uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 gm_bs_bytes, MMU_DATA_LOAD,
                                 gm_bs_bytes / (2 * TAG_GRANULE), ra);
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
@ -495,8 +516,7 @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 gm_bs_bytes, MMU_DATA_LOAD,
                                 gm_bs_bytes / (2 * TAG_GRANULE), ra);
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /*
     * Tag store only happens if the page supports tags,
@ -552,7 +572,7 @@ void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
                             MMU_DATA_STORE, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
@ -597,8 +617,8 @@ static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
}

/* Record a tag check failure. */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
@ -714,6 +734,55 @@ static int checkN(uint8_t *mem, int odd, int cmp, int count)
    return n;
}

/**
 * checkNrev:
 * @tag: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * This is like checkN, but it runs backwards, checking the
 * tags starting with @tag and then the tags preceding it.
 * This is needed by the backwards-memory-copying operations.
 */
static int checkNrev(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare. */
    cmp *= 0x11;
    diff = *mem-- ^ cmp;

    if (!odd) {
        goto start_even;
    }

    while (1) {
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_even:
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem-- ^ cmp;
    }
    return n;
}
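
As a concrete illustration of the forward and backward walks, here is a compact standalone model of the nibble counting (the tag values are hypothetical, and the real checkN/checkNrev use the replicate-and-XOR trick above rather than this per-nibble loop). Counting forwards over bytes {0x33, 0x53} with tag 3 matches granules g0..g2 and stops at g3; checkNrev run from the high end with tag 5 would match g3 and stop at g2.

#include <assert.h>
#include <stdint.h>

/* Forward nibble walk starting at the even (low) nibble. */
static int count_fwd(const uint8_t *mem, int cmp, int count)
{
    for (int n = 0; n < count; n++) {
        int nib = (n & 1) ? (mem[n / 2] >> 4) & 0xf : mem[n / 2] & 0xf;
        if (nib != cmp) {
            return n;
        }
    }
    return count;
}

int main(void)
{
    const uint8_t tags[2] = { 0x33, 0x53 };   /* granules g0..g3: 3,3,3,5 */
    assert(count_fwd(tags, 3, 4) == 3);       /* like checkN: g3 mismatches  */
    assert(count_fwd(tags, 5, 4) == 0);       /* first granule already fails */
    return 0;
}
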
/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
@ -732,8 +801,7 @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint64_t tag_byte_first, tag_byte_last;
    uint32_t sizem1, tag_count, tag_size, n, c;
    uint32_t sizem1, tag_count, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

@ -763,19 +831,14 @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, tag_size, ra);
                                  MMU_DATA_LOAD, ra);
        if (!mem1) {
            return 1;
        }
@ -783,14 +846,12 @@ static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);
                                  MMU_DATA_LOAD, ra);

        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, tag_size, ra);
                                  MMU_DATA_LOAD, ra);

        /*
         * Perform all of the comparisons.
@ -918,7 +979,7 @@ uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
                             dcz_bytes, MMU_DATA_LOAD, ra);
    if (!mem) {
        goto done;
    }

@ -979,3 +1040,143 @@ uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
 done:
    return useronly_clean_ptr(ptr);
}

uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag, tag_first, tag_last;
    void *mem;
    bool w = FIELD_EX32(desc, MTEDESC, WRITE);
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe; this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   size, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }

    /*
     * TODO: checkN() is not designed for checks of the size we expect
     * for FEAT_MOPS operations, so we should implement this differently.
     * Maybe we should do something like
     *   if (region start and size are aligned nicely) {
     *       do direct loads of 64 tag bits at a time;
     *   } else {
     *       call checkN()
     *   }
     */
    /* Round the bounds to the tag granule, and compute the number of tags. */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr + size - 1, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
    n = checkN(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
    if (likely(n == tag_count)) {
        return size;
    }

    /*
     * Failure; for the first granule, it's at @ptr. Otherwise
     * it's at the first byte of the nth granule. Calculate how
     * many bytes we can access without hitting that failure.
     */
    if (n == 0) {
        return 0;
    } else {
        return n * TAG_GRANULE - (ptr - tag_first);
    }
}
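
A small standalone check of the failure-offset arithmetic in the return statement above (the addresses are hypothetical; TAG_GRANULE = 16 as in the surrounding code):

#include <assert.h>
#include <stdint.h>

#define TAG_GRANULE 16

/* Bytes reachable forwards from ptr when the first n granules match. */
static uint64_t accessible_bytes(uint64_t ptr, uint32_t n)
{
    uint64_t tag_first = ptr & ~(uint64_t)(TAG_GRANULE - 1);
    return n == 0 ? 0 : n * TAG_GRANULE - (ptr - tag_first);
}

int main(void)
{
    /* ptr is 8 bytes into its granule; the first two granules match. */
    assert(accessible_bytes(0x1008, 2) == 24);
    /* A failure in the very first granule reports 0 usable bytes. */
    assert(accessible_bytes(0x1008, 0) == 0);
    return 0;
}
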
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag, tag_first, tag_last;
    void *mem;
    bool w = FIELD_EX32(desc, MTEDESC, WRITE);
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe; this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   size, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }

    /*
     * TODO: checkNrev() is not designed for checks of the size we expect
     * for FEAT_MOPS operations, so we should implement this differently.
     * Maybe we should do something like
     *   if (region start and size are aligned nicely) {
     *       do direct loads of 64 tag bits at a time;
     *   } else {
     *       call checkNrev()
     *   }
     */
    /* Round the bounds to the tag granule, and compute the number of tags. */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_first = QEMU_ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
    n = checkNrev(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
    if (likely(n == tag_count)) {
        return size;
    }

    /*
     * Failure; for the first granule, it's at @ptr. Otherwise
     * it's at the last byte of the nth granule. Calculate how
     * many bytes we can access without hitting that failure.
     */
    if (n == 0) {
        return 0;
    } else {
        return (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
    }
}
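
And the mirror-image arithmetic for the backwards probe, under the same assumptions as the previous sketch:

#include <assert.h>
#include <stdint.h>

#define TAG_GRANULE 16

/* Bytes reachable downwards from ptr when the first n granules
 * (counting down from ptr's granule) match. */
static uint64_t accessible_bytes_rev(uint64_t ptr, uint32_t n)
{
    uint64_t tag_last = ptr & ~(uint64_t)(TAG_GRANULE - 1);
    return n == 0 ? 0 : (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
}

int main(void)
{
    /* ptr sits 8 bytes into its granule; two granules match. */
    assert(accessible_bytes_rev(0x1008, 2) == 25);
    return 0;
}
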
void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
                       uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag;
    void *mem;

    if (!desc) {
        /* Tags not actually enabled */
        return;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe: this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE, size,
                                   MMU_DATA_STORE, true, 0);
    if (!mem) {
        return;
    }

    /*
     * We know that ptr and size are both TAG_GRANULE aligned; store
     * the tag from the pointer value into the tag memory.
     */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_count = size / TAG_GRANULE;
    if (ptr & TAG_GRANULE) {
        /* Not 2*TAG_GRANULE-aligned: store tag to first nibble */
        store_tag1_parallel(TAG_GRANULE, mem, ptr_tag);
        mem++;
        tag_count--;
    }
    memset(mem, ptr_tag | (ptr_tag << 4), tag_count / 2);
    if (tag_count & 1) {
        /* Final trailing unaligned nibble */
        mem += tag_count / 2;
        store_tag1_parallel(0, mem, ptr_tag);
    }
}
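
The memset above packs one 4-bit tag into both nibbles of each byte, so two granules are written per byte. A tiny standalone check of that packing (the buffer contents are hypothetical):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint8_t tagmem[4] = { 0 };
    uint8_t tag = 0x3;

    /* One byte covers two granules, so 6 granule tags fill 3 bytes. */
    memset(tagmem, tag | (tag << 4), 3);
    assert(tagmem[0] == 0x33 && tagmem[2] == 0x33 && tagmem[3] == 0x00);
    return 0;
}
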
@ -105,9 +105,17 @@ void a64_translate_init(void)
}

/*
 * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
 * Return the core mmu_idx to use for A64 load/store insns which
 * have an "unprivileged load/store" variant. Those insns access
 * EL0 if executed from an EL which has control over EL0 (usually
 * EL1) but behave like normal loads and stores if executed from
 * elsewhere (eg EL3).
 *
 * @unpriv : true for the unprivileged encoding; false for the
 *           normal encoding (in which case we will return the same
 *           thing as get_mem_index()).
 */
static int get_a64_user_mem_index(DisasContext *s)
static int get_a64_user_mem_index(DisasContext *s, bool unpriv)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
@ -115,7 +123,7 @@ static int get_a64_user_mem_index(DisasContext *s)
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (s->unpriv) {
    if (unpriv && s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for

@ -1453,6 +1461,10 @@ static bool trans_TBZ(DisasContext *s, arg_tbz *a)

static bool trans_B_cond(DisasContext *s, arg_B_cond *a)
{
    /* BC.cond is only present with FEAT_HBC */
    if (a->c && !dc_isar_feature(aa64_hbc, s)) {
        return false;
    }
    reset_btype(s);
    if (a->cond < 0x0e) {
        /* genuinely conditional branches */
@ -2260,7 +2272,7 @@ static void handle_sys(DisasContext *s, bool isread,
        clean_addr = clean_data_tbi(s, tcg_rt);
        gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);

        if (s->ata) {
        if (s->ata[0]) {
            /* Extract the tag from the register to match STZGM. */
            tag = tcg_temp_new_i64();
            tcg_gen_shri_i64(tag, tcg_rt, 56);
@ -2277,7 +2289,7 @@ static void handle_sys(DisasContext *s, bool isread,
        clean_addr = clean_data_tbi(s, tcg_rt);
        gen_helper_dc_zva(cpu_env, clean_addr);

        if (s->ata) {
        if (s->ata[0]) {
            /* Extract the tag from the register to match STZGM. */
            tag = tcg_temp_new_i64();
            tcg_gen_shri_i64(tag, tcg_rt, 56);
@ -3058,7 +3070,7 @@ static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
    tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);

    /* Perform the tag store, if tag access enabled. */
    if (s->ata) {
    if (s->ata[0]) {
        if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
        } else {
@ -3084,7 +3096,7 @@ static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a,
    if (!a->p) {
        tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
    }
    memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
    memidx = get_a64_user_mem_index(s, a->unpriv);
    *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store,
                                        a->w || a->rn != 31,
                                        mop, a->unpriv, memidx);
@ -3105,7 +3117,7 @@ static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a)
{
    bool iss_sf, iss_valid = !a->w;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    int memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
    int memidx = get_a64_user_mem_index(s, a->unpriv);
    MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
@ -3123,7 +3135,7 @@ static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a)
{
    bool iss_sf, iss_valid = !a->w;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;
    int memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
    int memidx = get_a64_user_mem_index(s, a->unpriv);
    MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);

    op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
@ -3756,7 +3768,7 @@ static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a)
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata) {
    if (s->ata[0]) {
        gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
    }
    /*
@ -3788,7 +3800,7 @@ static bool trans_STGM(DisasContext *s, arg_ldst_tag *a)
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata) {
    if (s->ata[0]) {
        gen_helper_stgm(cpu_env, addr, tcg_rt);
    } else {
        MMUAccessType acc = MMU_DATA_STORE;
@ -3820,7 +3832,7 @@ static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a)
    tcg_gen_addi_i64(addr, addr, a->imm);
    tcg_rt = cpu_reg(s, a->rt);

    if (s->ata) {
    if (s->ata[0]) {
        gen_helper_ldgm(tcg_rt, cpu_env, addr);
    } else {
        MMUAccessType acc = MMU_DATA_LOAD;
@ -3855,7 +3867,7 @@ static bool trans_LDG(DisasContext *s, arg_ldst_tag *a)

    tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
    tcg_rt = cpu_reg(s, a->rt);
    if (s->ata) {
    if (s->ata[0]) {
        gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
    } else {
        /*
@ -3892,7 +3904,7 @@ static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair)
        tcg_gen_addi_i64(addr, addr, a->imm);
    }
    tcg_rt = cpu_reg_sp(s, a->rt);
    if (!s->ata) {
    if (!s->ata[0]) {
        /*
         * For STG and ST2G, we need to check alignment and probe memory.
         * TODO: For STZG and STZ2G, we could rely on the stores below,
@ -3950,6 +3962,123 @@ TRANS_FEAT(STZG, aa64_mte_insn_reg, do_STG, a, true, false)
TRANS_FEAT(ST2G, aa64_mte_insn_reg, do_STG, a, false, true)
TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true)

typedef void SetFn(TCGv_env, TCGv_i32, TCGv_i32);

static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue,
                   bool is_setg, SetFn fn)
{
    int memidx;
    uint32_t syndrome, desc = 0;

    if (is_setg && !dc_isar_feature(aa64_mte, s)) {
        return false;
    }

    /*
     * UNPREDICTABLE cases: we choose to UNDEF, which allows
     * us to pull this check before the CheckMOPSEnabled() test
     * (which we do in the helper function)
     */
    if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd ||
        a->rd == 31 || a->rn == 31) {
        return false;
    }

    memidx = get_a64_user_mem_index(s, a->unpriv);

    /*
     * We pass option_a == true, matching our implementation;
     * we pass wrong_option == false: helper function may set that bit.
     */
    syndrome = syn_mop(true, is_setg, (a->nontemp << 1) | a->unpriv,
                       is_epilogue, false, true, a->rd, a->rs, a->rn);

    if (is_setg ? s->ata[a->unpriv] : s->mte_active[a->unpriv]) {
        /* We may need to do MTE tag checking, so assemble the descriptor */
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, true);
        /* SIZEM1 and ALIGN we leave 0 (byte write) */
    }
    /* The helper function always needs the memidx even with MTE disabled */
    desc = FIELD_DP32(desc, MTEDESC, MIDX, memidx);

    /*
     * The helper needs the register numbers, but since they're in
     * the syndrome anyway, we let it extract them from there rather
     * than passing in an extra three integer arguments.
     */
    fn(cpu_env, tcg_constant_i32(syndrome), tcg_constant_i32(desc));
    return true;
}
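
FIELD_DP32(desc, MTEDESC, WRITE, true) deposits a value into a named bitfield of desc; the MTEDESC field offsets are defined elsewhere in QEMU and do not appear in this diff. A sketch of the underlying deposit operation, with an assumed bit position purely for illustration:

#include <assert.h>
#include <stdint.h>

/* What a FIELD_DP32-style macro boils down to: insert val into the
 * length-bit field of x that starts at bit position start. */
static uint32_t deposit32_sketch(uint32_t x, int start, int length,
                                 uint32_t val)
{
    uint32_t mask = (~0u >> (32 - length)) << start;
    return (x & ~mask) | ((val << start) & mask);
}

int main(void)
{
    /* e.g. a 1-bit WRITE flag at a hypothetical bit position 8 */
    assert(deposit32_sketch(0, 8, 1, 1) == 0x100);
    return 0;
}
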
TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, false, gen_helper_setp)
TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, false, gen_helper_setm)
TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, false, gen_helper_sete)
TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setgp)
TRANS_FEAT(SETGM, aa64_mops, do_SET, a, false, true, gen_helper_setgm)
TRANS_FEAT(SETGE, aa64_mops, do_SET, a, true, true, gen_helper_setge)

typedef void CpyFn(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32);

static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn)
{
    int rmemidx, wmemidx;
    uint32_t syndrome, rdesc = 0, wdesc = 0;
    bool wunpriv = extract32(a->options, 0, 1);
    bool runpriv = extract32(a->options, 1, 1);

    /*
     * UNPREDICTABLE cases: we choose to UNDEF, which allows
     * us to pull this check before the CheckMOPSEnabled() test
     * (which we do in the helper function)
     */
    if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd ||
        a->rd == 31 || a->rs == 31 || a->rn == 31) {
        return false;
    }

    rmemidx = get_a64_user_mem_index(s, runpriv);
    wmemidx = get_a64_user_mem_index(s, wunpriv);

    /*
     * We pass option_a == true, matching our implementation;
     * we pass wrong_option == false: helper function may set that bit.
     */
    syndrome = syn_mop(false, false, a->options, is_epilogue,
                       false, true, a->rd, a->rs, a->rn);

    /* If we need to do MTE tag checking, assemble the descriptors */
    if (s->mte_active[runpriv]) {
        rdesc = FIELD_DP32(rdesc, MTEDESC, TBI, s->tbid);
        rdesc = FIELD_DP32(rdesc, MTEDESC, TCMA, s->tcma);
    }
    if (s->mte_active[wunpriv]) {
        wdesc = FIELD_DP32(wdesc, MTEDESC, TBI, s->tbid);
        wdesc = FIELD_DP32(wdesc, MTEDESC, TCMA, s->tcma);
        wdesc = FIELD_DP32(wdesc, MTEDESC, WRITE, true);
    }
    /* The helper function needs these parts of the descriptor regardless */
    rdesc = FIELD_DP32(rdesc, MTEDESC, MIDX, rmemidx);
    wdesc = FIELD_DP32(wdesc, MTEDESC, MIDX, wmemidx);

    /*
     * The helper needs the register numbers, but since they're in
     * the syndrome anyway, we let it extract them from there rather
     * than passing in an extra three integer arguments.
     */
    fn(cpu_env, tcg_constant_i32(syndrome), tcg_constant_i32(wdesc),
       tcg_constant_i32(rdesc));
    return true;
}

TRANS_FEAT(CPYP, aa64_mops, do_CPY, a, false, gen_helper_cpyp)
TRANS_FEAT(CPYM, aa64_mops, do_CPY, a, false, gen_helper_cpym)
TRANS_FEAT(CPYE, aa64_mops, do_CPY, a, true, gen_helper_cpye)
TRANS_FEAT(CPYFP, aa64_mops, do_CPY, a, false, gen_helper_cpyfp)
TRANS_FEAT(CPYFM, aa64_mops, do_CPY, a, false, gen_helper_cpyfm)
TRANS_FEAT(CPYFE, aa64_mops, do_CPY, a, true, gen_helper_cpyfe)

typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);

//// --- Begin LibAFL code ---
@ -4025,7 +4154,7 @@ static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a,
    tcg_rn = cpu_reg_sp(s, a->rn);
    tcg_rd = cpu_reg_sp(s, a->rd);

    if (s->ata) {
    if (s->ata[0]) {
        gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
                           tcg_constant_i32(imm),
                           tcg_constant_i32(a->uimm4));
@ -5432,7 +5561,7 @@ static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        }
        if (s->ata) {
        if (s->ata[0]) {
            gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
        } else {
@ -13923,7 +14052,8 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
    dc->ata[0] = EX_TBFLAG_A64(tb_flags, ATA);
    dc->ata[1] = EX_TBFLAG_A64(tb_flags, ATA0);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);

@ -114,8 +114,8 @@ typedef struct DisasContext {
    bool unpriv;
    /* True if v8.3-PAuth is active. */
    bool pauth_active;
    /* True if v8.5-MTE access to tags is enabled. */
    bool ata;
    /* True if v8.5-MTE access to tags is enabled; index with is_unpriv. */
    bool ata[2];
    /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */
    bool mte_active[2];
    /* True with v8.5-BTI and SCTLR_ELx.BT* set. */

@ -1155,7 +1155,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
            NULL, "sbdr-ssdp-no", "fbsdp-no", "psdp-no",
            NULL, "fb-clear", NULL, NULL,
            NULL, NULL, NULL, NULL,
            "pbrsb-no", NULL, NULL, NULL,
            "pbrsb-no", NULL, "gds-no", NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
@ -1340,6 +1340,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
        .feat_names = {
            [54] = "vmx-ins-outs",
            [55] = "vmx-true-ctls",
            [56] = "vmx-any-errcode",
        },
        .msr = {
            .index = MSR_IA32_VMX_BASIC,

@ -5975,9 +5976,10 @@ static void x86_register_cpudef_types(const X86CPUDefinition *def)
    /* Versioned models: */

    for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
        X86CPUModel *m = g_new0(X86CPUModel, 1);
        g_autofree char *name =
            x86_cpu_versioned_model_name(def, vdef->version);

        m = g_new0(X86CPUModel, 1);
        m->cpudef = def;
        m->version = vdef->version;
        m->note = vdef->note;

@ -1039,6 +1039,7 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
#define MSR_VMX_BASIC_DUAL_MONITOR      (1ULL << 49)
#define MSR_VMX_BASIC_INS_OUTS          (1ULL << 54)
#define MSR_VMX_BASIC_TRUE_CTLS         (1ULL << 55)
#define MSR_VMX_BASIC_ANY_ERRCODE       (1ULL << 56)

#define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full
#define MSR_VMX_MISC_STORE_LMA          (1ULL << 5)

@ -2699,8 +2699,6 @@ int kvm_arch_init(MachineState *ms, KVMState *s)

    if (enable_cpu_pm) {
        int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
        int ret;

/* Work around for kernel header with a typo. TODO: fix header and drop. */
#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
@ -3610,7 +3608,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
    if (kvm_enabled() && cpu->enable_pmu &&
        (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
        uint64_t depth;
        int i, ret;
        int ret;

        /*
         * Only migrate Arch LBR states when the host Arch LBR depth
@ -3643,8 +3641,6 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
    }

    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
        if (has_msr_mcg_ext_ctl) {
@ -4041,7 +4037,6 @@ static int kvm_get_msrs(X86CPU *cpu)
    if (kvm_enabled() && cpu->enable_pmu &&
        (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
        uint64_t depth;
        int i, ret;

        ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
        if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {

@ -226,14 +226,29 @@ static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
    }
}

static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}
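
The busy flag lives in the type field of the TSS descriptor's second dword (e2). A standalone sketch of the flip performed above, assuming DESC_TSS_BUSY_MASK is 1 << 9, which matches the usual x86 descriptor layout (the macro itself is defined elsewhere in QEMU):

#include <assert.h>
#include <stdint.h>

#define DESC_TSS_BUSY_MASK (1u << 9)   /* assumed value; bit 9 of e2 */

int main(void)
{
    uint32_t e2 = 0x00008900;          /* available 32-bit TSS (type 9) */
    e2 |= DESC_TSS_BUSY_MASK;
    assert(e2 == 0x00008B00);          /* busy 32-bit TSS (type 11) */
    e2 &= ~DESC_TSS_BUSY_MASK;
    assert(e2 == 0x00008900);
    return 0;
}
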
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
/* return 0 if switching to a 16-bit selector */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
@ -341,13 +356,7 @@ static void switch_tss_ra(CPUX86State *env, int tss_selector,

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
@ -399,13 +408,7 @@ static void switch_tss_ra(CPUX86State *env, int tss_selector,

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */
@ -499,13 +502,14 @@ static void switch_tss_ra(CPUX86State *env, int tss_selector,
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}

static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
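
The new return value encodes the TSS width: descriptor types 1 and 3 are 16-bit TSSs and types 9 and 11 are 32-bit ones, so type >> 3 yields 0 or 1, which do_interrupt_protected below can use directly as its stack-push shift. A trivial check of that arithmetic:

#include <assert.h>

int main(void)
{
    assert((1 >> 3) == 0 && (3 >> 3) == 0);   /* 16-bit TSS types */
    assert((9 >> 3) == 1 && (11 >> 3) == 1);  /* 32-bit TSS types */
    return 0;
}
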
static inline unsigned int get_sp_mask(unsigned int e2)
@ -647,14 +651,11 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
    shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
    if (has_error_code) {
        int type;
        uint32_t mask;

        /* push the error code */
        type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
        shift = type >> 3;
        if (env->segs[R_SS].flags & DESC_B_MASK) {
            mask = 0xffffffff;
        } else {

@ -387,8 +387,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
    env->hflags2 |= HF2_GIF_MASK;

    if (ctl_has_irq(env)) {
        CPUState *cs = env_cpu(env);

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

@ -3261,7 +3261,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int op, f, val;
            int f;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

@ -3321,8 +3321,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
    case 0x81:
    case 0x83:
        {
            int val;

            ot = mo_b_d(b, dflag);

            modrm = x86_ldub_code(env, s);

@ -15,6 +15,10 @@
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The semihosting protocol implemented here is described in the
 * libgloss sources:
 * https://sourceware.org/git/?p=newlib-cygwin.git;a=blob;f=libgloss/m68k/m68k-semi.txt;hb=HEAD
 */

#include "qemu/osdep.h"

@ -93,7 +93,7 @@ class ReplayLinux(LinuxTest):
                        % os.path.getsize(replay_path))
        else:
            vm.event_wait('SHUTDOWN', self.timeout)
            vm.shutdown(True)
            vm.wait()
            logger.info('successfully finished the replay')
        elapsed = time.time() - start_time
        logger.info('elapsed time %.2f sec' % elapsed)

@ -979,10 +979,15 @@ _require_drivers()
#
_require_large_file()
{
    if ! truncate --size="$1" "$TEST_IMG"; then
    if [ -z "$TEST_IMG_FILE" ]; then
        FILENAME="$TEST_IMG"
    else
        FILENAME="$TEST_IMG_FILE"
    fi
    if ! truncate --size="$1" "$FILENAME"; then
        _notrun "file system on $TEST_DIR does not support large enough files"
    fi
    rm "$TEST_IMG"
    rm "$FILENAME"
}

# Check that a set of devices is available in the QEMU binary

@ -142,4 +142,4 @@ if __name__ == '__main__':

        iotests.main(supported_fmts=['qcow2'])
    except ImportError:
        iotests.notrun('libnbd not installed')
        iotests.notrun('Python bindings to libnbd are not installed')

@ -46,7 +46,8 @@ static void *es1370_create(void *pci_bus, QGuestAllocator *alloc, void *addr)
static void es1370_register_nodes(void)
{
    QOSGraphEdgeOptions opts = {
        .extra_device_opts = "addr=04.0",
        .extra_device_opts = "addr=04.0,audiodev=audio0",
        .before_cmd_line = "-audiodev driver=none,id=audio0",
    };
    add_qpci_address(&opts, &(QPCIAddress) { .devfn = QPCI_DEVFN(4, 0) });

@ -106,8 +106,10 @@ const generic_fuzz_config predefined_configs[] = {
    },{
        .name = "intel-hda",
        .args = "-machine q35 -nodefaults -device intel-hda,id=hda0 "
        "-device hda-output,bus=hda0.0 -device hda-micro,bus=hda0.0 "
        "-device hda-duplex,bus=hda0.0",
        "-audiodev driver=none,id=audio0 "
        "-device hda-output,bus=hda0.0,audiodev=audio0 "
        "-device hda-micro,bus=hda0.0,audiodev=audio0 "
        "-device hda-duplex,bus=hda0.0,audiodev=audio0",
        .objects = "intel-hda",
    },{
        .name = "ide-hd",

@ -11,20 +11,24 @@
#include "libqtest-single.h"

#define HDA_ID "hda0"
#define CODEC_DEVICES " -device hda-output,bus=" HDA_ID ".0" \
                      " -device hda-micro,bus=" HDA_ID ".0" \
                      " -device hda-duplex,bus=" HDA_ID ".0"
#define AUDIODEV " -audiodev driver=none,id=audio0 "
#define AUDIODEV_REF "audiodev=audio0"
#define CODEC_DEVICES " -device hda-output,bus=" HDA_ID ".0," AUDIODEV_REF \
                      " -device hda-micro,bus=" HDA_ID ".0," AUDIODEV_REF \
                      " -device hda-duplex,bus=" HDA_ID ".0," AUDIODEV_REF
/* Tests only initialization so far. TODO: Replace with functional tests */
static void ich6_test(void)
{
    qtest_start("-machine pc -device intel-hda,id=" HDA_ID CODEC_DEVICES);
    qtest_start(AUDIODEV "-machine pc -device intel-hda,id=" HDA_ID CODEC_DEVICES);
    qtest_end();
}

static void ich9_test(void)
{
    qtest_start("-machine q35 -device ich9-intel-hda,bus=pcie.0,addr=1b.0,id="
    qtest_start("-machine q35"
                AUDIODEV
                "-device ich9-intel-hda,bus=pcie.0,addr=1b.0,id="
                HDA_ID CODEC_DEVICES);
    qtest_end();
}
@ -39,6 +43,7 @@ static void test_issue542_ich6(void)
    QTestState *s;

    s = qtest_init("-nographic -nodefaults -M pc-q35-6.2 "
                   AUDIODEV
                   "-device intel-hda,id=" HDA_ID CODEC_DEVICES);

    qtest_outl(s, 0xcf8, 0x80000804);

@ -155,7 +155,7 @@ static void bcd_check_time(void)
    struct tm *datep;
    time_t ts;
    const int wiggle = 2;
    QTestState *s = m48t59_qtest_start();
    QTestState *qts = m48t59_qtest_start();

    /*
     * This check assumes a few things. First, we cannot guarantee that we get
@ -173,10 +173,10 @@ static void bcd_check_time(void)
    ts = time(NULL);
    gmtime_r(&ts, &start);

    cmos_get_date_time(s, &date[0]);
    cmos_get_date_time(s, &date[1]);
    cmos_get_date_time(s, &date[2]);
    cmos_get_date_time(s, &date[3]);
    cmos_get_date_time(qts, &date[0]);
    cmos_get_date_time(qts, &date[1]);
    cmos_get_date_time(qts, &date[2]);
    cmos_get_date_time(qts, &date[3]);

    ts = time(NULL);
    gmtime_r(&ts, &end);
@ -192,22 +192,25 @@ static void bcd_check_time(void)
    }

    if (!(tm_cmp(&start, datep) <= 0 && tm_cmp(datep, &end) <= 0)) {
        long t, s;
        long date_s, start_s;
        unsigned long diff;

        start.tm_isdst = datep->tm_isdst;

        t = (long)mktime(datep);
        s = (long)mktime(&start);
        if (t < s) {
            g_test_message("RTC is %ld second(s) behind wall-clock", (s - t));
        date_s = (long)mktime(datep);
        start_s = (long)mktime(&start);
        if (date_s < start_s) {
            diff = start_s - date_s;
            g_test_message("RTC is %ld second(s) behind wall-clock", diff);
        } else {
            g_test_message("RTC is %ld second(s) ahead of wall-clock", (t - s));
            diff = date_s - start_s;
            g_test_message("RTC is %ld second(s) ahead of wall-clock", diff);
        }

        g_assert_cmpint(ABS(t - s), <=, wiggle);
        g_assert_cmpint(diff, <=, wiggle);
    }

    qtest_quit(s);
    qtest_quit(qts);
}
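
The assertion above tolerates a small skew between the emulated RTC and the host wall clock. A standalone sketch of the same tolerance check (the values are hypothetical):

#include <assert.h>
#include <time.h>

static int within_wiggle(time_t rtc, time_t host, int wiggle)
{
    long d = (long)rtc - (long)host;
    return (d < 0 ? -d : d) <= wiggle;
}

int main(void)
{
    assert(within_wiggle(1000, 1002, 2));
    assert(!within_wiggle(1000, 1003, 2));
    return 0;
}
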
/* success if no crash or abort */

@ -16,7 +16,7 @@
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-sockets.h"

#define CONNECTION_TIMEOUT 60
#define CONNECTION_TIMEOUT 120

#define EXPECT_STATE(q, e, t) \
do { \
@ -401,7 +401,7 @@ static void test_dgram_inet(void)
    qtest_quit(qts0);
}

#ifndef _WIN32
#if !defined(_WIN32) && !defined(CONFIG_DARWIN)
static void test_dgram_mcast(void)
{
    QTestState *qts;
@ -414,7 +414,9 @@ static void test_dgram_mcast(void)

    qtest_quit(qts);
}
#endif

#ifndef _WIN32
static void test_dgram_unix(void)
{
    QTestState *qts0, *qts1;
@ -511,7 +513,7 @@ int main(int argc, char **argv)
    if (has_ipv4) {
        qtest_add_func("/netdev/stream/inet/ipv4", test_stream_inet_ipv4);
        qtest_add_func("/netdev/dgram/inet", test_dgram_inet);
#ifndef _WIN32
#if !defined(_WIN32) && !defined(CONFIG_DARWIN)
        qtest_add_func("/netdev/dgram/mcast", test_dgram_mcast);
#endif
    }

@ -313,18 +313,10 @@ int main(int argc, char **argv)
                       "xlevel2", 0);
    }
    /*
     * QEMU 1.4.0 had auto-level enabled for CPUID[7], already,
     * QEMU 2.3.0 had auto-level enabled for CPUID[7], already,
     * and the compat code that sets default level shouldn't
     * disable the auto-level=7 code:
     */
    if (qtest_has_machine("pc-i440fx-1.4")) {
        add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.4/off",
                       "-machine pc-i440fx-1.4 -cpu Nehalem",
                       "level", 2);
        add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.5/on",
                       "-machine pc-i440fx-1.4 -cpu Nehalem,smap=on",
                       "level", 7);
    }
    if (qtest_has_machine("pc-i440fx-2.3")) {
        add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/off",
                       "-machine pc-i440fx-2.3 -cpu Penryn",

@ -126,7 +126,7 @@ int main(void)
     */
    get_cpu_reg_check_mask(id_aa64isar0_el1, _m(f0ff,ffff,f0ff,fff0));
    get_cpu_reg_check_mask(id_aa64isar1_el1, _m(00ff,f0ff,ffff,ffff));
    get_cpu_reg_check_mask(SYS_ID_AA64ISAR2_EL1, _m(0000,0000,0000,ffff));
    get_cpu_reg_check_mask(SYS_ID_AA64ISAR2_EL1, _m(00ff,0000,00ff,ffff));
    /* TGran4 & TGran64 as pegged to -1 */
    get_cpu_reg_check_mask(id_aa64mmfr0_el1, _m(f000,0000,ff00,0000));
    get_cpu_reg_check_mask(id_aa64mmfr1_el1, _m(0000,f000,0000,0000));
@ -138,7 +138,7 @@ int main(void)
    get_cpu_reg_check_mask(id_aa64dfr0_el1, _m(0000,0000,0000,0006));
    get_cpu_reg_check_zero(id_aa64dfr1_el1);
    get_cpu_reg_check_mask(SYS_ID_AA64ZFR0_EL1, _m(0ff0,ff0f,00ff,00ff));
    get_cpu_reg_check_mask(SYS_ID_AA64SMFR0_EL1, _m(80f1,00fd,0000,0000));
    get_cpu_reg_check_mask(SYS_ID_AA64SMFR0_EL1, _m(8ff1,fcff,0000,0000));

    get_cpu_reg_check_zero(id_aa64afr0_el1);
    get_cpu_reg_check_zero(id_aa64afr1_el1);

ui/vnc.c

@ -2205,7 +2205,7 @@ static void set_encodings(VncState *vs, int32_t *encodings, size_t n_encodings)
        break;
    case VNC_ENCODING_XVP:
        if (vs->vd->power_control) {
            vs->features |= VNC_FEATURE_XVP;
            vs->features |= VNC_FEATURE_XVP_MASK;
            send_xvp_message(vs, VNC_XVP_CODE_INIT);
        }
        break;
@ -2454,7 +2454,7 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len)
        vnc_client_cut_text(vs, read_u32(data, 4), data + 8);
        break;
    case VNC_MSG_CLIENT_XVP:
        if (!(vs->features & VNC_FEATURE_XVP)) {
        if (!vnc_has_feature(vs, VNC_FEATURE_XVP)) {
            error_report("vnc: xvp client message while disabled");
            vnc_client_error(vs);
            break;
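
Both fixes above draw the same distinction: the VNC_FEATURE_* enums are bit positions while vs->features holds a bit mask, so or-ing or and-ing the raw enum value was wrong. A sketch of the presumed relationship (VNC_FEATURE_XVP_MASK and vnc_has_feature() live in ui/vnc.h, not in this diff; the bit number here is invented):

#include <assert.h>
#include <stdint.h>

enum { VNC_FEATURE_XVP = 5 };          /* bit position; value assumed */
#define VNC_FEATURE_XVP_MASK (1u << VNC_FEATURE_XVP)

/* What vnc_has_feature presumably checks: a mask test, not a raw AND. */
static int has_feature(uint32_t features, int feature)
{
    return (features & (1u << feature)) != 0;
}

int main(void)
{
    uint32_t features = 0;
    features |= VNC_FEATURE_XVP_MASK;
    assert(has_feature(features, VNC_FEATURE_XVP));
    return 0;
}
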
@ -2551,7 +2551,7 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len)
            vs, vs->ioc, vs->as.fmt, vs->as.nchannels, vs->as.freq);
        break;
    default:
        VNC_DEBUG("Invalid audio message %d\n", read_u8(data, 4));
        VNC_DEBUG("Invalid audio message %d\n", read_u8(data, 2));
        vnc_client_error(vs);
        break;
    }